Diffstat (limited to 'drivers/net/ethernet/qlogic/qed')
60 files changed, 85617 insertions, 0 deletions
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile new file mode 100644 index 0000000000..3d2098f21b --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +# Copyright (c) 2019-2020 Marvell International Ltd. + +obj-$(CONFIG_QED) := qed.o + +qed-y := \ + qed_chain.o \ + qed_cxt.o \ + qed_dcbx.o \ + qed_debug.o \ + qed_dev.o \ + qed_devlink.o \ + qed_hw.o \ + qed_init_fw_funcs.o \ + qed_init_ops.o \ + qed_int.o \ + qed_l2.o \ + qed_main.o \ + qed_mcp.o \ + qed_mng_tlv.o \ + qed_ptp.o \ + qed_selftest.o \ + qed_sp_commands.o \ + qed_spq.o + +qed-$(CONFIG_QED_FCOE) += qed_fcoe.o +qed-$(CONFIG_QED_ISCSI) += qed_iscsi.o +qed-$(CONFIG_QED_LL2) += qed_ll2.o +qed-$(CONFIG_QED_OOO) += qed_ooo.o + +qed-$(CONFIG_QED_NVMETCP) += \ + qed_nvmetcp.o \ + qed_nvmetcp_fw_funcs.o + +qed-$(CONFIG_QED_RDMA) += \ + qed_iwarp.o \ + qed_rdma.o \ + qed_roce.o + +qed-$(CONFIG_QED_SRIOV) += \ + qed_sriov.o \ + qed_vf.o diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h new file mode 100644 index 0000000000..1d719726f7 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -0,0 +1,1006 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_H +#define _QED_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/workqueue.h> +#include <linux/zlib.h> +#include <linux/hashtable.h> +#include <linux/qed/qed_if.h> +#include "qed_debug.h" +#include "qed_hsi.h" +#include "qed_dbg_hsi.h" +#include "qed_mfw_hsi.h" + +extern const struct qed_common_ops qed_common_ops_pass; + +#define STORM_FW_VERSION \ + ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ + (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) + +#define MAX_HWFNS_PER_DEVICE (4) +#define NAME_SIZE 16 +#define VER_SIZE 16 + +#define QED_WFQ_UNIT 100 + +#define QED_WID_SIZE (1024) +#define QED_MIN_WIDS (4) +#define QED_PF_DEMS_SIZE (4) + +#define QED_LLH_DONT_CARE 0 + +/* cau states */ +enum qed_coalescing_mode { + QED_COAL_MODE_DISABLE, + QED_COAL_MODE_ENABLE +}; + +enum qed_nvm_cmd { + QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, + QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA, + QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM, + QED_GET_MCP_NVM_RESP = 0xFFFFFF00 +}; + +struct qed_eth_cb_ops; +struct qed_dev_info; +union qed_mcp_protocol_stats; +enum qed_mcp_protocol_type; +enum qed_mfw_tlv_type; +union qed_mfw_tlv_data; + +/* helpers */ +#define QED_MFW_GET_FIELD(name, field) \ + (((name) & (field ## _MASK)) >> (field ## _SHIFT)) + +#define QED_MFW_SET_FIELD(name, field, value) \ + do { \ + (name) &= ~(field ## _MASK); \ + (name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\ + } while (0) + +static inline u32 qed_db_addr(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + (cid * QED_PF_DEMS_SIZE); + + return db_addr; +} + +static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); + + return db_addr; +} + +#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ + 
((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \ + ~((1 << (p_hwfn->cdev->cache_shift)) - 1)) + +#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++) + +#define D_TRINE(val, cond1, cond2, true1, true2, def) \ + ((val) == (cond1) ? true1 : \ + ((val) == (cond2) ? true2 : def)) + +/* forward */ +struct qed_ptt_pool; +struct qed_spq; +struct qed_sb_info; +struct qed_sb_attn_info; +struct qed_cxt_mngr; +struct qed_sb_sp_info; +struct qed_ll2_info; +struct qed_mcp_info; +struct qed_llh_info; + +struct qed_rt_data { + u32 *init_val; + bool *b_valid; +}; + +enum qed_tunn_mode { + QED_MODE_L2GENEVE_TUNN, + QED_MODE_IPGENEVE_TUNN, + QED_MODE_L2GRE_TUNN, + QED_MODE_IPGRE_TUNN, + QED_MODE_VXLAN_TUNN, +}; + +enum qed_tunn_clss { + QED_TUNN_CLSS_MAC_VLAN, + QED_TUNN_CLSS_MAC_VNI, + QED_TUNN_CLSS_INNER_MAC_VLAN, + QED_TUNN_CLSS_INNER_MAC_VNI, + QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE, + MAX_QED_TUNN_CLSS, +}; + +struct qed_tunn_update_type { + bool b_update_mode; + bool b_mode_enabled; + enum qed_tunn_clss tun_cls; +}; + +struct qed_tunn_update_udp_port { + bool b_update_port; + u16 port; +}; + +struct qed_tunnel_info { + struct qed_tunn_update_type vxlan; + struct qed_tunn_update_type l2_geneve; + struct qed_tunn_update_type ip_geneve; + struct qed_tunn_update_type l2_gre; + struct qed_tunn_update_type ip_gre; + + struct qed_tunn_update_udp_port vxlan_port; + struct qed_tunn_update_udp_port geneve_port; + + bool b_update_rx_cls; + bool b_update_tx_cls; +}; + +struct qed_tunn_start_params { + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + +struct qed_tunn_update_params { + unsigned long tunn_mode_update_mask; + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + +/* The PCI personality is not quite synonymous to protocol ID: + * 1. All personalities need CORE connections + * 2. The Ethernet personality may support also the RoCE/iWARP protocol + */ +enum qed_pci_personality { + QED_PCI_ETH, + QED_PCI_FCOE, + QED_PCI_ISCSI, + QED_PCI_NVMETCP, + QED_PCI_ETH_ROCE, + QED_PCI_ETH_IWARP, + QED_PCI_ETH_RDMA, + QED_PCI_DEFAULT, /* default in shmem */ +}; + +/* All VFs are symmetric, all counters are PF + all VFs */ +struct qed_qm_iids { + u32 cids; + u32 vf_cids; + u32 tids; +}; + +/* HW / FW resources, output of features supported below, most information + * is received from MFW. 
+ */ +enum qed_resources { + QED_SB, + QED_L2_QUEUE, + QED_VPORT, + QED_RSS_ENG, + QED_PQ, + QED_RL, + QED_MAC, + QED_VLAN, + QED_RDMA_CNQ_RAM, + QED_ILT, + QED_LL2_RAM_QUEUE, + QED_LL2_CTX_QUEUE, + QED_CMDQS_CQS, + QED_RDMA_STATS_QUEUE, + QED_BDQ, + QED_MAX_RESC, +}; + +enum QED_FEATURE { + QED_PF_L2_QUE, + QED_VF, + QED_RDMA_CNQ, + QED_NVMETCP_CQ, + QED_ISCSI_CQ, + QED_FCOE_CQ, + QED_VF_L2_QUE, + QED_MAX_FEATURES, +}; + +enum qed_dev_cap { + QED_DEV_CAP_ETH, + QED_DEV_CAP_FCOE, + QED_DEV_CAP_ISCSI, + QED_DEV_CAP_ROCE, + QED_DEV_CAP_IWARP, +}; + +enum qed_wol_support { + QED_WOL_SUPPORT_NONE, + QED_WOL_SUPPORT_PME, +}; + +enum qed_db_rec_exec { + DB_REC_DRY_RUN, + DB_REC_REAL_DEAL, + DB_REC_ONCE, +}; + +struct qed_hw_info { + /* PCI personality */ + enum qed_pci_personality personality; +#define QED_IS_RDMA_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ + (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ + (dev)->hw_info.personality == QED_PCI_ETH_RDMA) +#define QED_IS_ROCE_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \ + (dev)->hw_info.personality == QED_PCI_ETH_RDMA) +#define QED_IS_IWARP_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \ + (dev)->hw_info.personality == QED_PCI_ETH_RDMA) +#define QED_IS_L2_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ETH || \ + QED_IS_RDMA_PERSONALITY(dev)) +#define QED_IS_FCOE_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_FCOE) +#define QED_IS_ISCSI_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_ISCSI) +#define QED_IS_NVMETCP_PERSONALITY(dev) \ + ((dev)->hw_info.personality == QED_PCI_NVMETCP) + + /* Resource Allocation scheme results */ + u32 resc_start[QED_MAX_RESC]; + u32 resc_num[QED_MAX_RESC]; +#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) +#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) +#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ + RESC_NUM(_p_hwfn, resc)) + + u32 feat_num[QED_MAX_FEATURES]; +#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) + + /* Amount of traffic classes HW supports */ + u8 num_hw_tc; + + /* Amount of TCs which should be active according to DCBx or upper + * layer driver configuration. + */ + u8 num_active_tc; + + u8 offload_tc; + bool offload_tc_set; + + bool multi_tc_roce_en; +#define IS_QED_MULTI_TC_ROCE(p_hwfn) ((p_hwfn)->hw_info.multi_tc_roce_en) + + u32 concrete_fid; + u16 opaque_fid; + u16 ovlan; + u32 part_num[4]; + + unsigned char hw_mac_addr[ETH_ALEN]; + u64 node_wwn; + u64 port_wwn; + + u16 num_fcoe_conns; + + struct qed_igu_info *p_igu_info; + + u32 hw_mode; + unsigned long device_capabilities; + u16 mtu; + + enum qed_wol_support b_wol_support; +}; + +/* maximun size of read/write commands (HW limit) */ +#define DMAE_MAX_RW_SIZE 0x2000 + +struct qed_dmae_info { + /* Mutex for synchronizing access to functions */ + struct mutex mutex; + + u8 channel; + + dma_addr_t completion_word_phys_addr; + + /* The memory location where the DMAE writes the completion + * value when an operation is finished on this context. 
+ */ + u32 *p_completion_word; + + dma_addr_t intermediate_buffer_phys_addr; + + /* An intermediate buffer for DMAE operations that use virtual + * addresses - data is DMA'd to/from this buffer and then + * memcpy'd to/from the virtual address + */ + u32 *p_intermediate_buffer; + + dma_addr_t dmae_cmd_phys_addr; + struct dmae_cmd *p_dmae_cmd; +}; + +struct qed_wfq_data { + /* when feature is configured for at least 1 vport */ + u32 min_speed; + bool configured; +}; + +struct qed_qm_info { + struct init_qm_pq_params *qm_pq_params; + struct init_qm_vport_params *qm_vport_params; + struct init_qm_port_params *qm_port_params; + u16 start_pq; + u8 start_vport; + u16 pure_lb_pq; + u16 first_ofld_pq; + u16 first_llt_pq; + u16 pure_ack_pq; + u16 ooo_pq; + u16 first_vf_pq; + u16 first_mcos_pq; + u16 first_rl_pq; + u16 num_pqs; + u16 num_vf_pqs; + u8 num_vports; + u8 max_phys_tcs_per_port; + u8 ooo_tc; + bool pf_rl_en; + bool pf_wfq_en; + bool vport_rl_en; + bool vport_wfq_en; + u8 pf_wfq; + u32 pf_rl; + struct qed_wfq_data *wfq_data; + u8 num_pf_rls; +}; + +#define QED_OVERFLOW_BIT 1 + +struct qed_db_recovery_info { + struct list_head list; + + /* Lock to protect the doorbell recovery mechanism list */ + spinlock_t lock; + bool dorq_attn; + u32 db_recovery_counter; + unsigned long overflow; +}; + +struct storm_stats { + u32 address; + u32 len; +}; + +struct qed_storm_stats { + struct storm_stats mstats; + struct storm_stats pstats; + struct storm_stats tstats; + struct storm_stats ustats; +}; + +struct qed_fw_data { + struct fw_ver_info *fw_ver_info; + const u8 *modes_tree_buf; + union init_op *init_ops; + const u32 *arr_data; + const u32 *fw_overlays; + u32 fw_overlays_len; + u32 init_ops_size; +}; + +enum qed_mf_mode_bit { + /* Supports PF-classification based on tag */ + QED_MF_OVLAN_CLSS, + + /* Supports PF-classification based on MAC */ + QED_MF_LLH_MAC_CLSS, + + /* Supports PF-classification based on protocol type */ + QED_MF_LLH_PROTO_CLSS, + + /* Requires a default PF to be set */ + QED_MF_NEED_DEF_PF, + + /* Allow LL2 to multicast/broadcast */ + QED_MF_LL2_NON_UNICAST, + + /* Allow Cross-PF [& child VFs] Tx-switching */ + QED_MF_INTER_PF_SWITCH, + + /* Unified Fabtic Port support enabled */ + QED_MF_UFP_SPECIFIC, + + /* Disable Accelerated Receive Flow Steering (aRFS) */ + QED_MF_DISABLE_ARFS, + + /* Use vlan for steering */ + QED_MF_8021Q_TAGGING, + + /* Use stag for steering */ + QED_MF_8021AD_TAGGING, + + /* Allow DSCP to TC mapping */ + QED_MF_DSCP_TO_TC_MAP, + + /* Do not insert a vlan tag with id 0 */ + QED_MF_DONT_ADD_VLAN0_TAG, +}; + +enum qed_ufp_mode { + QED_UFP_MODE_ETS, + QED_UFP_MODE_VNIC_BW, + QED_UFP_MODE_UNKNOWN +}; + +enum qed_ufp_pri_type { + QED_UFP_PRI_OS, + QED_UFP_PRI_VNIC, + QED_UFP_PRI_UNKNOWN +}; + +struct qed_ufp_info { + enum qed_ufp_pri_type pri_type; + enum qed_ufp_mode mode; + u8 tc; +}; + +enum BAR_ID { + BAR_ID_0, /* used for GRC */ + BAR_ID_1 /* Used for doorbells */ +}; + +struct qed_nvm_image_info { + u32 num_images; + struct bist_nvm_image_att *image_att; + bool valid; +}; + +enum qed_hsi_def_type { + QED_HSI_DEF_MAX_NUM_VFS, + QED_HSI_DEF_MAX_NUM_L2_QUEUES, + QED_HSI_DEF_MAX_NUM_PORTS, + QED_HSI_DEF_MAX_SB_PER_PATH, + QED_HSI_DEF_MAX_NUM_PFS, + QED_HSI_DEF_MAX_NUM_VPORTS, + QED_HSI_DEF_NUM_ETH_RSS_ENGINE, + QED_HSI_DEF_MAX_QM_TX_QUEUES, + QED_HSI_DEF_NUM_PXP_ILT_RECORDS, + QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS, + QED_HSI_DEF_MAX_QM_GLOBAL_RLS, + QED_HSI_DEF_MAX_PBF_CMD_LINES, + QED_HSI_DEF_MAX_BTB_BLOCKS, + QED_NUM_HSI_DEFS +}; + +struct 
qed_simd_fp_handler { + void *token; + void (*func)(void *cookie); +}; + +enum qed_slowpath_wq_flag { + QED_SLOWPATH_MFW_TLV_REQ, + QED_SLOWPATH_PERIODIC_DB_REC, +}; + +struct qed_hwfn { + struct qed_dev *cdev; + u8 my_id; /* ID inside the PF */ +#define IS_LEAD_HWFN(edev) (!((edev)->my_id)) + u8 rel_pf_id; /* Relative to engine*/ + u8 abs_pf_id; +#define QED_PATH_ID(_p_hwfn) \ + (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1)) + u8 port_id; + bool b_active; + + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + + bool hw_init_done; + + u8 num_funcs_on_engine; + u8 enabled_func_idx; + + /* BAR access */ + void __iomem *regview; + void __iomem *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PTT pool */ + struct qed_ptt_pool *p_ptt_pool; + + /* HW info */ + struct qed_hw_info hw_info; + + /* rt_array (for init-tool) */ + struct qed_rt_data rt_data; + + /* SPQ */ + struct qed_spq *p_spq; + + /* EQ */ + struct qed_eq *p_eq; + + /* Consolidate Q*/ + struct qed_consq *p_consq; + + /* Slow-Path definitions */ + struct tasklet_struct sp_dpc; + bool b_sp_dpc_enabled; + + struct qed_ptt *p_main_ptt; + struct qed_ptt *p_dpc_ptt; + + /* PTP will be used only by the leading function. + * Usage of all PTP-apis should be synchronized as result. + */ + struct qed_ptt *p_ptp_ptt; + + struct qed_sb_sp_info *p_sp_sb; + struct qed_sb_attn_info *p_sb_attn; + + /* Protocol related */ + bool using_ll2; + struct qed_ll2_info *p_ll2_info; + struct qed_ooo_info *p_ooo_info; + struct qed_rdma_info *p_rdma_info; + struct qed_iscsi_info *p_iscsi_info; + struct qed_nvmetcp_info *p_nvmetcp_info; + struct qed_fcoe_info *p_fcoe_info; + struct qed_pf_params pf_params; + + bool b_rdma_enabled_in_prs; + u32 rdma_prs_search_reg; + + struct qed_cxt_mngr *p_cxt_mngr; + + /* Flag indicating whether interrupts are enabled or not*/ + bool b_int_enabled; + bool b_int_requested; + + /* True if the driver requests for the link */ + bool b_drv_link_init; + + struct qed_vf_iov *vf_iov_info; + struct qed_pf_iov *pf_iov_info; + struct qed_mcp_info *mcp_info; + + struct qed_dcbx_info *p_dcbx_info; + + struct qed_ufp_info ufp_info; + + struct qed_dmae_info dmae_info; + + /* QM init */ + struct qed_qm_info qm_info; + struct qed_storm_stats storm_stats; + + /* Buffer for unzipping firmware data */ + void *unzip_buf; + + struct dbg_tools_data dbg_info; + void *dbg_user_info; + struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE]; + + /* PWM region specific data */ + u16 wid_count; + u32 dpi_size; + u32 dpi_count; + + /* This is used to calculate the doorbell address */ + u32 dpi_start_offset; + + /* If one of the following is set then EDPM shouldn't be used */ + u8 dcbx_no_edpm; + u8 db_bar_no_edpm; + + /* L2-related */ + struct qed_l2_info *p_l2_info; + + /* Mechanism for recovering from doorbell drop */ + struct qed_db_recovery_info db_recovery_info; + + /* Nvm images number and attributes */ + struct qed_nvm_image_info nvm_info; + + struct phys_mem_desc *fw_overlay_mem; + struct qed_ptt *p_arfs_ptt; + + struct qed_simd_fp_handler simd_proto_handler[64]; + +#ifdef CONFIG_QED_SRIOV + struct workqueue_struct *iov_wq; + struct delayed_work iov_task; + unsigned long iov_task_flags; +#endif + struct z_stream_s *stream; + bool slowpath_wq_active; + struct workqueue_struct *slowpath_wq; + struct delayed_work slowpath_task; + unsigned long slowpath_task_flags; + u32 periodic_db_rec_count; +}; + +struct pci_params { + int pm_cap; + + unsigned long mem_start; + unsigned long mem_end; + unsigned int irq; + u8 
pf_num; +}; + +struct qed_int_param { + u32 int_mode; + u8 num_vectors; + u8 min_msix_cnt; /* for minimal functionality */ +}; + +struct qed_int_params { + struct qed_int_param in; + struct qed_int_param out; + struct msix_entry *msix_table; + bool fp_initialized; + u8 fp_msix_base; + u8 fp_msix_cnt; + u8 rdma_msix_base; + u8 rdma_msix_cnt; +}; + +struct qed_dbg_feature { + struct dentry *dentry; + u8 *dump_buf; + u32 buf_size; + u32 dumped_dwords; +}; + +struct qed_dev { + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + + enum qed_dev_type type; + /* Translate type/revision combo into the proper conditions */ +#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) +#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev)) +#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH) +#define QED_IS_K2(dev) QED_IS_AH(dev) + + u16 vendor_id; + + u16 device_id; +#define QED_DEV_ID_MASK 0xff00 +#define QED_DEV_ID_MASK_BB 0x1600 +#define QED_DEV_ID_MASK_AH 0x8000 + + u16 chip_num; +#define CHIP_NUM_MASK 0xffff +#define CHIP_NUM_SHIFT 16 + + u16 chip_rev; +#define CHIP_REV_MASK 0xf +#define CHIP_REV_SHIFT 12 +#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1) + + u16 chip_metal; +#define CHIP_METAL_MASK 0xff +#define CHIP_METAL_SHIFT 4 + + u16 chip_bond_id; +#define CHIP_BOND_ID_MASK 0xf +#define CHIP_BOND_ID_SHIFT 0 + + u8 num_engines; + u8 num_ports; + u8 num_ports_in_engine; + u8 num_funcs_in_port; + + u8 path_id; + + unsigned long mf_bits; + + int pcie_width; + int pcie_speed; + + /* Add MF related configuration */ + u8 mcp_rev; + u8 boot_mode; + + /* WoL related configurations */ + u8 wol_config; + u8 wol_mac[ETH_ALEN]; + + u32 int_mode; + enum qed_coalescing_mode int_coalescing_mode; + u16 rx_coalesce_usecs; + u16 tx_coalesce_usecs; + + /* Start Bar offset of first hwfn */ + void __iomem *regview; + void __iomem *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PCI */ + u8 cache_shift; + + /* Init */ + const u32 *iro_arr; +#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr) + + /* HW functions */ + u8 num_hwfns; + struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; + + /* Engine affinity */ + u8 l2_affin_hint; + u8 fir_affin; + u8 iwarp_affin; + + /* SRIOV */ + struct qed_hw_sriov_info *p_iov_info; +#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info) + struct qed_tunnel_info tunnel; + bool b_is_vf; + u32 drv_type; + struct qed_eth_stats *reset_stats; + struct qed_fw_data *fw_data; + + u32 mcp_nvm_resp; + + /* Recovery */ + bool recov_in_prog; + + /* Indicates whether should prevent attentions from being reasserted */ + bool attn_clr_en; + + /* LLH info */ + u8 ppfid_bitmap; + struct qed_llh_info *p_llh_info; + + /* Linux specific here */ + struct qed_dev_info common_dev_info; + struct qede_dev *edev; + struct pci_dev *pdev; + u32 flags; +#define QED_FLAG_STORAGE_STARTED (BIT(0)) + int msg_enable; + + struct pci_params pci_params; + + struct qed_int_params int_params; + + u8 protocol; +#define IS_QED_ETH_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_ETH) +#define IS_QED_FCOE_IF(cdev) ((cdev)->protocol == QED_PROTOCOL_FCOE) + + /* Callbacks to protocol driver */ + union { + struct qed_common_cb_ops *common; + struct qed_eth_cb_ops *eth; + struct qed_fcoe_cb_ops *fcoe; + struct qed_iscsi_cb_ops *iscsi; + struct qed_nvmetcp_cb_ops *nvmetcp; + } protocol_ops; + void *ops_cookie; + +#ifdef CONFIG_QED_LL2 + struct qed_cb_ll2_info *ll2; + u8 ll2_mac_address[ETH_ALEN]; +#endif + struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM]; + u8 engine_for_debug; + bool 
disable_ilt_dump; + bool dbg_bin_dump; + + DECLARE_HASHTABLE(connections, 10); + const struct firmware *firmware; + + bool print_dbg_data; + + u32 rdma_max_sge; + u32 rdma_max_inline; + u32 rdma_max_srq_sge; + u16 tunn_feature_mask; + + bool iwarp_cmt; +}; + +u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type); + +#define NUM_OF_VFS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS) +#define NUM_OF_L2_QUEUES(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES) +#define NUM_OF_PORTS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS) +#define NUM_OF_SBS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH) +#define NUM_OF_ENG_PFS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS) +#define NUM_OF_VPORTS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS) +#define NUM_OF_RSS_ENGINES(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE) +#define NUM_OF_QM_TX_QUEUES(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES) +#define NUM_OF_PXP_ILT_RECORDS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS) +#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS) +#define NUM_OF_QM_GLOBAL_RLS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS) +#define NUM_OF_PBF_CMD_LINES(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES) +#define NUM_OF_BTB_BLOCKS(dev) \ + qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS) + +/** + * qed_concrete_to_sw_fid(): Get the sw function id from + * the concrete value. + * + * @cdev: Qed dev pointer. + * @concrete_fid: Concrete fid. + * + * Return: inline u8. + */ +static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, + u32 concrete_fid) +{ + u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID); + u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); + u8 vf_valid = GET_FIELD(concrete_fid, + PXP_CONCRETE_FID_VFVALID); + u8 sw_fid; + + if (vf_valid) + sw_fid = vfid + MAX_NUM_PFS; + else + sw_fid = pfid; + + return sw_fid; +} + +#define PKT_LB_TC 9 + +int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate); +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, + struct qed_ptt *p_ptt, + u32 min_pf_rate); + +void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +void qed_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, __le16 *fw_lsb, u8 *mac); + +#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0]) +#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1) +/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */ +#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin]) +#define QED_IWARP_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->iwarp_affin]) +#define QED_AFFIN_HWFN(dev) \ + (QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \ + QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev)) +#define QED_AFFIN_HWFN_IDX(dev) (IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 
0 : 1) + +/* Flags for indication of required queues */ +#define PQ_FLAGS_RLS (BIT(0)) +#define PQ_FLAGS_MCOS (BIT(1)) +#define PQ_FLAGS_LB (BIT(2)) +#define PQ_FLAGS_OOO (BIT(3)) +#define PQ_FLAGS_ACK (BIT(4)) +#define PQ_FLAGS_OFLD (BIT(5)) +#define PQ_FLAGS_VFS (BIT(6)) +#define PQ_FLAGS_LLT (BIT(7)) +#define PQ_FLAGS_MTC (BIT(8)) + +/* physical queue index for cm context initialization */ +u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags); +u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc); +u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf); +u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc); +u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); + +/* doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn); +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); + +#define GET_GTT_REG_ADDR(__base, __offset, __idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx))) + +#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \ + ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx))) + +/* Other Linux specific common definitions */ +#define DP_NAME(cdev) ((cdev)->name) + +#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\ + ((cdev)->regview) + \ + (offset))) + +#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset)) +#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset)) +#define REG_WR16(cdev, offset, val) writew((u16)val, REG_ADDR(cdev, offset)) + +#define DOORBELL(cdev, db_addr, val) \ + writel((u32)val, (void __iomem *)((u8 __iomem *)\ + ((cdev)->doorbells) + (db_addr))) + +#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ + qed_device_num_ports((_p_hwfn)->cdev)) +int qed_device_num_ports(struct qed_dev *cdev); + +/* Prototypes */ +int qed_fill_dev_info(struct qed_dev *cdev, + struct qed_dev_info *dev_info); +void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); +void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt); +u32 qed_unzip_data(struct qed_hwfn *p_hwfn, + u32 input_len, u8 *input_buf, + u32 max_size, u8 *unzip_buf); +int qed_recovery_process(struct qed_dev *cdev); +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn); +void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, + enum qed_hw_err_type err_type); +void qed_get_protocol_stats(struct qed_dev *cdev, + enum qed_mcp_protocol_type type, + union qed_mcp_protocol_stats *stats); +int qed_slowpath_irq_req(struct qed_hwfn *hwfn); +void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn); +int qed_mfw_tlv_req(struct qed_hwfn *hwfn); + +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, + enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_data); + +void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc); + +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn); + +int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port); +int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port); +void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port); +void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port); +void qed_llh_clear_all_filters(struct qed_dev *cdev); +unsigned long qed_get_epoch_time(void); +#endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_chain.c b/drivers/net/ethernet/qlogic/qed/qed_chain.c new file mode 100644 index 0000000000..b83d17b14e --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_chain.c @@ -0,0 +1,371 @@ +// 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (c) 2020 Marvell International Ltd. */ + +#include <linux/dma-mapping.h> +#include <linux/qed/qed_chain.h> +#include <linux/vmalloc.h> + +#include "qed_dev_api.h" + +static void qed_chain_init(struct qed_chain *chain, + const struct qed_chain_init_params *params, + u32 page_cnt) +{ + memset(chain, 0, sizeof(*chain)); + + chain->elem_size = params->elem_size; + chain->intended_use = params->intended_use; + chain->mode = params->mode; + chain->cnt_type = params->cnt_type; + + chain->elem_per_page = ELEMS_PER_PAGE(params->elem_size, + params->page_size); + chain->usable_per_page = USABLE_ELEMS_PER_PAGE(params->elem_size, + params->page_size, + params->mode); + chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(params->elem_size, + params->mode); + + chain->elem_per_page_mask = chain->elem_per_page - 1; + chain->next_page_mask = chain->usable_per_page & + chain->elem_per_page_mask; + + chain->page_size = params->page_size; + chain->page_cnt = page_cnt; + chain->capacity = chain->usable_per_page * page_cnt; + chain->size = chain->elem_per_page * page_cnt; + + if (params->ext_pbl_virt) { + chain->pbl_sp.table_virt = params->ext_pbl_virt; + chain->pbl_sp.table_phys = params->ext_pbl_phys; + + chain->b_external_pbl = true; + } +} + +static void qed_chain_init_next_ptr_elem(const struct qed_chain *chain, + void *virt_curr, void *virt_next, + dma_addr_t phys_next) +{ + struct qed_chain_next *next; + u32 size; + + size = chain->elem_size * chain->usable_per_page; + next = virt_curr + size; + + DMA_REGPAIR_LE(next->next_phys, phys_next); + next->next_virt = virt_next; +} + +static void qed_chain_init_mem(struct qed_chain *chain, void *virt_addr, + dma_addr_t phys_addr) +{ + chain->p_virt_addr = virt_addr; + chain->p_phys_addr = phys_addr; +} + +static void qed_chain_free_next_ptr(struct qed_dev *cdev, + struct qed_chain *chain) +{ + struct device *dev = &cdev->pdev->dev; + struct qed_chain_next *next; + dma_addr_t phys, phys_next; + void *virt, *virt_next; + u32 size, i; + + size = chain->elem_size * chain->usable_per_page; + virt = chain->p_virt_addr; + phys = chain->p_phys_addr; + + for (i = 0; i < chain->page_cnt; i++) { + if (!virt) + break; + + next = virt + size; + virt_next = next->next_virt; + phys_next = HILO_DMA_REGPAIR(next->next_phys); + + dma_free_coherent(dev, chain->page_size, virt, phys); + + virt = virt_next; + phys = phys_next; + } +} + +static void qed_chain_free_single(struct qed_dev *cdev, + struct qed_chain *chain) +{ + if (!chain->p_virt_addr) + return; + + dma_free_coherent(&cdev->pdev->dev, chain->page_size, + chain->p_virt_addr, chain->p_phys_addr); +} + +static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *chain) +{ + struct device *dev = &cdev->pdev->dev; + struct addr_tbl_entry *entry; + u32 i; + + if (!chain->pbl.pp_addr_tbl) + return; + + for (i = 0; i < chain->page_cnt; i++) { + entry = chain->pbl.pp_addr_tbl + i; + if (!entry->virt_addr) + break; + + dma_free_coherent(dev, chain->page_size, entry->virt_addr, + entry->dma_map); + } + + if (!chain->b_external_pbl) + dma_free_coherent(dev, chain->pbl_sp.table_size, + chain->pbl_sp.table_virt, + chain->pbl_sp.table_phys); + + vfree(chain->pbl.pp_addr_tbl); + chain->pbl.pp_addr_tbl = NULL; +} + +/** + * qed_chain_free() - Free chain DMA memory. + * + * @cdev: Main device structure. + * @chain: Chain to free. 
+ */ +void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain) +{ + switch (chain->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + qed_chain_free_next_ptr(cdev, chain); + break; + case QED_CHAIN_MODE_SINGLE: + qed_chain_free_single(cdev, chain); + break; + case QED_CHAIN_MODE_PBL: + qed_chain_free_pbl(cdev, chain); + break; + default: + return; + } + + qed_chain_init_mem(chain, NULL, 0); +} + +static int +qed_chain_alloc_sanity_check(struct qed_dev *cdev, + const struct qed_chain_init_params *params, + u32 page_cnt) +{ + u64 chain_size; + + chain_size = ELEMS_PER_PAGE(params->elem_size, params->page_size); + chain_size *= page_cnt; + + if (!chain_size) + return -EINVAL; + + /* The actual chain size can be larger than the maximal possible value + * after rounding up the requested elements number to pages, and after + * taking into account the unusuable elements (next-ptr elements). + * The size of a "u16" chain can be (U16_MAX + 1) since the chain + * size/capacity fields are of u32 type. + */ + switch (params->cnt_type) { + case QED_CHAIN_CNT_TYPE_U16: + if (chain_size > U16_MAX + 1) + break; + + return 0; + case QED_CHAIN_CNT_TYPE_U32: + if (chain_size > U32_MAX) + break; + + return 0; + default: + return -EINVAL; + } + + DP_NOTICE(cdev, + "The actual chain size (0x%llx) is larger than the maximal possible value\n", + chain_size); + + return -EINVAL; +} + +static int qed_chain_alloc_next_ptr(struct qed_dev *cdev, + struct qed_chain *chain) +{ + struct device *dev = &cdev->pdev->dev; + void *virt, *virt_prev = NULL; + dma_addr_t phys; + u32 i; + + for (i = 0; i < chain->page_cnt; i++) { + virt = dma_alloc_coherent(dev, chain->page_size, &phys, + GFP_KERNEL); + if (!virt) + return -ENOMEM; + + if (i == 0) { + qed_chain_init_mem(chain, virt, phys); + qed_chain_reset(chain); + } else { + qed_chain_init_next_ptr_elem(chain, virt_prev, virt, + phys); + } + + virt_prev = virt; + } + + /* Last page's next element should point to the beginning of the + * chain. 
+ */ + qed_chain_init_next_ptr_elem(chain, virt_prev, chain->p_virt_addr, + chain->p_phys_addr); + + return 0; +} + +static int qed_chain_alloc_single(struct qed_dev *cdev, + struct qed_chain *chain) +{ + dma_addr_t phys; + void *virt; + + virt = dma_alloc_coherent(&cdev->pdev->dev, chain->page_size, + &phys, GFP_KERNEL); + if (!virt) + return -ENOMEM; + + qed_chain_init_mem(chain, virt, phys); + qed_chain_reset(chain); + + return 0; +} + +static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *chain) +{ + struct device *dev = &cdev->pdev->dev; + struct addr_tbl_entry *addr_tbl; + dma_addr_t phys, pbl_phys; + __le64 *pbl_virt; + u32 page_cnt, i; + size_t size; + void *virt; + + page_cnt = chain->page_cnt; + + size = array_size(page_cnt, sizeof(*addr_tbl)); + if (unlikely(size == SIZE_MAX)) + return -EOVERFLOW; + + addr_tbl = vzalloc(size); + if (!addr_tbl) + return -ENOMEM; + + chain->pbl.pp_addr_tbl = addr_tbl; + + if (chain->b_external_pbl) { + pbl_virt = chain->pbl_sp.table_virt; + goto alloc_pages; + } + + size = array_size(page_cnt, sizeof(*pbl_virt)); + if (unlikely(size == SIZE_MAX)) + return -EOVERFLOW; + + pbl_virt = dma_alloc_coherent(dev, size, &pbl_phys, GFP_KERNEL); + if (!pbl_virt) + return -ENOMEM; + + chain->pbl_sp.table_virt = pbl_virt; + chain->pbl_sp.table_phys = pbl_phys; + chain->pbl_sp.table_size = size; + +alloc_pages: + for (i = 0; i < page_cnt; i++) { + virt = dma_alloc_coherent(dev, chain->page_size, &phys, + GFP_KERNEL); + if (!virt) + return -ENOMEM; + + if (i == 0) { + qed_chain_init_mem(chain, virt, phys); + qed_chain_reset(chain); + } + + /* Fill the PBL table with the physical address of the page */ + pbl_virt[i] = cpu_to_le64(phys); + + /* Keep the virtual address of the page */ + addr_tbl[i].virt_addr = virt; + addr_tbl[i].dma_map = phys; + } + + return 0; +} + +/** + * qed_chain_alloc() - Allocate and initialize a chain. + * + * @cdev: Main device structure. + * @chain: Chain to be processed. + * @params: Chain initialization parameters. + * + * Return: 0 on success, negative errno otherwise. 
+ */ +int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, + struct qed_chain_init_params *params) +{ + u32 page_cnt; + int rc; + + if (!params->page_size) + params->page_size = QED_CHAIN_PAGE_SIZE; + + if (params->mode == QED_CHAIN_MODE_SINGLE) + page_cnt = 1; + else + page_cnt = QED_CHAIN_PAGE_CNT(params->num_elems, + params->elem_size, + params->page_size, + params->mode); + + rc = qed_chain_alloc_sanity_check(cdev, params, page_cnt); + if (rc) { + DP_NOTICE(cdev, + "Cannot allocate a chain with the given arguments:\n"); + DP_NOTICE(cdev, + "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu, page_size %u]\n", + params->intended_use, params->mode, params->cnt_type, + params->num_elems, params->elem_size, + params->page_size); + return rc; + } + + qed_chain_init(chain, params, page_cnt); + + switch (params->mode) { + case QED_CHAIN_MODE_NEXT_PTR: + rc = qed_chain_alloc_next_ptr(cdev, chain); + break; + case QED_CHAIN_MODE_SINGLE: + rc = qed_chain_alloc_single(cdev, chain); + break; + case QED_CHAIN_MODE_PBL: + rc = qed_chain_alloc_pbl(cdev, chain); + break; + default: + return -EINVAL; + } + + if (!rc) + return 0; + + qed_chain_free(cdev, chain); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c new file mode 100644 index 0000000000..33f4f58ee5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -0,0 +1,2571 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/log2.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_rdma.h" +#include "qed_reg_addr.h" +#include "qed_sriov.h" + +/* QM constants */ +#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */ + +/* Doorbell-Queue constants */ +#define DQ_RANGE_SHIFT 4 +#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT) + +/* Searcher constants */ +#define SRC_MIN_NUM_ELEMS 256 + +/* Timers constants */ +#define TM_SHIFT 7 +#define TM_ALIGN BIT(TM_SHIFT) +#define TM_ELEM_SIZE 4 + +#define ILT_DEFAULT_HW_P_SIZE 4 + +#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) +#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET + +/* ILT entry structure */ +#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12) +#define ILT_ENTRY_PHY_ADDR_SHIFT 0 +#define ILT_ENTRY_VALID_MASK 0x1ULL +#define ILT_ENTRY_VALID_SHIFT 52 +#define ILT_ENTRY_IN_REGS 2 +#define ILT_REG_SIZE_IN_BYTES 4 + +/* connection context union */ +union conn_context { + struct core_conn_context core_ctx; + struct eth_conn_context eth_ctx; + struct iscsi_conn_context iscsi_ctx; + struct fcoe_conn_context fcoe_ctx; + struct roce_conn_context roce_ctx; +}; + +/* TYPE-0 task context - iSCSI, FCOE */ +union type0_task_context { + struct iscsi_task_context iscsi_ctx; + struct fcoe_task_context fcoe_ctx; +}; + +/* TYPE-1 task context - ROCE */ +union type1_task_context { + struct rdma_task_context roce_ctx; +}; + +struct src_ent { + __u8 opaque[56]; + __be64 next; +}; + +#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ +#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12) + +#define 
CONN_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) + +#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context)) +#define XRC_SRQ_CXT_SIZE (sizeof(struct rdma_xrc_srq_context)) + +#define TYPE0_TASK_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn) + +/* Alignment is inherent to the type1_task_context structure */ +#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context) + +static bool src_proto(enum protocol_type type) +{ + return type == PROTOCOLID_TCP_ULP || + type == PROTOCOLID_FCOE || + type == PROTOCOLID_IWARP; +} + +static bool tm_cid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_TCP_ULP || + type == PROTOCOLID_FCOE || + type == PROTOCOLID_ROCE || + type == PROTOCOLID_IWARP; +} + +static bool tm_tid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_FCOE; +} + +/* counts the iids for the CDU/CDUC ILT client configuration */ +struct qed_cdu_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr, + struct qed_cdu_iids *iids) +{ + u32 type; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->pf_cids += p_mngr->conn_cfg[type].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + } +} + +/* counts the iids for the Searcher block configuration */ +struct qed_src_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr, + struct qed_src_iids *iids) +{ + u32 i; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + if (!src_proto(i)) + continue; + + iids->pf_cids += p_mngr->conn_cfg[i].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf; + } + + /* Add L2 filtering filters in addition */ + iids->pf_cids += p_mngr->arfs_count; +} + +/* counts the iids for the Timers block configuration */ +struct qed_tm_iids { + u32 pf_cids; + u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */ + u32 pf_tids_total; + u32 per_vf_cids; + u32 per_vf_tids; +}; + +static void qed_cxt_tm_iids(struct qed_hwfn *p_hwfn, + struct qed_cxt_mngr *p_mngr, + struct qed_tm_iids *iids) +{ + bool tm_vf_required = false; + bool tm_required = false; + int i, j; + + /* Timers is a special case -> we don't count how many cids require + * timers but what's the max cid that will be used by the timer block. + * therefore we traverse in reverse order, and once we hit a protocol + * that requires the timers memory, we'll sum all the protocols up + * to that one. + */ + for (i = MAX_CONN_TYPES - 1; i >= 0; i--) { + struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; + + if (tm_cid_proto(i) || tm_required) { + if (p_cfg->cid_count) + tm_required = true; + + iids->pf_cids += p_cfg->cid_count; + } + + if (tm_cid_proto(i) || tm_vf_required) { + if (p_cfg->cids_per_vf) + tm_vf_required = true; + + iids->per_vf_cids += p_cfg->cids_per_vf; + } + + if (tm_tid_proto(i)) { + struct qed_tid_seg *segs = p_cfg->tid_seg; + + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->pf_tids[j] += segs[j].count; + + /* The last array elelment is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. 
+ */ + iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } + } + + iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN); + iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN); + iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN); + + for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) { + iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN); + iids->pf_tids_total += iids->pf_tids[j]; + } +} + +static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn, + struct qed_qm_iids *iids) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_tid_seg *segs; + u32 vf_cids = 0, type, j; + u32 vf_tids = 0; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->cids += p_mngr->conn_cfg[type].cid_count; + vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + + segs = p_mngr->conn_cfg[type].tid_seg; + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->tids += segs[j].count; + + /* The last array elelment is for the VFs. As for PF + * segments there can be only one protocol for + * which this value is not 0. + */ + vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } + + iids->vf_cids = vf_cids; + iids->tids += vf_tids * p_mngr->vf_count; + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n", + iids->cids, iids->vf_cids, iids->tids, vf_tids); +} + +static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn, + u32 seg) +{ + struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr; + u32 i; + + /* Find the protocol with tid count > 0 for this segment. + * Note: there can only be one and this is already validated. + */ + for (i = 0; i < MAX_CONN_TYPES; i++) + if (p_cfg->conn_cfg[i].tid_seg[seg].count) + return &p_cfg->conn_cfg[i].tid_seg[seg]; + return NULL; +} + +static void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, + u32 num_srqs, u32 num_xrc_srqs) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + + p_mgr->srq_count = num_srqs; + p_mgr->xrc_srq_count = num_xrc_srqs; +} + +u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn, + enum ilt_clients ilt_client) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *p_cli = &p_mngr->clients[ilt_client]; + + return ILT_PAGE_IN_BYTES(p_cli->p_size.val); +} + +static u32 qed_cxt_xrc_srqs_per_page(struct qed_hwfn *p_hwfn) +{ + u32 page_size; + + page_size = qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM); + return page_size / XRC_SRQ_CXT_SIZE; +} + +u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + u32 total_srqs; + + total_srqs = p_mgr->srq_count + p_mgr->xrc_srq_count; + + return total_srqs; +} + +/* set the iids count per protocol */ +static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, + u32 cid_count, u32 vf_cid_cnt) +{ + struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; + + p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN); + p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN); + + if (type == PROTOCOLID_ROCE) { + u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val; + u32 cxt_size = CONN_CXT_SIZE(p_hwfn); + u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + u32 align = elems_per_page * DQ_RANGE_ALIGN; + + p_conn->cid_count = roundup(p_conn->cid_count, align); + } +} + +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid) +{ + if 
(vf_cid) + *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; + + return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; +} + +u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, + enum protocol_type type) +{ + return p_hwfn->p_cxt_mngr->acquired[type].start_cid; +} + +u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type) +{ + u32 cnt = 0; + int i; + + for (i = 0; i < TASK_SEGMENTS; i++) + cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count; + + return cnt; +} + +static void qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type proto, + u8 seg, + u8 seg_type, u32 count, bool has_fl) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + p_seg->count = count; + p_seg->has_fl_mem = has_fl; + p_seg->type = seg_type; +} + +static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli, + struct qed_ilt_cli_blk *p_blk, + u32 start_line, u32 total_size, u32 elem_size) +{ + u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); + + /* verify thatits called only once for each block */ + if (p_blk->total_size) + return; + + p_blk->total_size = total_size; + p_blk->real_size_in_page = 0; + if (elem_size) + p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size; + p_blk->start_line = start_line; +} + +static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn, + struct qed_ilt_client_cfg *p_cli, + struct qed_ilt_cli_blk *p_blk, + u32 *p_line, enum ilt_clients client_id) +{ + if (!p_blk->total_size) + return; + + if (!p_cli->active) + p_cli->first.val = *p_line; + + p_cli->active = true; + *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); + p_cli->last.val = *p_line - 1; + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "ILT[Client %d] - Lines: [%08x - %08x]. 
Block - Size %08x [Real %08x] Start line %d\n", + client_id, p_cli->first.val, + p_cli->last.val, p_blk->total_size, + p_blk->real_size_in_page, p_blk->start_line); +} + +static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn, + enum ilt_clients ilt_client) +{ + u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count; + struct qed_ilt_client_cfg *p_cli; + u32 lines_to_skip = 0; + u32 cxts_per_p; + + if (ilt_client == ILT_CLI_CDUC) { + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + + cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / + (u32) CONN_CXT_SIZE(p_hwfn); + + lines_to_skip = cid_count / cxts_per_p; + } + + return lines_to_skip; +} + +static struct qed_ilt_client_cfg *qed_cxt_set_cli(struct qed_ilt_client_cfg + *p_cli) +{ + p_cli->active = false; + p_cli->first.val = 0; + p_cli->last.val = 0; + return p_cli; +} + +static struct qed_ilt_cli_blk *qed_cxt_set_blk(struct qed_ilt_cli_blk *p_blk) +{ + p_blk->total_size = 0; + return p_blk; +} + +static void qed_cxt_ilt_blk_reset(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; + u32 cli_idx, blk_idx; + + for (cli_idx = 0; cli_idx < MAX_ILT_CLIENTS; cli_idx++) { + for (blk_idx = 0; blk_idx < ILT_CLI_PF_BLOCKS; blk_idx++) + clients[cli_idx].pf_blks[blk_idx].total_size = 0; + + for (blk_idx = 0; blk_idx < ILT_CLI_VF_BLOCKS; blk_idx++) + clients[cli_idx].vf_blks[blk_idx].total_size = 0; + } +} + +int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 curr_line, total, i, task_size, line; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + struct qed_cdu_iids cdu_iids; + struct qed_src_iids src_iids; + struct qed_qm_iids qm_iids; + struct qed_tm_iids tm_iids; + struct qed_tid_seg *p_seg; + + memset(&qm_iids, 0, sizeof(qm_iids)); + memset(&cdu_iids, 0, sizeof(cdu_iids)); + memset(&src_iids, 0, sizeof(src_iids)); + memset(&tm_iids, 0, sizeof(tm_iids)); + + p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT); + + /* Reset all ILT blocks at the beginning of ILT computing in order + * to prevent memory allocation for irrelevant blocks afterwards. 
+ */ + qed_cxt_ilt_blk_reset(p_hwfn); + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "hwfn [%d] - Set context manager starting line to be 0x%08x\n", + p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); + + /* CDUC */ + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUC]); + + curr_line = p_mngr->pf_start_line; + + /* CDUC PF */ + p_cli->pf_total_lines = 0; + + /* get the counters for the CDUC and QM clients */ + qed_cxt_cdu_iids(p_mngr, &cdu_iids); + + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUC_BLK]); + + total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); + + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn, + ILT_CLI_CDUC); + + /* CDUC VF */ + p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUC_BLK]); + total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); + + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->vf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUC); + + /* CDUT PF */ + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_CDUT]); + p_cli->first.val = curr_line; + + /* first the 'working' task memory */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[CDUT_SEG_BLK(i)]); + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + + /* next the 'init' task memory (forced load memory) */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = + qed_cxt_set_blk(&p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]); + + if (!p_seg->has_fl_mem) { + /* The segment is active (total size pf 'working' + * memory is > 0) but has no FL (forced-load, Init) + * memory. Thus: + * + * 1. The total-size in the corrsponding FL block of + * the ILT client is set to 0 - No ILT line are + * provisioned and no ILT memory allocated. + * + * 2. The start-line of said block is set to the + * start line of the matching working memory + * block in the ILT client. This is later used to + * configure the CDU segment offset registers and + * results in an FL command for TIDs of this + * segement behaves as regular load commands + * (loading TIDs from the working memory). 
+ */ + line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line; + + qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + continue; + } + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line; + + /* CDUT VF */ + p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF); + if (p_seg && p_seg->count) { + /* Stricly speaking we need to iterate over all VF + * task segment types, but a VF has only 1 segment + */ + + /* 'working' memory */ + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + p_blk = qed_cxt_set_blk(&p_cli->vf_blks[CDUT_SEG_BLK(0)]); + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + /* 'init' memory */ + p_blk = + qed_cxt_set_blk(&p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]); + if (!p_seg->has_fl_mem) { + /* see comment above */ + line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; + qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + } else { + task_size = p_mngr->task_type_size[p_seg->type]; + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, task_size); + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->vf_total_lines = curr_line - + p_cli->vf_blks[0].start_line; + + /* Now for the rest of the VFs */ + for (i = 1; i < p_mngr->vf_count; i++) { + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + } + + /* QM */ + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_QM]); + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); + + qed_cxt_qm_iids(p_hwfn, &qm_iids); + total = qed_qm_pf_mem_size(qm_iids.cids, + qm_iids.vf_cids, qm_iids.tids, + p_hwfn->qm_info.num_pqs, + p_hwfn->qm_info.num_vf_pqs); + + DP_VERBOSE(p_hwfn, + QED_MSG_ILT, + "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n", + qm_iids.cids, + qm_iids.vf_cids, + qm_iids.tids, + p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); + + qed_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total * 0x1000, + QM_PQ_ELEMENT_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + /* SRC */ + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_SRC]); + qed_cxt_src_iids(p_mngr, &src_iids); + + /* Both the PF and VFs searcher connections are stored in the per PF + * database. Thus sum the PF searcher cids and all the VFs searcher + * cids. 
+ */ + total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (total) { + u32 local_max = max_t(u32, total, + SRC_MIN_NUM_ELEMS); + + total = roundup_pow_of_two(local_max); + + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * sizeof(struct src_ent), + sizeof(struct src_ent)); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_SRC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM PF */ + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TM]); + qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); + total = tm_iids.pf_cids + tm_iids.pf_tids_total; + if (total) { + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]); + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM VF */ + total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; + if (total) { + p_blk = qed_cxt_set_blk(&p_cli->vf_blks[0]); + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + + p_cli->vf_total_lines = curr_line - p_blk->start_line; + for (i = 1; i < p_mngr->vf_count; i++) + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + } + + /* TSDM (SRQ CONTEXT) */ + total = qed_cxt_get_total_srq_count(p_hwfn); + + if (total) { + p_cli = qed_cxt_set_cli(&p_mngr->clients[ILT_CLI_TSDM]); + p_blk = qed_cxt_set_blk(&p_cli->pf_blks[SRQ_BLK]); + qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * SRQ_CXT_SIZE, SRQ_CXT_SIZE); + + qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TSDM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + *line_count = curr_line - p_hwfn->p_cxt_mngr->pf_start_line; + + if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > + RESC_NUM(p_hwfn, QED_ILT)) + return -EINVAL; + + return 0; +} + +u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines) +{ + struct qed_ilt_client_cfg *p_cli; + u32 excess_lines, available_lines; + struct qed_cxt_mngr *p_mngr; + u32 ilt_page_size, elem_size; + struct qed_tid_seg *p_seg; + int i; + + available_lines = RESC_NUM(p_hwfn, QED_ILT); + excess_lines = used_lines - available_lines; + + if (!excess_lines) + return 0; + + if (!QED_IS_RDMA_PERSONALITY(p_hwfn)) + return 0; + + p_mngr = p_hwfn->p_cxt_mngr; + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + ilt_page_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); + + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + elem_size = p_mngr->task_type_size[p_seg->type]; + if (!elem_size) + continue; + + return (ilt_page_size / elem_size) * excess_lines; + } + + DP_NOTICE(p_hwfn, "failed computing excess ILT lines\n"); + return 0; +} + +static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn) +{ + struct qed_src_t2 *p_t2 = &p_hwfn->p_cxt_mngr->src_t2; + u32 i; + + if (!p_t2 || !p_t2->dma_mem) + return; + + for (i = 0; i < p_t2->num_pages; i++) + if (p_t2->dma_mem[i].virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_t2->dma_mem[i].size, + p_t2->dma_mem[i].virt_addr, + p_t2->dma_mem[i].phys_addr); + + kfree(p_t2->dma_mem); + p_t2->dma_mem = NULL; +} + +static int +qed_cxt_t2_alloc_pages(struct qed_hwfn *p_hwfn, + struct qed_src_t2 *p_t2, u32 total_size, u32 page_size) +{ + void **p_virt; + u32 size, i; + + if (!p_t2 
|| !p_t2->dma_mem) + return -EINVAL; + + for (i = 0; i < p_t2->num_pages; i++) { + size = min_t(u32, total_size, page_size); + p_virt = &p_t2->dma_mem[i].virt_addr; + + *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + size, + &p_t2->dma_mem[i].phys_addr, + GFP_KERNEL); + if (!p_t2->dma_mem[i].virt_addr) + return -ENOMEM; + + memset(*p_virt, 0, size); + p_t2->dma_mem[i].size = size; + total_size -= size; + } + + return 0; +} + +static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 conn_num, total_size, ent_per_page, psz, i; + struct phys_mem_desc *p_t2_last_page; + struct qed_ilt_client_cfg *p_src; + struct qed_src_iids src_iids; + struct qed_src_t2 *p_t2; + int rc; + + memset(&src_iids, 0, sizeof(src_iids)); + + /* if the SRC ILT client is inactive - there are no connection + * requiring the searcer, leave. + */ + p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC]; + if (!p_src->active) + return 0; + + qed_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + total_size = conn_num * sizeof(struct src_ent); + + /* use the same page size as the SRC ILT client */ + psz = ILT_PAGE_IN_BYTES(p_src->p_size.val); + p_t2 = &p_mngr->src_t2; + p_t2->num_pages = DIV_ROUND_UP(total_size, psz); + + /* allocate t2 */ + p_t2->dma_mem = kcalloc(p_t2->num_pages, sizeof(struct phys_mem_desc), + GFP_KERNEL); + if (!p_t2->dma_mem) { + DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n"); + rc = -ENOMEM; + goto t2_fail; + } + + rc = qed_cxt_t2_alloc_pages(p_hwfn, p_t2, total_size, psz); + if (rc) + goto t2_fail; + + /* Set the t2 pointers */ + + /* entries per page - must be a power of two */ + ent_per_page = psz / sizeof(struct src_ent); + + p_t2->first_free = (u64)p_t2->dma_mem[0].phys_addr; + + p_t2_last_page = &p_t2->dma_mem[(conn_num - 1) / ent_per_page]; + p_t2->last_free = (u64)p_t2_last_page->phys_addr + + ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent); + + for (i = 0; i < p_t2->num_pages; i++) { + u32 ent_num = min_t(u32, + ent_per_page, + conn_num); + struct src_ent *entries = p_t2->dma_mem[i].virt_addr; + u64 p_ent_phys = (u64)p_t2->dma_mem[i].phys_addr, val; + u32 j; + + for (j = 0; j < ent_num - 1; j++) { + val = p_ent_phys + (j + 1) * sizeof(struct src_ent); + entries[j].next = cpu_to_be64(val); + } + + if (i < p_t2->num_pages - 1) + val = (u64)p_t2->dma_mem[i + 1].phys_addr; + else + val = 0; + entries[j].next = cpu_to_be64(val); + + conn_num -= ent_num; + } + + return 0; + +t2_fail: + qed_cxt_src_t2_free(p_hwfn); + return rc; +} + +#define for_each_ilt_valid_client(pos, clients) \ + for (pos = 0; pos < MAX_ILT_CLIENTS; pos++) \ + if (!clients[pos].active) { \ + continue; \ + } else \ + +/* Total number of ILT lines used by this PF */ +static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients) +{ + u32 size = 0; + u32 i; + + for_each_ilt_valid_client(i, ilt_clients) + size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1); + + return size; +} + +static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 ilt_size, i; + + ilt_size = qed_cxt_ilt_shadow_size(p_cli); + + for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { + struct phys_mem_desc *p_dma = &p_mngr->ilt_shadow[i]; + + if (p_dma->virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_dma->size, p_dma->virt_addr, + p_dma->phys_addr); + 
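+ /* Clear the descriptor after freeing so that a later call to
+ * qed_ilt_shadow_free() (e.g. on an error path that tears the
+ * manager down again) does not try to release the same buffer twice.
+ */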
p_dma->virt_addr = NULL; + } + kfree(p_mngr->ilt_shadow); + p_mngr->ilt_shadow = NULL; +} + +static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn, + struct qed_ilt_cli_blk *p_blk, + enum ilt_clients ilt_client, + u32 start_line_offset) +{ + struct phys_mem_desc *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; + u32 lines, line, sz_left, lines_to_skip = 0; + + /* Special handling for RoCE that supports dynamic allocation */ + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && + ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM)) + return 0; + + lines_to_skip = p_blk->dynamic_line_cnt; + + if (!p_blk->total_size) + return 0; + + sz_left = p_blk->total_size; + lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip; + line = p_blk->start_line + start_line_offset - + p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip; + + for (; lines; lines--) { + dma_addr_t p_phys; + void *p_virt; + u32 size; + + size = min_t(u32, sz_left, p_blk->real_size_in_page); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, size, + &p_phys, GFP_KERNEL); + if (!p_virt) + return -ENOMEM; + + ilt_shadow[line].phys_addr = p_phys; + ilt_shadow[line].virt_addr = p_virt; + ilt_shadow[line].size = size; + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n", + line, (u64)p_phys, p_virt, size); + + sz_left -= size; + line++; + } + + return 0; +} + +static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *clients = p_mngr->clients; + struct qed_ilt_cli_blk *p_blk; + u32 size, i, j, k; + int rc; + + size = qed_cxt_ilt_shadow_size(clients); + p_mngr->ilt_shadow = kcalloc(size, sizeof(struct phys_mem_desc), + GFP_KERNEL); + if (!p_mngr->ilt_shadow) { + rc = -ENOMEM; + goto ilt_shadow_fail; + } + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "Allocated 0x%x bytes for ilt shadow\n", + (u32)(size * sizeof(struct phys_mem_desc))); + + for_each_ilt_valid_client(i, clients) { + for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { + p_blk = &clients[i].pf_blks[j]; + rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0); + if (rc) + goto ilt_shadow_fail; + } + for (k = 0; k < p_mngr->vf_count; k++) { + for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) { + u32 lines = clients[i].vf_total_lines * k; + + p_blk = &clients[i].vf_blks[j]; + rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines); + if (rc) + goto ilt_shadow_fail; + } + } + } + + return 0; + +ilt_shadow_fail: + qed_ilt_shadow_free(p_hwfn); + return rc; +} + +static void qed_cid_map_free(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 type, vf; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + bitmap_free(p_mngr->acquired[type].cid_map); + p_mngr->acquired[type].max_count = 0; + p_mngr->acquired[type].start_cid = 0; + + for (vf = 0; vf < MAX_NUM_VFS; vf++) { + bitmap_free(p_mngr->acquired_vf[type][vf].cid_map); + p_mngr->acquired_vf[type][vf].max_count = 0; + p_mngr->acquired_vf[type][vf].start_cid = 0; + } + } +} + +static int +qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn, + u32 type, + u32 cid_start, + u32 cid_count, struct qed_cid_acquired_map *p_map) +{ + if (!cid_count) + return 0; + + p_map->cid_map = bitmap_zalloc(cid_count, GFP_KERNEL); + if (!p_map->cid_map) + return -ENOMEM; + + p_map->max_count = cid_count; + p_map->start_cid = cid_start; + + DP_VERBOSE(p_hwfn, QED_MSG_CXT, + "Type %08x start: %08x count %08x\n", + type, p_map->start_cid, p_map->max_count); + + return 0; +} + +static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn) 
+{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 start_cid = 0, vf_start_cid = 0; + u32 type, vf; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type]; + struct qed_cid_acquired_map *p_map; + + /* Handle PF maps */ + p_map = &p_mngr->acquired[type]; + if (qed_cid_map_alloc_single(p_hwfn, type, start_cid, + p_cfg->cid_count, p_map)) + goto cid_map_fail; + + /* Handle VF maps */ + for (vf = 0; vf < MAX_NUM_VFS; vf++) { + p_map = &p_mngr->acquired_vf[type][vf]; + if (qed_cid_map_alloc_single(p_hwfn, type, + vf_start_cid, + p_cfg->cids_per_vf, p_map)) + goto cid_map_fail; + } + + start_cid += p_cfg->cid_count; + vf_start_cid += p_cfg->cids_per_vf; + } + + return 0; + +cid_map_fail: + qed_cid_map_free(p_hwfn); + return -ENOMEM; +} + +int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *clients; + struct qed_cxt_mngr *p_mngr; + u32 i; + + p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL); + if (!p_mngr) + return -ENOMEM; + + /* Initialize ILT client registers */ + clients = p_mngr->clients; + clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); + clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); + clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); + + clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); + clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); + clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); + + clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT); + clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT); + clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE); + + clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT); + clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT); + clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE); + + clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT); + clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT); + clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE); + + clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT); + clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT); + clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE); + /* default ILT page size for all clients is 64K */ + for (i = 0; i < MAX_ILT_CLIENTS; i++) + p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; + + p_mngr->conn_ctx_size = CONN_CXT_SIZE(p_hwfn); + + /* Initialize task sizes */ + p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn); + p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn); + + if (p_hwfn->cdev->p_iov_info) { + p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs; + p_mngr->first_vf_in_pf = + p_hwfn->cdev->p_iov_info->first_vf_in_pf; + } + /* Initialize the dynamic ILT allocation mutex */ + mutex_init(&p_mngr->mutex); + + /* Set the cxt mangr pointer priori to further allocations */ + p_hwfn->p_cxt_mngr = p_mngr; + + return 0; +} + +int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn) +{ + int rc; + + /* Allocate the ILT shadow table */ + rc = qed_ilt_shadow_alloc(p_hwfn); + if (rc) + goto tables_alloc_fail; + + /* Allocate the T2 table */ + rc = qed_cxt_src_t2_alloc(p_hwfn); + if (rc) + goto tables_alloc_fail; + + /* Allocate and initialize the acquired cids bitmaps */ + rc = qed_cid_map_alloc(p_hwfn); + if (rc) + goto tables_alloc_fail; + + return 0; + +tables_alloc_fail: + qed_cxt_mngr_free(p_hwfn); + return rc; +} + +void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->p_cxt_mngr) + return; + + 
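+ /* Tear down in the reverse order of qed_cxt_tables_alloc():
+ * CID maps first, then the searcher T2 pages, then the ILT shadow.
+ */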
qed_cid_map_free(p_hwfn); + qed_cxt_src_t2_free(p_hwfn); + qed_ilt_shadow_free(p_hwfn); + kfree(p_hwfn->p_cxt_mngr); + + p_hwfn->p_cxt_mngr = NULL; +} + +void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_cid_acquired_map *p_map; + struct qed_conn_type_cfg *p_cfg; + int type; + + /* Reset acquired cids */ + for (type = 0; type < MAX_CONN_TYPES; type++) { + u32 vf; + + p_cfg = &p_mngr->conn_cfg[type]; + if (p_cfg->cid_count) { + p_map = &p_mngr->acquired[type]; + bitmap_zero(p_map->cid_map, p_map->max_count); + } + + if (!p_cfg->cids_per_vf) + continue; + + for (vf = 0; vf < MAX_NUM_VFS; vf++) { + p_map = &p_mngr->acquired_vf[type][vf]; + bitmap_zero(p_map->cid_map, p_map->max_count); + } + } +} + +/* CDU Common */ +#define CDUC_CXT_SIZE_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT + +#define CDUC_CXT_SIZE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT) + +#define CDUC_BLOCK_WASTE_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT + +#define CDUC_BLOCK_WASTE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT) + +#define CDUC_NCIB_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT + +#define CDUC_NCIB_MASK \ + (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) + +#define CDUT_TYPE0_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT + +#define CDUT_TYPE0_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \ + CDUT_TYPE0_CXT_SIZE_SHIFT) + +#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE0_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \ + CDUT_TYPE0_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE0_NCIB_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE0_NCIB_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE0_NCIB_SHIFT) + +#define CDUT_TYPE1_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT + +#define CDUT_TYPE1_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \ + CDUT_TYPE1_CXT_SIZE_SHIFT) + +#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE1_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \ + CDUT_TYPE1_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE1_NCIB_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE1_NCIB_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE1_NCIB_SHIFT) + +static void qed_cdu_init_common(struct qed_hwfn *p_hwfn) +{ + u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; + + /* CDUC - connection configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + cxt_size = CONN_CXT_SIZE(p_hwfn); + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); + SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); + SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-0 tasks configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val; + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multipes of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, 
CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-1 tasks configuration */ + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multiples of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params); +} + +/* CDU PF */ +#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT +#define CDU_SEG_REG_TYPE_MASK 0x1 +#define CDU_SEG_REG_OFFSET_SHIFT 0 +#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK + +static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + struct qed_tid_seg *p_seg; + u32 cdu_seg_params, offset; + int i; + + static const u32 rt_type_offset_arr[] = { + CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + static const u32 rt_type_offset_fl_arr[] = { + CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + + /* There are initializations only for CDUT during the PF phase */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + /* Segment 0 */ + p_seg = qed_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg) + continue; + + /* Note: start_line is already adjusted for the CDU + * segment register granularity, so we just need to + * divide. Adjustment is implicit as we assume ILT + * Page size is larger than 32K!
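+ * As an illustration (hypothetical values): with a 64K ILT page and
+ * a segment whose start_line is 8 lines past the client's first
+ * line, the offset computed below evaluates to
+ * (64K * 8) / CDUT_SEG_ALIGNMET_IN_BYTES.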
+ */ + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params); + + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params); + } +} + +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool is_pf_loading) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct qed_qm_pf_rt_init_params params; + struct qed_qm_iids iids; + + memset(&iids, 0, sizeof(iids)); + qed_cxt_qm_iids(p_hwfn, &iids); + + memset(¶ms, 0, sizeof(params)); + params.port_id = p_hwfn->port_id; + params.pf_id = p_hwfn->rel_pf_id; + params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params.is_pf_loading = is_pf_loading; + params.num_pf_cids = iids.cids; + params.num_vf_cids = iids.vf_cids; + params.num_tids = iids.tids; + params.start_pq = qm_info->start_pq; + params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs; + params.num_vf_pqs = qm_info->num_vf_pqs; + params.start_vport = qm_info->start_vport; + params.num_vports = qm_info->num_vports; + params.pf_wfq = qm_info->pf_wfq; + params.pf_rl = qm_info->pf_rl; + params.pq_params = qm_info->qm_pq_params; + params.vport_params = qm_info->qm_vport_params; + + qed_qm_pf_rt_init(p_hwfn, p_ptt, ¶ms); +} + +/* CM PF */ +static void qed_cm_init_pf(struct qed_hwfn *p_hwfn) +{ + /* XCM pure-LB queue */ + STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, + qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB)); +} + +/* DQ PF */ +static void qed_dq_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0; + + dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, 
DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid); + + /* Connection types 6 & 7 are not in use, yet they must be configured + * as the highest possible connection. Not configuring them means the + * defaults will be used, and with a large number of cids a bug may + * occur, if the defaults will be smaller than dq_pf_max_cid / + * dq_vf_max_cid. + */ + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid); + + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid); +} + +static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *ilt_clients; + int i; + + ilt_clients = p_hwfn->p_cxt_mngr->clients; + for_each_ilt_valid_client(i, ilt_clients) { + STORE_RT_REG(p_hwfn, + ilt_clients[i].first.reg, + ilt_clients[i].first.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].last.reg, ilt_clients[i].last.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].p_size.reg, + ilt_clients[i].p_size.val); + } +} + +static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + u32 blk_factor; + + /* For simplicty we set the 'block' to be an ILT page */ + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_BASE_RT_OFFSET, + p_iov->first_vf_in_pf); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET, + p_iov->first_vf_in_pf + p_iov->total_vfs); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM]; + blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } +} + +/* ILT (PSWRQ2) PF */ +static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *clients; + struct qed_cxt_mngr *p_mngr; + struct phys_mem_desc *p_shdw; + u32 line, rt_offst, i; + + qed_ilt_bounds_init(p_hwfn); + qed_ilt_vf_bounds_init(p_hwfn); + + p_mngr = p_hwfn->p_cxt_mngr; + p_shdw = p_mngr->ilt_shadow; + clients = p_hwfn->p_cxt_mngr->clients; + + for_each_ilt_valid_client(i, clients) { + /** Client's 1st val and 
RT array are absolute, ILT shadows' + * lines are relative. + */ + line = clients[i].first.val - p_mngr->pf_start_line; + rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + + clients[i].first.val * ILT_ENTRY_IN_REGS; + + for (; line <= clients[i].last.val - p_mngr->pf_start_line; + line++, rt_offst += ILT_ENTRY_IN_REGS) { + u64 ilt_hw_entry = 0; + + /** p_virt could be NULL incase of dynamic + * allocation + */ + if (p_shdw[line].virt_addr) { + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, + (p_shdw[line].phys_addr >> 12)); + + DP_VERBOSE(p_hwfn, QED_MSG_ILT, + "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n", + rt_offst, line, i, + (u64)(p_shdw[line].phys_addr >> 12)); + } + + STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); + } + } +} + +/* SRC (Searcher) PF */ +static void qed_src_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rounded_conn_num, conn_num, conn_max; + struct qed_src_iids src_iids; + + memset(&src_iids, 0, sizeof(src_iids)); + qed_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (!conn_num) + return; + + conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS); + rounded_conn_num = roundup_pow_of_two(conn_max); + + STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num); + STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET, + ilog2(rounded_conn_num)); + + STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->src_t2.first_free); + STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->src_t2.last_free); +} + +/* Timers PF */ +#define TM_CFG_NUM_IDS_SHIFT 0 +#define TM_CFG_NUM_IDS_MASK 0xFFFFULL +#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16 +#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL +#define TM_CFG_PARENT_PF_SHIFT 25 +#define TM_CFG_PARENT_PF_MASK 0x7ULL + +#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30 +#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL + +#define TM_CFG_TID_OFFSET_SHIFT 30 +#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL +#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49 +#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL + +static void qed_tm_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 active_seg_mask = 0, tm_offset, rt_reg; + struct qed_tm_iids tm_iids; + u64 cfg_word; + u8 i; + + memset(&tm_iids, 0, sizeof(tm_iids)); + qed_cxt_tm_iids(p_hwfn, p_mngr, &tm_iids); + + /* @@@TBD No pre-scan for now */ + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */ + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */ + + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + + /* enale scan */ + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET, + 
tm_iids.pf_cids ? 0x1 : 0x0); + + /* @@@TBD how to enable the scan for the VFs */ + + tm_offset = tm_iids.per_vf_cids; + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + tm_offset = tm_iids.pf_cids; + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->cdev) + + p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + active_seg_mask |= (tm_iids.pf_tids[i] ? BIT(i) : 0); + + tm_offset += tm_iids.pf_tids[i]; + } + + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) + active_seg_mask = 0; + + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); + + /* @@@TBD how to enable the scan for the VFs */ +} + +static void qed_prs_init_common(struct qed_hwfn *p_hwfn) +{ + if ((p_hwfn->hw_info.personality == QED_PCI_FCOE) && + p_hwfn->pf_params.fcoe_pf_params.is_target) + STORE_RT_REG(p_hwfn, + PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET, 0); +} + +static void qed_prs_init_pf(struct qed_hwfn *p_hwfn) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_conn_type_cfg *p_fcoe; + struct qed_tid_seg *p_tid; + + p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE]; + + /* If FCoE is active set the MAX OX_ID (tid) in the Parser */ + if (!p_fcoe->cid_count) + return; + + p_tid = &p_fcoe->tid_seg[QED_CXT_FCOE_TID_SEG]; + if (p_hwfn->pf_params.fcoe_pf_params.is_target) { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET, + p_tid->count); + } else { + STORE_RT_REG_AGG(p_hwfn, + PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET, + p_tid->count); + } +} + +void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn) +{ + qed_cdu_init_common(p_hwfn); + qed_prs_init_common(p_hwfn); +} + +void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_qm_init_pf(p_hwfn, p_ptt, true); + qed_cm_init_pf(p_hwfn); + qed_dq_init_pf(p_hwfn); + qed_cdu_init_pf(p_hwfn); + qed_ilt_init_pf(p_hwfn); + qed_src_init_pf(p_hwfn); + qed_tm_init_pf(p_hwfn); + qed_prs_init_pf(p_hwfn); +} + +int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *p_cid, u8 vfid) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_cid_acquired_map *p_map; + u32 rel_cid; + + if (type >= MAX_CONN_TYPES) { + DP_NOTICE(p_hwfn, "Invalid protocol type %d", type); + return -EINVAL; + } + + if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) { + DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid); + return -EINVAL; + } + + /* Determine the right map to take this CID from */ + if (vfid == QED_CXT_PF_CID) + p_map = &p_mngr->acquired[type]; + else + p_map = &p_mngr->acquired_vf[type][vfid]; + + if (!p_map->cid_map) { + DP_NOTICE(p_hwfn, "Invalid protocol 
type %d", type); + return -EINVAL; + } + + rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count); + + if (rel_cid >= p_map->max_count) { + DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type); + return -EINVAL; + } + + __set_bit(rel_cid, p_map->cid_map); + + *p_cid = rel_cid + p_map->start_cid; + + DP_VERBOSE(p_hwfn, QED_MSG_CXT, + "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n", + *p_cid, rel_cid, vfid, type); + + return 0; +} + +int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *p_cid) +{ + return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID); +} + +static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn, + u32 cid, + u8 vfid, + enum protocol_type *p_type, + struct qed_cid_acquired_map **pp_map) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rel_cid; + + /* Iterate over protocols and find matching cid range */ + for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) { + if (vfid == QED_CXT_PF_CID) + *pp_map = &p_mngr->acquired[*p_type]; + else + *pp_map = &p_mngr->acquired_vf[*p_type][vfid]; + + if (!((*pp_map)->cid_map)) + continue; + if (cid >= (*pp_map)->start_cid && + cid < (*pp_map)->start_cid + (*pp_map)->max_count) + break; + } + + if (*p_type == MAX_CONN_TYPES) { + DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid); + goto fail; + } + + rel_cid = cid - (*pp_map)->start_cid; + if (!test_bit(rel_cid, (*pp_map)->cid_map)) { + DP_NOTICE(p_hwfn, "CID %d [vifd %02x] not acquired", + cid, vfid); + goto fail; + } + + return true; +fail: + *p_type = MAX_CONN_TYPES; + *pp_map = NULL; + return false; +} + +void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid) +{ + struct qed_cid_acquired_map *p_map = NULL; + enum protocol_type type; + bool b_acquired; + u32 rel_cid; + + if (vfid != QED_CXT_PF_CID && vfid > MAX_NUM_VFS) { + DP_NOTICE(p_hwfn, + "Trying to return incorrect CID belonging to VF %02x\n", + vfid); + return; + } + + /* Test acquired and find matching per-protocol map */ + b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid, + &type, &p_map); + + if (!b_acquired) + return; + + rel_cid = cid - p_map->start_cid; + clear_bit(rel_cid, p_map->cid_map); + + DP_VERBOSE(p_hwfn, QED_MSG_CXT, + "Released CID 0x%08x [rel. 
%08x] vfid %02x type %d\n", + cid, rel_cid, vfid, type); +} + +void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid) +{ + _qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID); +} + +int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_cid_acquired_map *p_map = NULL; + u32 conn_cxt_size, hw_p_size, cxts_per_p, line; + enum protocol_type type; + bool b_acquired; + + /* Test acquired and find matching per-protocol map */ + b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, + QED_CXT_PF_CID, &type, &p_map); + + if (!b_acquired) + return -EINVAL; + + /* set the protocl type */ + p_info->type = type; + + /* compute context virtual pointer */ + hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + + conn_cxt_size = CONN_CXT_SIZE(p_hwfn); + cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size; + line = p_info->iid / cxts_per_p; + + /* Make sure context is allocated (dynamic allocation) */ + if (!p_mngr->ilt_shadow[line].virt_addr) + return -EINVAL; + + p_info->p_cxt = p_mngr->ilt_shadow[line].virt_addr + + p_info->iid % cxts_per_p * conn_cxt_size; + + DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT), + "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n", + p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid); + + return 0; +} + +static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn, + struct qed_rdma_pf_params *p_params, + u32 num_tasks) +{ + u32 num_cons, num_qps; + enum protocol_type proto; + + if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n"); + p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE; + } + + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH_IWARP: + /* Each QP requires one connection */ + num_cons = min_t(u32, IWARP_MAX_QPS, p_params->num_qps); + proto = PROTOCOLID_IWARP; + break; + case QED_PCI_ETH_ROCE: + num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps); + num_cons = num_qps * 2; /* each QP requires two connections */ + proto = PROTOCOLID_ROCE; + break; + default: + return; + } + + if (num_cons && num_tasks) { + u32 num_srqs, num_xrc_srqs; + + qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0); + + /* Deliberatly passing ROCE for tasks id. This is because + * iWARP / RoCE share the task id. 
+ */ + qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE, + QED_CXT_ROCE_TID_SEG, 1, + num_tasks, false); + + num_srqs = min_t(u32, QED_RDMA_MAX_SRQS, p_params->num_srqs); + + /* XRC SRQs populate a single ILT page */ + num_xrc_srqs = qed_cxt_xrc_srqs_per_page(p_hwfn); + + qed_cxt_set_srq_count(p_hwfn, num_srqs, num_xrc_srqs); + } else { + DP_INFO(p_hwfn->cdev, + "RDMA personality used without setting params!\n"); + } +} + +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks) +{ + /* Set the number of required CORE connections */ + u32 core_cids = 1; /* SPQ */ + + if (p_hwfn->using_ll2) + core_cids += 4; + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); + + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH_RDMA: + case QED_PCI_ETH_IWARP: + case QED_PCI_ETH_ROCE: + { + qed_rdma_set_pf_params(p_hwfn, + &p_hwfn-> + pf_params.rdma_pf_params, + rdma_tasks); + /* no need for break since RoCE coexist with Ethernet */ + } + fallthrough; + case QED_PCI_ETH: + { + struct qed_eth_pf_params *p_params = + &p_hwfn->pf_params.eth_pf_params; + + if (!p_params->num_vf_cons) + p_params->num_vf_cons = + ETH_PF_PARAMS_VF_CONS_DEFAULT; + qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + p_params->num_cons, + p_params->num_vf_cons); + p_hwfn->p_cxt_mngr->arfs_count = p_params->num_arfs_filters; + break; + } + case QED_PCI_FCOE: + { + struct qed_fcoe_pf_params *p_params; + + p_params = &p_hwfn->pf_params.fcoe_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_FCOE, + p_params->num_cons, + 0); + qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_FCOE, + QED_CXT_FCOE_TID_SEG, 0, + p_params->num_tasks, true); + } else { + DP_INFO(p_hwfn->cdev, + "Fcoe personality used without setting params!\n"); + } + break; + } + case QED_PCI_ISCSI: + { + struct qed_iscsi_pf_params *p_params; + + p_params = &p_hwfn->pf_params.iscsi_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_TCP_ULP, + p_params->num_cons, + 0); + qed_cxt_set_proto_tid_count(p_hwfn, + PROTOCOLID_TCP_ULP, + QED_CXT_TCP_ULP_TID_SEG, + 0, + p_params->num_tasks, + true); + } else { + DP_INFO(p_hwfn->cdev, + "Iscsi personality used without setting params!\n"); + } + break; + } + case QED_PCI_NVMETCP: + { + struct qed_nvmetcp_pf_params *p_params; + + p_params = &p_hwfn->pf_params.nvmetcp_pf_params; + + if (p_params->num_cons && p_params->num_tasks) { + qed_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_TCP_ULP, + p_params->num_cons, + 0); + qed_cxt_set_proto_tid_count(p_hwfn, + PROTOCOLID_TCP_ULP, + QED_CXT_TCP_ULP_TID_SEG, + 0, + p_params->num_tasks, + true); + } else { + DP_INFO(p_hwfn->cdev, + "NvmeTCP personality used without setting params!\n"); + } + break; + } + default: + return -EINVAL; + } + + return 0; +} + +int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, + struct qed_tid_mem *p_info) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 proto, seg, total_lines, i, shadow_line; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_fl_seg; + struct qed_tid_seg *p_seg_info; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; + case QED_PCI_ISCSI: + case QED_PCI_NVMETCP: + proto = PROTOCOLID_TCP_ULP; + seg = QED_CXT_TCP_ULP_TID_SEG; + break; + default: + return -EINVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return -EINVAL; + 
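+ /* Expose the force-load (FL) block of this task segment: one virtual
+ * pointer per ILT page in p_info->blocks[], plus the per-page waste
+ * and the number of tids that fit in each block.
+ */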
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + if (!p_seg_info->has_fl_mem) + return -EINVAL; + + p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + total_lines = DIV_ROUND_UP(p_fl_seg->total_size, + p_fl_seg->real_size_in_page); + + for (i = 0; i < total_lines; i++) { + shadow_line = i + p_fl_seg->start_line - + p_hwfn->p_cxt_mngr->pf_start_line; + p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].virt_addr; + } + p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) - + p_fl_seg->real_size_in_page; + p_info->tid_size = p_mngr->task_type_size[p_seg_info->type]; + p_info->num_tids_per_block = p_fl_seg->real_size_in_page / + p_info->tid_size; + + return 0; +} + +/* This function is very RoCE oriented, if another protocol in the future + * will want this feature we'll need to modify the function to be more generic + */ +int +qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, u32 iid) +{ + u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line; + struct tdif_task_context *tdif_context; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + struct qed_ptt *p_ptt; + dma_addr_t p_phys; + u64 ilt_hw_entry; + void *p_virt; + u32 flags1; + int rc = 0; + + switch (elem_type) { + case QED_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case QED_ELEM_SRQ: + /* The first ILT page is not used for regular SRQs. Skip it. */ + iid += p_hwfn->p_cxt_mngr->xrc_srq_count; + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case QED_ELEM_XRC_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = XRC_SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case QED_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, "-EOPNOTSUPP elem type = %d", elem_type); + return -EOPNOTSUPP; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + line = p_blk->start_line + (iid / elems_per_p); + shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line; + + /* If line is already allocated, do nothing, otherwise allocate it and + * write it to the PSWRQ2 registers. + * This section can be run in parallel from different contexts and thus + * a mutex protection is needed. + */ + + mutex_lock(&p_hwfn->p_cxt_mngr->mutex); + + if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr) + goto out0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, + "QED_TIME_OUT on ptt acquire - dynamic allocation"); + rc = -EBUSY; + goto out0; + } + + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_blk->real_size_in_page, &p_phys, + GFP_KERNEL); + if (!p_virt) { + rc = -ENOMEM; + goto out1; + } + + /* configuration of refTagMask to 0xF is required for RoCE DIF MR only, + * to compensate for a HW bug, but it is configured even if DIF is not + * enabled. This is harmless and allows us to avoid a dedicated API. We + * configure the field for all of the contexts on the newly allocated + * page. 
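+ * Note that the loop below therefore initializes every type1 task
+ * context that fits in the new page (elems_per_p of them), not only
+ * the context of the iid that triggered the allocation.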
+ */ + if (elem_type == QED_ELEM_TASK) { + u32 elem_i; + u8 *elem_start = (u8 *)p_virt; + union type1_task_context *elem; + + for (elem_i = 0; elem_i < elems_per_p; elem_i++) { + elem = (union type1_task_context *)elem_start; + tdif_context = &elem->roce_ctx.tdif_context; + + flags1 = le32_to_cpu(tdif_context->flags1); + SET_FIELD(flags1, TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf); + tdif_context->flags1 = cpu_to_le32(flags1); + + elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn); + } + } + + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].virt_addr = p_virt; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr = p_phys; + p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size = + p_blk->real_size_in_page; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS); + + ilt_hw_entry = 0; + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, + (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].phys_addr + >> 12)); + + /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */ + qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry, + reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), + NULL); + + if (elem_type == QED_ELEM_CXT) { + u32 last_cid_allocated = (1 + (iid / elems_per_p)) * + elems_per_p; + + /* Update the relevant register in the parser */ + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, + last_cid_allocated - 1); + + if (!p_hwfn->b_rdma_enabled_in_prs) { + /* Enable RDMA search */ + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); + p_hwfn->b_rdma_enabled_in_prs = true; + } + } + +out1: + qed_ptt_release(p_hwfn, p_ptt); +out0: + mutex_unlock(&p_hwfn->p_cxt_mngr->mutex); + + return rc; +} + +/* This function is very RoCE oriented, if another protocol in the future + * will want this feature we'll need to modify the function to be more generic + */ +static int +qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, + u32 start_iid, u32 count) +{ + u32 start_line, end_line, shadow_start_line, shadow_end_line; + u32 reg_offset, elem_size, hw_p_size, elems_per_p; + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u32 end_iid = start_iid + count; + struct qed_ptt *p_ptt; + u64 ilt_hw_entry = 0; + u32 i; + + switch (elem_type) { + case QED_ELEM_CXT: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + break; + case QED_ELEM_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case QED_ELEM_XRC_SRQ: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM]; + elem_size = XRC_SRQ_CXT_SIZE; + p_blk = &p_cli->pf_blks[SRQ_BLK]; + break; + case QED_ELEM_TASK: + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)]; + break; + default: + DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type); + return -EINVAL; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + start_line = p_blk->start_line + (start_iid / elems_per_p); + end_line = p_blk->start_line + (end_iid / elems_per_p); + if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p)) + end_line--; + + shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line; + shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line; + + p_ptt = 
qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, + "QED_TIME_OUT on ptt acquire - dynamic allocation"); + return -EBUSY; + } + + for (i = shadow_start_line; i < shadow_end_line; i++) { + if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr) + continue; + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_hwfn->p_cxt_mngr->ilt_shadow[i].size, + p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr, + p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr); + + p_hwfn->p_cxt_mngr->ilt_shadow[i].virt_addr = NULL; + p_hwfn->p_cxt_mngr->ilt_shadow[i].phys_addr = 0; + p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + ((start_line++) * ILT_REG_SIZE_IN_BYTES * + ILT_ENTRY_IN_REGS); + + /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a + * wide-bus. + */ + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64) (uintptr_t) &ilt_hw_entry, + reg_offset, + sizeof(ilt_hw_entry) / sizeof(u32), + NULL); + } + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto) +{ + int rc; + u32 cid; + + /* Free Connection CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT, + qed_cxt_get_proto_cid_start(p_hwfn, + proto), + qed_cxt_get_proto_cid_count(p_hwfn, + proto, &cid)); + + if (rc) + return rc; + + /* Free Task CXT ( Intentionally RoCE as task-id is shared between + * RoCE and iWARP ) + */ + proto = PROTOCOLID_ROCE; + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, + qed_cxt_get_proto_tid_count(p_hwfn, proto)); + if (rc) + return rc; + + /* Free TSDM CXT */ + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_XRC_SRQ, 0, + p_hwfn->p_cxt_mngr->xrc_srq_count); + + rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, + p_hwfn->p_cxt_mngr->xrc_srq_count, + p_hwfn->p_cxt_mngr->srq_count); + + return rc; +} + +int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, + u32 tid, u8 ctx_type, void **pp_task_ctx) +{ + struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct qed_ilt_client_cfg *p_cli; + struct qed_tid_seg *p_seg_info; + struct qed_ilt_cli_blk *p_seg; + u32 num_tids_per_block; + u32 tid_size, ilt_idx; + u32 total_lines; + u32 proto, seg; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + proto = PROTOCOLID_FCOE; + seg = QED_CXT_FCOE_TID_SEG; + break; + case QED_PCI_ISCSI: + case QED_PCI_NVMETCP: + proto = PROTOCOLID_TCP_ULP; + seg = QED_CXT_TCP_ULP_TID_SEG; + break; + default: + return -EINVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return -EINVAL; + + p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + if (ctx_type == QED_CTX_WORKING_MEM) { + p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)]; + } else if (ctx_type == QED_CTX_FL_MEM) { + if (!p_seg_info->has_fl_mem) + return -EINVAL; + p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + } else { + return -EINVAL; + } + total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page); + tid_size = p_mngr->task_type_size[p_seg_info->type]; + num_tids_per_block = p_seg->real_size_in_page / tid_size; + + if (total_lines < tid / num_tids_per_block) + return -EINVAL; + + ilt_idx = tid / num_tids_per_block + p_seg->start_line - + p_mngr->pf_start_line; + *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].virt_addr + + (tid % num_tids_per_block) * tid_size; + + return 0; +} + +static u16 qed_blk_calculate_pages(struct qed_ilt_cli_blk *p_blk) +{ + if (p_blk->real_size_in_page == 0) + return 0; + + return DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); 
+} + +u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u16 i, pages = 0; + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]; + pages += qed_blk_calculate_pages(p_blk); + } + + return pages; +} + +u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u16 i, pages = 0; + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) { + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(i, VF)]; + pages += qed_blk_calculate_pages(p_blk); + } + + return pages; +} + +u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u16 i, pages = 0; + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)]; + pages += qed_blk_calculate_pages(p_blk); + } + + return pages; +} + +u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn) +{ + struct qed_ilt_client_cfg *p_cli; + struct qed_ilt_cli_blk *p_blk; + u16 pages = 0, i; + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + for (i = 0; i < NUM_TASK_VF_SEGMENTS; i++) { + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(i)]; + pages += qed_blk_calculate_pages(p_blk); + } + + return pages; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h new file mode 100644 index 0000000000..168ce2c503 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -0,0 +1,367 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_CXT_H +#define _QED_CXT_H + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/qed/qed_if.h> +#include "qed_hsi.h" +#include "qed.h" + +struct qed_cxt_info { + void *p_cxt; + u32 iid; + enum protocol_type type; +}; + +#define MAX_TID_BLOCKS 512 +struct qed_tid_mem { + u32 tid_size; + u32 num_tids_per_block; + u32 waste; + u8 *blocks[MAX_TID_BLOCKS]; /* 4K */ +}; + +/** + * qed_cxt_get_cid_info(): Returns the context info for a specific cidi. + * + * @p_hwfn: HW device data. + * @p_info: In/out. + * + * Return: Int. + */ +int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, + struct qed_cxt_info *p_info); + +/** + * qed_cxt_get_tid_mem_info(): Returns the tid mem info. + * + * @p_hwfn: HW device data. + * @p_info: in/out. + * + * Return: int. + */ +int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, + struct qed_tid_mem *p_info); + +#define QED_CXT_TCP_ULP_TID_SEG PROTOCOLID_TCP_ULP +#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE +#define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE +enum qed_cxt_elem_type { + QED_ELEM_CXT, + QED_ELEM_SRQ, + QED_ELEM_TASK, + QED_ELEM_XRC_SRQ, +}; + +u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid); + +/** + * qed_cxt_set_pf_params(): Set the PF params for cxt init. + * + * @p_hwfn: HW device data. + * @rdma_tasks: Requested maximum. + * + * Return: int. + */ +int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks); + +/** + * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters. + * + * @p_hwfn: HW device data. + * @last_line: Last_line. 
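+ * (receives the number of ILT lines required by the
+ * current per-PF configuration).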
+ * + * Return: Int + */ +int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line); + +/** + * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased. + * + * @p_hwfn: HW device data. + * @used_lines: Used lines. + * + * Return: Int. + */ +u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines); + +/** + * qed_cxt_mngr_alloc(): Allocate and init the context manager struct. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_cxt_mngr_free() - Context manager free. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn); + +/** + * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_cxt_mngr_setup(): Reset the acquired CIDs. + * + * @p_hwfn: HW device data. + */ +void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn); + +/** + * qed_cxt_hw_init_common(): Initailze ILT and DQ, common phase, per path. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); + +/** + * qed_cxt_hw_init_pf(): Initailze ILT and DQ, PF phase, per path. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + */ +void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_qm_init_pf(): Initailze the QM PF phase, per path. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @is_pf_loading: Is pf pending. + * + * Return: Void. + */ +void qed_qm_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool is_pf_loading); + +/** + * qed_qm_reconf(): Reconfigures QM pf on the fly. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. + */ +int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +#define QED_CXT_PF_CID (0xff) + +/** + * qed_cxt_release_cid(): Release a cid. + * + * @p_hwfn: HW device data. + * @cid: Cid. + * + * Return: Void. + */ +void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); + +/** + * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue. + * + * @p_hwfn: HW device data. + * @cid: Cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. + * + * Return: Void. + */ +void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid); + +/** + * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type. + * + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. + * + * Return: Int. + */ +int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *p_cid); + +/** + * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type + * for a vf-queue. + * + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. + * + * Return: Int. 
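+ *
+ * A typical caller pattern (illustrative only; error handling omitted):
+ *
+ *   u32 cid;
+ *
+ *   if (!_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid, vfid)) {
+ *           ... use cid ...
+ *           _qed_cxt_release_cid(p_hwfn, cid, vfid);
+ *   }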
+ */ +int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, + enum protocol_type type, u32 *p_cid, u8 vfid); + +int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn, + enum qed_cxt_elem_type elem_type, u32 iid); +u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn, + enum protocol_type type); +u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn, + enum protocol_type type); +int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto); + +#define QED_CTX_WORKING_MEM 0 +#define QED_CTX_FL_MEM 1 +int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn, + u32 tid, u8 ctx_type, void **task_ctx); + +/* Max number of connection types in HW (DQ/CDU etc.) */ +#define MAX_CONN_TYPES PROTOCOLID_COMMON +#define NUM_TASK_TYPES 2 +#define NUM_TASK_PF_SEGMENTS 4 +#define NUM_TASK_VF_SEGMENTS 1 + +/* PF per protocl configuration object */ +#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS) +#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS) + +struct qed_tid_seg { + u32 count; + u8 type; + bool has_fl_mem; +}; + +struct qed_conn_type_cfg { + u32 cid_count; + u32 cids_per_vf; + struct qed_tid_seg tid_seg[TASK_SEGMENTS]; +}; + +/* ILT Client configuration, + * Per connection type (protocol) resources (cids, tis, vf cids etc.) + * 1 - for connection context (CDUC) and for each task context we need two + * values, for regular task context and for force load memory + */ +#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) +#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) +#define CDUC_BLK (0) +#define SRQ_BLK (0) +#define CDUT_SEG_BLK(n) (1 + (u8)(n)) +#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS) + +struct ilt_cfg_pair { + u32 reg; + u32 val; +}; + +struct qed_ilt_cli_blk { + u32 total_size; /* 0 means not active */ + u32 real_size_in_page; + u32 start_line; + u32 dynamic_line_offset; + u32 dynamic_line_cnt; +}; + +struct qed_ilt_client_cfg { + bool active; + + /* ILT boundaries */ + struct ilt_cfg_pair first; + struct ilt_cfg_pair last; + struct ilt_cfg_pair p_size; + + /* ILT client blocks for PF */ + struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; + u32 pf_total_lines; + + /* ILT client blocks for VFs */ + struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS]; + u32 vf_total_lines; +}; + +struct qed_cid_acquired_map { + u32 start_cid; + u32 max_count; + unsigned long *cid_map; +}; + +struct qed_src_t2 { + struct phys_mem_desc *dma_mem; + u32 num_pages; + u64 first_free; + u64 last_free; +}; + +struct qed_cxt_mngr { + /* Per protocl configuration */ + struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES]; + + /* computed ILT structure */ + struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS]; + + /* Task type sizes */ + u32 task_type_size[NUM_TASK_TYPES]; + + /* total number of VFs for this hwfn - + * ALL VFs are symmetric in terms of HW resources + */ + u32 vf_count; + u32 first_vf_in_pf; + + /* Acquired CIDs */ + struct qed_cid_acquired_map acquired[MAX_CONN_TYPES]; + + struct qed_cid_acquired_map + acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS]; + + /* ILT shadow table */ + struct phys_mem_desc *ilt_shadow; + u32 ilt_shadow_size; + u32 pf_start_line; + + /* Mutex for a dynamic ILT allocation */ + struct mutex mutex; + + /* SRC T2 */ + struct qed_src_t2 src_t2; + + /* total number of SRQ's for this hwfn */ + u32 srq_count; + u32 xrc_srq_count; + + /* Maximal number of L2 steering filters */ + u32 arfs_count; + + u16 iscsi_task_pages; + u16 fcoe_task_pages; + u16 roce_task_pages; + u16 eth_task_pages; + u16 task_ctx_size; + u16 conn_ctx_size; +}; + +u16 
qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn); +u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn); +u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn); +u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn); + +u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn, + enum ilt_clients ilt_client); + +u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h new file mode 100644 index 0000000000..f6cd1b3efd --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h @@ -0,0 +1,1491 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ +#ifndef _QED_DBG_HSI_H +#define _QED_DBG_HSI_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> + +/****************************************/ +/* Debug Tools HSI constants and macros */ +/****************************************/ + +enum block_id { + BLOCK_GRC, + BLOCK_MISCS, + BLOCK_MISC, + BLOCK_DBU, + BLOCK_PGLUE_B, + BLOCK_CNIG, + BLOCK_CPMU, + BLOCK_NCSI, + BLOCK_OPTE, + BLOCK_BMB, + BLOCK_PCIE, + BLOCK_MCP, + BLOCK_MCP2, + BLOCK_PSWHST, + BLOCK_PSWHST2, + BLOCK_PSWRD, + BLOCK_PSWRD2, + BLOCK_PSWWR, + BLOCK_PSWWR2, + BLOCK_PSWRQ, + BLOCK_PSWRQ2, + BLOCK_PGLCS, + BLOCK_DMAE, + BLOCK_PTU, + BLOCK_TCM, + BLOCK_MCM, + BLOCK_UCM, + BLOCK_XCM, + BLOCK_YCM, + BLOCK_PCM, + BLOCK_QM, + BLOCK_TM, + BLOCK_DORQ, + BLOCK_BRB, + BLOCK_SRC, + BLOCK_PRS, + BLOCK_TSDM, + BLOCK_MSDM, + BLOCK_USDM, + BLOCK_XSDM, + BLOCK_YSDM, + BLOCK_PSDM, + BLOCK_TSEM, + BLOCK_MSEM, + BLOCK_USEM, + BLOCK_XSEM, + BLOCK_YSEM, + BLOCK_PSEM, + BLOCK_RSS, + BLOCK_TMLD, + BLOCK_MULD, + BLOCK_YULD, + BLOCK_XYLD, + BLOCK_PRM, + BLOCK_PBF_PB1, + BLOCK_PBF_PB2, + BLOCK_RPB, + BLOCK_BTB, + BLOCK_PBF, + BLOCK_RDIF, + BLOCK_TDIF, + BLOCK_CDU, + BLOCK_CCFC, + BLOCK_TCFC, + BLOCK_IGU, + BLOCK_CAU, + BLOCK_UMAC, + BLOCK_XMAC, + BLOCK_MSTAT, + BLOCK_DBG, + BLOCK_NIG, + BLOCK_WOL, + BLOCK_BMBN, + BLOCK_IPC, + BLOCK_NWM, + BLOCK_NWS, + BLOCK_MS, + BLOCK_PHY_PCIE, + BLOCK_LED, + BLOCK_AVS_WRAP, + BLOCK_PXPREQBUS, + BLOCK_BAR0_MAP, + BLOCK_MCP_FIO, + BLOCK_LAST_INIT, + BLOCK_PRS_FC, + BLOCK_PBF_FC, + BLOCK_NIG_LB_FC, + BLOCK_NIG_LB_FC_PLLH, + BLOCK_NIG_TX_FC_PLLH, + BLOCK_NIG_TX_FC, + BLOCK_NIG_RX_FC_PLLH, + BLOCK_NIG_RX_FC, + MAX_BLOCK_ID +}; + +/* binary debug buffer types */ +enum bin_dbg_buffer_type { + BIN_BUF_DBG_MODE_TREE, + BIN_BUF_DBG_DUMP_REG, + BIN_BUF_DBG_DUMP_MEM, + BIN_BUF_DBG_IDLE_CHK_REGS, + BIN_BUF_DBG_IDLE_CHK_IMMS, + BIN_BUF_DBG_IDLE_CHK_RULES, + BIN_BUF_DBG_IDLE_CHK_PARSING_DATA, + BIN_BUF_DBG_ATTN_BLOCKS, + BIN_BUF_DBG_ATTN_REGS, + BIN_BUF_DBG_ATTN_INDEXES, + BIN_BUF_DBG_ATTN_NAME_OFFSETS, + BIN_BUF_DBG_BLOCKS, + BIN_BUF_DBG_BLOCKS_CHIP_DATA, + BIN_BUF_DBG_BUS_LINES, + BIN_BUF_DBG_BLOCKS_USER_DATA, + BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA, + BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS, + BIN_BUF_DBG_RESET_REGS, + BIN_BUF_DBG_PARSING_STRINGS, + MAX_BIN_DBG_BUFFER_TYPE +}; + +/* Attention bit mapping */ +struct dbg_attn_bit_mapping { + u16 data; +#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF +#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1 +#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15 +}; + +/* Attention block per-type data */ +struct dbg_attn_block_type_data { + u16 
names_offset; + u16 reserved1; + u8 num_regs; + u8 reserved2; + u16 regs_offset; + +}; + +/* Block attentions */ +struct dbg_attn_block { + struct dbg_attn_block_type_data per_type_data[2]; +}; + +/* Attention register result */ +struct dbg_attn_reg_result { + u32 data; +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24 + u16 block_attn_offset; + u16 reserved; + u32 sts_val; + u32 mask_val; +}; + +/* Attention block result */ +struct dbg_attn_block_result { + u8 block_id; + u8 data; +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3 +#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0 +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F +#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2 + u16 names_offset; + struct dbg_attn_reg_result reg_results[15]; +}; + +/* Mode header */ +struct dbg_mode_hdr { + u16 data; +#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1 +#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0 +#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF +#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1 +}; + +/* Attention register */ +struct dbg_attn_reg { + struct dbg_mode_hdr mode; + u16 block_attn_offset; + u32 data; +#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF +#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0 +#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF +#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24 + u32 sts_clr_address; + u32 mask_address; +}; + +/* Attention types */ +enum dbg_attn_type { + ATTN_TYPE_INTERRUPT, + ATTN_TYPE_PARITY, + MAX_DBG_ATTN_TYPE +}; + +/* Block debug data */ +struct dbg_block { + u8 name[15]; + u8 associated_storm_letter; +}; + +/* Chip-specific block debug data */ +struct dbg_block_chip { + u8 flags; +#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1 +#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1 +#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1 +#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4 +#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7 +#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5 + u8 dbg_client_id; + u8 reset_reg_id; + u8 reset_reg_bit_offset; + struct dbg_mode_hdr dbg_bus_mode; + u16 reserved1; + u8 reserved2; + u8 num_of_dbg_bus_lines; + u16 dbg_bus_lines_offset; + u32 dbg_select_reg_addr; + u32 dbg_dword_enable_reg_addr; + u32 dbg_shift_reg_addr; + u32 dbg_force_valid_reg_addr; + u32 dbg_force_frame_reg_addr; +}; + +/* Chip-specific block user debug data */ +struct dbg_block_chip_user { + u8 num_of_dbg_bus_lines; + u8 has_latency_events; + u16 names_offset; +}; + +/* Block user debug data */ +struct dbg_block_user { + u8 name[16]; +}; + +/* Block Debug line data */ +struct dbg_bus_line { + u8 data; +#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF +#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0 +#define DBG_BUS_LINE_IS_256B_MASK 0x1 +#define DBG_BUS_LINE_IS_256B_SHIFT 4 +#define DBG_BUS_LINE_RESERVED_MASK 0x7 +#define DBG_BUS_LINE_RESERVED_SHIFT 5 + u8 group_sizes; +}; + +/* Condition header for registers dump */ +struct dbg_dump_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u8 block_id; /* block ID */ + u8 data_size; /* size in dwords of the data following this header */ +}; + +/* Memory data for registers dump */ +struct dbg_dump_mem { + u32 dword0; +#define 
DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF +#define DBG_DUMP_MEM_ADDRESS_SHIFT 0 +#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF +#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24 + u32 dword1; +#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF +#define DBG_DUMP_MEM_LENGTH_SHIFT 0 +#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24 +#define DBG_DUMP_MEM_RESERVED_MASK 0x7F +#define DBG_DUMP_MEM_RESERVED_SHIFT 25 +}; + +/* Register data for registers dump */ +struct dbg_dump_reg { + u32 data; +#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_DUMP_REG_ADDRESS_SHIFT 0 +#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1 +#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23 +#define DBG_DUMP_REG_LENGTH_MASK 0xFF +#define DBG_DUMP_REG_LENGTH_SHIFT 24 +}; + +/* Split header for registers dump */ +struct dbg_dump_split_hdr { + u32 hdr; +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF +#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0 +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF +#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24 +}; + +/* Condition header for idle check */ +struct dbg_idle_chk_cond_hdr { + struct dbg_mode_hdr mode; /* Mode header */ + u16 data_size; /* size in dwords of the data following this header */ +}; + +/* Idle Check condition register */ +struct dbg_idle_chk_cond_reg { + u32 data; +#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24 + u16 num_entries; + u8 entry_size; + u8 start_entry; +}; + +/* Idle Check info register */ +struct dbg_idle_chk_info_reg { + u32 data; +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF +#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1 +#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23 +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF +#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24 + u16 size; /* register size in dwords */ + struct dbg_mode_hdr mode; /* Mode header */ +}; + +/* Idle Check register */ +union dbg_idle_chk_reg { + struct dbg_idle_chk_cond_reg cond_reg; /* condition register */ + struct dbg_idle_chk_info_reg info_reg; /* info register */ +}; + +/* Idle Check result header */ +struct dbg_idle_chk_result_hdr { + u16 rule_id; /* Failing rule index */ + u16 mem_entry_id; /* Failing memory entry index */ + u8 num_dumped_cond_regs; /* number of dumped condition registers */ + u8 num_dumped_info_regs; /* number of dumped condition registers */ + u8 severity; /* from dbg_idle_chk_severity_types enum */ + u8 reserved; +}; + +/* Idle Check result register header */ +struct dbg_idle_chk_result_reg_hdr { + u8 data; +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1 +#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0 +#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F +#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1 + u8 start_entry; /* index of the first checked entry */ + u16 size; /* register size in dwords */ +}; + +/* Idle Check rule */ +struct dbg_idle_chk_rule { + u16 rule_id; /* Idle Check rule ID */ + u8 severity; /* value from dbg_idle_chk_severity_types enum */ + u8 cond_id; /* Condition ID */ + u8 num_cond_regs; /* number of condition registers */ + u8 num_info_regs; /* number of info registers */ + u8 num_imms; /* number of immediates in the condition */ + u8 reserved1; + u16 reg_offset; /* offset of this rules registers in the idle check + * register 
array (in dbg_idle_chk_reg units). + */ + u16 imm_offset; /* offset of this rules immediate values in the + * immediate values array (in dwords). + */ +}; + +/* Idle Check rule parsing data */ +struct dbg_idle_chk_rule_parsing_data { + u32 data; +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0 +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF +#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1 +}; + +/* Idle check severity types */ +enum dbg_idle_chk_severity_types { + /* idle check failure should cause an error */ + IDLE_CHK_SEVERITY_ERROR, + /* idle check failure should cause an error only if theres no traffic */ + IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, + /* idle check failure should cause a warning */ + IDLE_CHK_SEVERITY_WARNING, + MAX_DBG_IDLE_CHK_SEVERITY_TYPES +}; + +/* Reset register */ +struct dbg_reset_reg { + u32 data; +#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF +#define DBG_RESET_REG_ADDR_SHIFT 0 +#define DBG_RESET_REG_IS_REMOVED_MASK 0x1 +#define DBG_RESET_REG_IS_REMOVED_SHIFT 24 +#define DBG_RESET_REG_RESERVED_MASK 0x7F +#define DBG_RESET_REG_RESERVED_SHIFT 25 +}; + +/* Debug Bus block data */ +struct dbg_bus_block_data { + u8 enable_mask; + u8 right_shift; + u8 force_valid_mask; + u8 force_frame_mask; + u8 dword_mask; + u8 line_num; + u8 hw_id; + u8 flags; +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1 +#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0 +#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F +#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1 +}; + +enum dbg_bus_clients { + DBG_BUS_CLIENT_RBCN, + DBG_BUS_CLIENT_RBCP, + DBG_BUS_CLIENT_RBCR, + DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCF, + DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_OTHER_ENGINE, + DBG_BUS_CLIENT_TIMESTAMP, + DBG_BUS_CLIENT_CPU, + DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCQ, + DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCV, + MAX_DBG_BUS_CLIENTS +}; + +/* Debug Bus constraint operation types */ +enum dbg_bus_constraint_ops { + DBG_BUS_CONSTRAINT_OP_EQ, + DBG_BUS_CONSTRAINT_OP_NE, + DBG_BUS_CONSTRAINT_OP_LT, + DBG_BUS_CONSTRAINT_OP_LTC, + DBG_BUS_CONSTRAINT_OP_LE, + DBG_BUS_CONSTRAINT_OP_LEC, + DBG_BUS_CONSTRAINT_OP_GT, + DBG_BUS_CONSTRAINT_OP_GTC, + DBG_BUS_CONSTRAINT_OP_GE, + DBG_BUS_CONSTRAINT_OP_GEC, + MAX_DBG_BUS_CONSTRAINT_OPS +}; + +/* Debug Bus trigger state data */ +struct dbg_bus_trigger_state_data { + u8 msg_len; + u8 constraint_dword_mask; + u8 storm_id; + u8 reserved; +}; + +/* Debug Bus memory address */ +struct dbg_bus_mem_addr { + u32 lo; + u32 hi; +}; + +/* Debug Bus PCI buffer data */ +struct dbg_bus_pci_buf_data { + struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */ + struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */ + u32 size; /* PCI buffer size in bytes */ +}; + +/* Debug Bus Storm EID range filter params */ +struct dbg_bus_storm_eid_range_params { + u8 min; /* Minimal event ID to filter on */ + u8 max; /* Maximal event ID to filter on */ +}; + +/* Debug Bus Storm EID mask filter params */ +struct dbg_bus_storm_eid_mask_params { + u8 val; /* Event ID value */ + u8 mask; /* Event ID mask. 1s in the mask = dont care bits. 
*/ +}; + +/* Debug Bus Storm EID filter params */ +union dbg_bus_storm_eid_params { + struct dbg_bus_storm_eid_range_params range; + struct dbg_bus_storm_eid_mask_params mask; +}; + +/* Debug Bus Storm data */ +struct dbg_bus_storm_data { + u8 enabled; + u8 mode; + u8 hw_id; + u8 eid_filter_en; + u8 eid_range_not_mask; + u8 cid_filter_en; + union dbg_bus_storm_eid_params eid_filter_params; + u32 cid; +}; + +/* Debug Bus data */ +struct dbg_bus_data { + u32 app_version; + u8 state; + u8 mode_256b_en; + u8 num_enabled_blocks; + u8 num_enabled_storms; + u8 target; + u8 one_shot_en; + u8 grc_input_en; + u8 timestamp_input_en; + u8 filter_en; + u8 adding_filter; + u8 filter_pre_trigger; + u8 filter_post_trigger; + u8 trigger_en; + u8 filter_constraint_dword_mask; + u8 next_trigger_state; + u8 next_constraint_id; + struct dbg_bus_trigger_state_data trigger_states[3]; + u8 filter_msg_len; + u8 rcv_from_other_engine; + u8 blocks_dword_mask; + u8 blocks_dword_overlap; + u32 hw_id_mask; + struct dbg_bus_pci_buf_data pci_buf; + struct dbg_bus_block_data blocks[132]; + struct dbg_bus_storm_data storms[6]; +}; + +/* Debug bus states */ +enum dbg_bus_states { + DBG_BUS_STATE_IDLE, + DBG_BUS_STATE_READY, + DBG_BUS_STATE_RECORDING, + DBG_BUS_STATE_STOPPED, + MAX_DBG_BUS_STATES +}; + +/* Debug Bus Storm modes */ +enum dbg_bus_storm_modes { + DBG_BUS_STORM_MODE_PRINTF, + DBG_BUS_STORM_MODE_PRAM_ADDR, + DBG_BUS_STORM_MODE_DRA_RW, + DBG_BUS_STORM_MODE_DRA_W, + DBG_BUS_STORM_MODE_LD_ST_ADDR, + DBG_BUS_STORM_MODE_DRA_FSM, + DBG_BUS_STORM_MODE_FAST_DBGMUX, + DBG_BUS_STORM_MODE_RH, + DBG_BUS_STORM_MODE_RH_WITH_STORE, + DBG_BUS_STORM_MODE_FOC, + DBG_BUS_STORM_MODE_EXT_STORE, + MAX_DBG_BUS_STORM_MODES +}; + +/* Debug bus target IDs */ +enum dbg_bus_targets { + DBG_BUS_TARGET_ID_INT_BUF, + DBG_BUS_TARGET_ID_NIG, + DBG_BUS_TARGET_ID_PCI, + MAX_DBG_BUS_TARGETS +}; + +/* GRC Dump data */ +struct dbg_grc_data { + u8 params_initialized; + u8 reserved1; + u16 reserved2; + u32 param_val[48]; +}; + +/* Debug GRC params */ +enum dbg_grc_params { + DBG_GRC_PARAM_DUMP_TSTORM, + DBG_GRC_PARAM_DUMP_MSTORM, + DBG_GRC_PARAM_DUMP_USTORM, + DBG_GRC_PARAM_DUMP_XSTORM, + DBG_GRC_PARAM_DUMP_YSTORM, + DBG_GRC_PARAM_DUMP_PSTORM, + DBG_GRC_PARAM_DUMP_REGS, + DBG_GRC_PARAM_DUMP_RAM, + DBG_GRC_PARAM_DUMP_PBUF, + DBG_GRC_PARAM_DUMP_IOR, + DBG_GRC_PARAM_DUMP_VFC, + DBG_GRC_PARAM_DUMP_CM_CTX, + DBG_GRC_PARAM_DUMP_PXP, + DBG_GRC_PARAM_DUMP_RSS, + DBG_GRC_PARAM_DUMP_CAU, + DBG_GRC_PARAM_DUMP_QM, + DBG_GRC_PARAM_DUMP_MCP, + DBG_GRC_PARAM_DUMP_DORQ, + DBG_GRC_PARAM_DUMP_CFC, + DBG_GRC_PARAM_DUMP_IGU, + DBG_GRC_PARAM_DUMP_BRB, + DBG_GRC_PARAM_DUMP_BTB, + DBG_GRC_PARAM_DUMP_BMB, + DBG_GRC_PARAM_RESERVD1, + DBG_GRC_PARAM_DUMP_MULD, + DBG_GRC_PARAM_DUMP_PRS, + DBG_GRC_PARAM_DUMP_DMAE, + DBG_GRC_PARAM_DUMP_TM, + DBG_GRC_PARAM_DUMP_SDM, + DBG_GRC_PARAM_DUMP_DIF, + DBG_GRC_PARAM_DUMP_STATIC, + DBG_GRC_PARAM_UNSTALL, + DBG_GRC_PARAM_RESERVED2, + DBG_GRC_PARAM_MCP_TRACE_META_SIZE, + DBG_GRC_PARAM_EXCLUDE_ALL, + DBG_GRC_PARAM_CRASH, + DBG_GRC_PARAM_PARITY_SAFE, + DBG_GRC_PARAM_DUMP_CM, + DBG_GRC_PARAM_DUMP_PHY, + DBG_GRC_PARAM_NO_MCP, + DBG_GRC_PARAM_NO_FW_VER, + DBG_GRC_PARAM_RESERVED3, + DBG_GRC_PARAM_DUMP_MCP_HW_DUMP, + DBG_GRC_PARAM_DUMP_ILT_CDUC, + DBG_GRC_PARAM_DUMP_ILT_CDUT, + DBG_GRC_PARAM_DUMP_CAU_EXT, + MAX_DBG_GRC_PARAMS +}; + +/* Debug status codes */ +enum dbg_status { + DBG_STATUS_OK, + DBG_STATUS_APP_VERSION_NOT_SET, + DBG_STATUS_UNSUPPORTED_APP_VERSION, + DBG_STATUS_DBG_BLOCK_NOT_RESET, + DBG_STATUS_INVALID_ARGS, + 
DBG_STATUS_OUTPUT_ALREADY_SET, + DBG_STATUS_INVALID_PCI_BUF_SIZE, + DBG_STATUS_PCI_BUF_ALLOC_FAILED, + DBG_STATUS_PCI_BUF_NOT_ALLOCATED, + DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS, + DBG_STATUS_NO_MATCHING_FRAMING_MODE, + DBG_STATUS_VFC_READ_ERROR, + DBG_STATUS_STORM_ALREADY_ENABLED, + DBG_STATUS_STORM_NOT_ENABLED, + DBG_STATUS_BLOCK_ALREADY_ENABLED, + DBG_STATUS_BLOCK_NOT_ENABLED, + DBG_STATUS_NO_INPUT_ENABLED, + DBG_STATUS_NO_FILTER_TRIGGER_256B, + DBG_STATUS_FILTER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_NOT_ENABLED, + DBG_STATUS_CANT_ADD_CONSTRAINT, + DBG_STATUS_TOO_MANY_TRIGGER_STATES, + DBG_STATUS_TOO_MANY_CONSTRAINTS, + DBG_STATUS_RECORDING_NOT_STARTED, + DBG_STATUS_DATA_DIDNT_TRIGGER, + DBG_STATUS_NO_DATA_RECORDED, + DBG_STATUS_DUMP_BUF_TOO_SMALL, + DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, + DBG_STATUS_UNKNOWN_CHIP, + DBG_STATUS_VIRT_MEM_ALLOC_FAILED, + DBG_STATUS_BLOCK_IN_RESET, + DBG_STATUS_INVALID_TRACE_SIGNATURE, + DBG_STATUS_INVALID_NVRAM_BUNDLE, + DBG_STATUS_NVRAM_GET_IMAGE_FAILED, + DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, + DBG_STATUS_NVRAM_READ_FAILED, + DBG_STATUS_IDLE_CHK_PARSE_FAILED, + DBG_STATUS_MCP_TRACE_BAD_DATA, + DBG_STATUS_MCP_TRACE_NO_META, + DBG_STATUS_MCP_COULD_NOT_HALT, + DBG_STATUS_MCP_COULD_NOT_RESUME, + DBG_STATUS_RESERVED0, + DBG_STATUS_SEMI_FIFO_NOT_EMPTY, + DBG_STATUS_IGU_FIFO_BAD_DATA, + DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, + DBG_STATUS_FW_ASSERTS_PARSE_FAILED, + DBG_STATUS_REG_FIFO_BAD_DATA, + DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, + DBG_STATUS_DBG_ARRAY_NOT_SET, + DBG_STATUS_RESERVED1, + DBG_STATUS_NON_MATCHING_LINES, + DBG_STATUS_INSUFFICIENT_HW_IDS, + DBG_STATUS_DBG_BUS_IN_USE, + DBG_STATUS_INVALID_STORM_DBG_MODE, + DBG_STATUS_OTHER_ENGINE_BB_ONLY, + DBG_STATUS_FILTER_SINGLE_HW_ID, + DBG_STATUS_TRIGGER_SINGLE_HW_ID, + DBG_STATUS_MISSING_TRIGGER_STATE_STORM, + MAX_DBG_STATUS +}; + +/* Debug Storms IDs */ +enum dbg_storms { + DBG_TSTORM_ID, + DBG_MSTORM_ID, + DBG_USTORM_ID, + DBG_XSTORM_ID, + DBG_YSTORM_ID, + DBG_PSTORM_ID, + MAX_DBG_STORMS +}; + +/* Idle Check data */ +struct idle_chk_data { + u32 buf_size; + u8 buf_size_set; + u8 reserved1; + u16 reserved2; +}; + +struct pretend_params { + u8 split_type; + u8 reserved; + u16 split_id; +}; + +/* Debug Tools data (per HW function) + */ +struct dbg_tools_data { + struct dbg_grc_data grc; + struct dbg_bus_data bus; + struct idle_chk_data idle_chk; + u8 mode_enable[40]; + u8 block_in_reset[132]; + u8 chip_id; + u8 hw_type; + u8 num_ports; + u8 num_pfs_per_port; + u8 num_vfs; + u8 initialized; + u8 use_dmae; + u8 reserved; + struct pretend_params pretend; + u32 num_regs_read; +}; + +/* ILT Clients */ +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_CDUT, + ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_TSDM, + ILT_CLI_RGFS, + ILT_CLI_TGFS, + MAX_ILT_CLIENTS +}; + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug + * arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: A pointer to the binary data with debug arrays. + * + * Return: enum dbg status. + */ +enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_read_regs(): Reads registers into a buffer (using GRC). + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf: Destination buffer. + * @addr: Source GRC address in dwords. + * @len: Number of registers to read. + * + * Return: Void. 
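+ *
+ * Illustrative sketch (not part of the original header; 'grc_addr' is a
+ * placeholder for any valid GRC address expressed in dwords):
+ *
+ *   u32 buf[4];
+ *
+ *   qed_read_regs(p_hwfn, p_ptt, buf, grc_addr, ARRAY_SIZE(buf));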
+ */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); + +/** + * qed_read_fw_info(): Reads FW info from the chip. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_info: (Out) a pointer to write the FW info into. + * + * Return: True if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. + * + * The FW info contains FW-related information, such as the FW version, + * FW image (main/L2B/kuku), FW timestamp, etc. + * The FW info is read from the internal RAM of the first Storm that is not in + * reset. + */ +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info); +/** + * qed_dbg_grc_config(): Sets the value of a GRC parameter. + * + * @p_hwfn: HW device data. + * @grc_param: GRC parameter. + * @val: Value to set. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - Grc_param is invalid. + * - Val is outside the allowed boundaries. + */ +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val); + +/** + * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their + * default value. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); +/** + * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for + * GRC Dump. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the collected GRC data into. + * @buf_size_in_dwords:Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified dump buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size + * for idle check results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the idle check + * data. + * + * return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_idle_chk_dump: Performs idle check and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the idle check data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. 
+ * - The specified buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size + * for mcp trace results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the mcp trace data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * - The trace meta data cannot be read (from NVRAM or image file). + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size + * for grc trace fifo results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the reg fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size + * for the IGU fifo results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo + * data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. 
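+ *
+ * Typical two-step flow, shown here only as an illustrative sketch (buffer
+ * management and error handling are up to the caller; vzalloc() is just one
+ * possible allocator):
+ *
+ *   u32 size_dwords, dumped_dwords;
+ *   u32 *buf;
+ *
+ *   qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords);
+ *   buf = vzalloc(size_dwords * sizeof(u32));
+ *   qed_dbg_igu_fifo_dump(p_hwfn, p_ptt, buf, size_dwords, &dumped_dwords);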
+ */ +enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); + +/** + * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the IGU fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * - The specified buffer is too small + * - DMAE transaction failed + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_protection_override_get_dump_buf_size(): Returns the required + * buffer size for protection override window results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for protection + * override data. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status +qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_protection_override_dump(): Reads protection override window + * entries and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the protection override data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * @return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); +/** + * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer + * size for FW Asserts results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size); +/** + * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the FW Asserts data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords); + +/** + * qed_dbg_read_attn(): Reads the attention registers of the specified + * block and type, and writes the results into the specified buffer. 
+ * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @block: Block ID. + * @attn_type: Attention type. + * @clear_status: Indicates if the attention status should be cleared. + * @results: (OUT) Pointer to write the read results into. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block, + enum dbg_attn_type attn_type, + bool clear_status, + struct dbg_attn_block_result *results); + +/** + * qed_dbg_print_attn(): Prints attention registers values in the + * specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); + +/******************************* Data Types **********************************/ + +struct mcp_trace_format { + u32 data; +#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff +#define MCP_TRACE_FORMAT_MODULE_OFFSET 0 +#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000 +#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16 +#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000 +#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18 +#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000 +#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20 +#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000 +#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22 +#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000 +#define MCP_TRACE_FORMAT_LEN_OFFSET 24 + + char *format_str; +}; + +/* MCP Trace Meta data structure */ +struct mcp_trace_meta { + u32 modules_num; + char **modules; + u32 formats_num; + struct mcp_trace_format *formats; + bool is_allocated; +}; + +/* Debug Tools user data */ +struct dbg_tools_user_data { + struct mcp_trace_meta mcp_trace_meta; + const u32 *mcp_trace_user_meta_buf; +}; + +/******************************** Constants **********************************/ + +#define MAX_NAME_LEN 16 + +/***************************** Public Functions *******************************/ + +/** + * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with + * debug arrays. + * + * @p_hwfn: HW device data. + * @bin_ptr: a pointer to the binary data with debug arrays. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr); + +/** + * qed_dbg_alloc_user_data(): Allocates user debug data. + * + * @p_hwfn: HW device data. + * @user_data_ptr: (OUT) a pointer to the allocated memory. + * + * Return: dbg_status. + */ +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, + void **user_data_ptr); + +/** + * qed_dbg_get_status_str(): Returns a string for the specified status. + * + * @status: A debug status code. + * + * Return: A string for the specified status. + */ +const char *qed_dbg_get_status_str(enum dbg_status status); + +/** + * qed_get_idle_chk_results_buf_size(): Returns the required buffer size + * for idle check results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. 
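+ *
+ * Illustrative parsing sketch (not part of the original header; dump_buf and
+ * num_dumped_dwords are assumed to come from a prior qed_dbg_idle_chk_dump()
+ * call, and vzalloc() is just one possible allocator):
+ *
+ *   u32 size_bytes, errors, warnings;
+ *   char *text;
+ *
+ *   qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf, num_dumped_dwords,
+ *                                     &size_bytes);
+ *   text = vzalloc(size_bytes);
+ *   qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords, text,
+ *                              &errors, &warnings);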
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                  u32 *dump_buf,
+                                                  u32 num_dumped_dwords,
+                                                  u32 *results_buf_size);
+/**
+ * qed_print_idle_chk_results(): Prints idle check results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the idle check results.
+ * @num_errors: (OUT) number of errors found in idle check.
+ * @num_warnings: (OUT) number of warnings found in idle check.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+                                           u32 *dump_buf,
+                                           u32 num_dumped_dwords,
+                                           char *results_buf,
+                                           u32 *num_errors,
+                                           u32 *num_warnings);
+
+/**
+ * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @meta_buf: Meta buffer.
+ *
+ * Return: Void.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ */
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+                                     const u32 *meta_buf);
+
+/**
+ * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP Trace dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+                                                   u32 *dump_buf,
+                                                   u32 num_dumped_dwords,
+                                                   u32 *results_buf_size);
+
+/**
+ * qed_print_mcp_trace_results(): Prints MCP Trace results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+                                            u32 *dump_buf,
+                                            u32 num_dumped_dwords,
+                                            char *results_buf);
+
+/**
+ * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
+ * keeps the MCP trace meta data allocated, to support continuous MCP Trace
+ * parsing. After the continuous parsing ends, qed_mcp_trace_free_meta_data()
+ * should be called to free the meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+                                                 u32 *dump_buf,
+                                                 char *results_buf);
+
+/**
+ * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_bytes: Number of bytes that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+                                         u8 *dump_buf,
+                                         u32 num_dumped_bytes,
+                                         char *results_buf);
+
+/**
+ * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
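+ *
+ * Illustrative sketch of the continuous-parsing flow this function pairs
+ * with (simplified; dump_buf and next_dump_buf are placeholders for
+ * successive MCP Trace dumps):
+ *
+ *   qed_print_mcp_trace_results_cont(p_hwfn, dump_buf, results_buf);
+ *   qed_print_mcp_trace_results_cont(p_hwfn, next_dump_buf, results_buf);
+ *   qed_mcp_trace_free_meta_data(p_hwfn);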
+ */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); + +/** + * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size + * for reg_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_reg_fifo_results(): Prints reg fifo results. + * + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size + * for igu_fifo results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_igu_fifo_results(): Prints IGU fifo results + * + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the IGU fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_protection_override_results_buf_size(): Returns the required + * buffer size for protection override results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status +qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_protection_override_results(): Prints protection override + * results. + * + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size + * for FW Asserts results (in bytes). + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. 
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + +/** + * qed_print_fw_asserts_results(): Prints FW Asserts results. + * + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer, starting from the header. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the FW Asserts results. + * + * Return: Error if the parsing fails, ok otherwise. + */ +enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf); + +/** + * qed_dbg_parse_attn(): Parses and prints attention registers values in + * the specified results struct. + * + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. + */ +enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c new file mode 100644 index 0000000000..dc93ddea89 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -0,0 +1,2414 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/dcbnl.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_hsi.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_rdma.h" +#ifdef CONFIG_DCB +#include <linux/qed/qed_eth_if.h> +#endif + +#define QED_DCBX_MAX_MIB_READ_TRY (100) +#define QED_ETH_TYPE_DEFAULT (0) +#define QED_ETH_TYPE_ROCE (0x8915) +#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7) +#define QED_ETH_TYPE_FCOE (0x8906) +#define QED_TCP_PORT_ISCSI (0xCBC) + +#define QED_DCBX_INVALID_PRIORITY 0xFF + +/* Get Traffic Class from priority traffic class table, 4 bits represent + * the traffic class corresponding to the priority. 
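+ * Priority 0 occupies the most significant nibble of the table (bits 31:28)
+ * and priority 7 the least significant one (bits 3:0); only the low three
+ * bits of each nibble carry the TC value, hence the 0x7 mask below.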
+ */ +#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \ + ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7) + +static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = { + {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_ISCSI}, + {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_FCOE}, + {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_ETH_ROCE}, + {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_ETH_ROCE}, + {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}, +}; + +static bool qed_dcbx_app_ethtype(u32 app_info_bitmap) +{ + return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_ETHTYPE); +} + +static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE); +} + +static bool qed_dcbx_app_port(u32 app_info_bitmap) +{ + return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_PORT); +} + +static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type) +{ + u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE); + + /* Old MFW */ + if (mfw_val == DCBX_APP_SF_IEEE_RESERVED) + return qed_dcbx_app_port(app_info_bitmap); + + return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT); +} + +static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) +{ + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT)); +} + +static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) +{ + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_TCP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_TCP_PORT_ISCSI)); +} + +static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) +{ + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE)); +} + +static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) +{ + bool ethtype; + + if (ieee) + ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap); + else + ethtype = qed_dcbx_app_ethtype(app_info_bitmap); + + return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE)); +} + +static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee) +{ + bool port; + + if (ieee) + port = qed_dcbx_ieee_app_port(app_info_bitmap, + DCBX_APP_SF_IEEE_UDP_PORT); + else + port = qed_dcbx_app_port(app_info_bitmap); + + return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2)); +} + +static void +qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data) +{ + enum dcbx_protocol_type id; + int i; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n", + p_data->dcbx_enabled); + + for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { + id = qed_dcbx_app_update[i].id; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n", + qed_dcbx_app_update[i].name, p_data->arr[id].update, + p_data->arr[id].enable, p_data->arr[id].priority, + p_data->arr[id].tc, p_hwfn->hw_info.num_active_tc); + } +} + +static void +qed_dcbx_set_params(struct qed_dcbx_results *p_data, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool app_tlv, bool enable, 
u8 prio, u8 tc, + enum dcbx_protocol_type type, + enum qed_pci_personality personality) +{ + /* PF update ramrod data */ + p_data->arr[type].enable = enable; + p_data->arr[type].priority = prio; + p_data->arr[type].tc = tc; + if (enable) + p_data->arr[type].update = UPDATE_DCB; + else + p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; + + if (test_bit(QED_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->cdev->mf_bits)) + p_data->arr[type].dont_add_vlan0 = true; + + /* QM reconf data */ + if (app_tlv && p_hwfn->hw_info.personality == personality) + qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc); + + /* Configure dcbx vlan priority in doorbell block for roce EDPM */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + type == DCBX_PROTOCOL_ROCE) { + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1); + } +} + +/* Update app protocol data and hw_info fields with the TLV info */ +static void +qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool app_tlv, bool enable, u8 prio, u8 tc, + enum dcbx_protocol_type type) +{ + enum qed_pci_personality personality; + enum dcbx_protocol_type id; + int i; + + for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) { + id = qed_dcbx_app_update[i].id; + + if (type != id) + continue; + + personality = qed_dcbx_app_update[i].personality; + + qed_dcbx_set_params(p_data, p_hwfn, p_ptt, app_tlv, enable, + prio, tc, type, personality); + } +} + +static bool +qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn, + u32 app_prio_bitmap, + u16 id, enum dcbx_protocol_type *type, bool ieee) +{ + if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) { + *type = DCBX_PROTOCOL_FCOE; + } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) { + *type = DCBX_PROTOCOL_ROCE; + } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) { + *type = DCBX_PROTOCOL_ISCSI; + } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) { + *type = DCBX_PROTOCOL_ETH; + } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) { + *type = DCBX_PROTOCOL_ROCE_V2; + } else { + *type = DCBX_MAX_PROTOCOL_TYPE; + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "No action required, App TLV entry = 0x%x\n", + app_prio_bitmap); + return false; + } + + return true; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. + */ +static int +qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_results *p_data, + struct dcbx_app_priority_entry *p_tbl, + u32 pri_tc_tbl, int count, u8 dcbx_version) +{ + enum dcbx_protocol_type type; + bool enable, ieee, eth_tlv; + u8 tc, priority_map; + u16 protocol_id; + int priority; + int i; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count); + + ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE); + eth_tlv = false; + /* Parse APP TLV */ + for (i = 0; i < count; i++) { + protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PRI_MAP); + priority = ffs(priority_map) - 1; + if (priority < 0) { + DP_ERR(p_hwfn, "Invalid priority\n"); + return -EINVAL; + } + + tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority); + if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + protocol_id, &type, ieee)) { + /* ETH always have the enable bit reset, as it gets + * vlan information per packet. 
For other protocols, + * should be set according to the dcbx_enabled + * indication, but we only got here if there was an + * app tlv for the protocol, so dcbx must be enabled. + */ + if (type == DCBX_PROTOCOL_ETH) { + enable = false; + eth_tlv = true; + } else { + enable = true; + } + + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, true, + enable, priority, tc, type); + } + } + + /* If Eth TLV is not detected, use UFP TC as default TC */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && !eth_tlv) + p_data->arr[DCBX_PROTOCOL_ETH].tc = p_hwfn->ufp_info.tc; + + /* Update ramrod protocol data and hw_info fields + * with default info when corresponding APP TLV's are not detected. + * The enabled field has a different logic for ethernet as only for + * ethernet dcb should disabled by default, as the information arrives + * from the OS (unless an explicit app tlv was present). + */ + tc = p_data->arr[DCBX_PROTOCOL_ETH].tc; + priority = p_data->arr[DCBX_PROTOCOL_ETH].priority; + for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) { + if (p_data->arr[type].update) + continue; + + enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; + qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, false, enable, + priority, tc, type); + } + + return 0; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. + */ +static int +qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct dcbx_app_priority_feature *p_app; + struct dcbx_app_priority_entry *p_tbl; + struct qed_dcbx_results data = { 0 }; + struct dcbx_ets_feature *p_ets; + struct qed_hw_info *p_info; + u32 pri_tc_tbl, flags; + u8 dcbx_version; + int num_entries; + int rc = 0; + + flags = p_hwfn->p_dcbx_info->operational.flags; + dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION); + + p_app = &p_hwfn->p_dcbx_info->operational.features.app; + p_tbl = p_app->app_pri_tbl; + + p_ets = &p_hwfn->p_dcbx_info->operational.features.ets; + pri_tc_tbl = p_ets->pri_tc_tbl[0]; + + p_info = &p_hwfn->hw_info; + num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); + + rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl, + num_entries, dcbx_version); + if (rc) + return rc; + + p_info->num_active_tc = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_MAX_TCS); + p_hwfn->qm_info.ooo_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_OOO_TC); + data.pf_id = p_hwfn->rel_pf_id; + data.dcbx_enabled = !!dcbx_version; + + qed_dcbx_dp_protocol(p_hwfn, &data); + + memcpy(&p_hwfn->p_dcbx_info->results, &data, + sizeof(struct qed_dcbx_results)); + + return 0; +} + +static int +qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_dcbx_mib_meta_data *p_data, + enum qed_mib_read_type type) +{ + u32 prefix_seq_num, suffix_seq_num; + int read_count = 0; + int rc = 0; + + /* The data is considered to be valid only if both sequence numbers are + * the same. 
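+ * A mismatch means the MFW updated the MIB while it was being copied, so the
+ * copy is retried, up to QED_DCBX_MAX_MIB_READ_TRY times.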
+ */ + do { + if (type == QED_DCBX_REMOTE_LLDP_MIB) { + qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote, + p_data->addr, p_data->size); + prefix_seq_num = p_data->lldp_remote->prefix_seq_num; + suffix_seq_num = p_data->lldp_remote->suffix_seq_num; + } else { + qed_memcpy_from(p_hwfn, p_ptt, p_data->mib, + p_data->addr, p_data->size); + prefix_seq_num = p_data->mib->prefix_seq_num; + suffix_seq_num = p_data->mib->suffix_seq_num; + } + read_count++; + + DP_VERBOSE(p_hwfn, + QED_MSG_DCB, + "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + } while ((prefix_seq_num != suffix_seq_num) && + (read_count < QED_DCBX_MAX_MIB_READ_TRY)); + + if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) { + DP_ERR(p_hwfn, + "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + rc = -EIO; + } + + return rc; +} + +static void +qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn, + struct qed_dcbx_app_prio *p_prio, + struct qed_dcbx_results *p_results) +{ + u8 val; + + p_prio->roce = QED_DCBX_INVALID_PRIORITY; + p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY; + p_prio->iscsi = QED_DCBX_INVALID_PRIORITY; + p_prio->fcoe = QED_DCBX_INVALID_PRIORITY; + + if (p_results->arr[DCBX_PROTOCOL_ROCE].update && + p_results->arr[DCBX_PROTOCOL_ROCE].enable) + p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority; + + if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update && + p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) { + val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority; + p_prio->roce_v2 = val; + } + + if (p_results->arr[DCBX_PROTOCOL_ISCSI].update && + p_results->arr[DCBX_PROTOCOL_ISCSI].enable) + p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority; + + if (p_results->arr[DCBX_PROTOCOL_FCOE].update && + p_results->arr[DCBX_PROTOCOL_FCOE].enable) + p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority; + + if (p_results->arr[DCBX_PROTOCOL_ETH].update && + p_results->arr[DCBX_PROTOCOL_ETH].enable) + p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority; + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n", + p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe, + p_prio->eth); +} + +static void +qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + struct qed_dcbx_params *p_params, bool ieee) +{ + struct qed_app_entry *entry; + u8 pri_map; + int i; + + p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags, + DCBX_APP_WILLING); + p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED); + p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR); + p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags, + DCBX_APP_NUM_ENTRIES); + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + entry = &p_params->app_entry[i]; + if (ieee) { + u8 sf_ieee; + u32 val; + + sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF_IEEE); + switch (sf_ieee) { + case DCBX_APP_SF_IEEE_RESERVED: + /* Old MFW */ + val = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF); + entry->sf_ieee = val ? 
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT : + QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_ETHTYPE: + entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; + break; + case DCBX_APP_SF_IEEE_TCP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; + break; + case DCBX_APP_SF_IEEE_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; + break; + case DCBX_APP_SF_IEEE_TCP_UDP_PORT: + entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + } + } else { + entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_SF)); + } + + pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP); + entry->prio = ffs(pri_map) - 1; + entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + entry->proto_id, + &entry->proto_type, ieee); + } + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "APP params: willing %d, valid %d error = %d\n", + p_params->app_willing, p_params->app_valid, + p_params->app_error); +} + +static void +qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn, + u32 pfc, struct qed_dcbx_params *p_params) +{ + u8 pfc_map; + + p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING); + p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS); + p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED); + pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP); + p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0); + p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1); + p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2); + p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3); + p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4); + p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5); + p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6); + p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7); + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "PFC params: willing %d, pfc_bitmap %u max_tc = %u enabled = %d\n", + p_params->pfc.willing, pfc_map, p_params->pfc.max_tc, + p_params->pfc.enabled); +} + +static void +qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct qed_dcbx_params *p_params) +{ + __be32 bw_map[2], tsa_map[2]; + u32 pri_map; + int i; + + p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_WILLING); + p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_ENABLED); + p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS); + p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_MAX_TCS); + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "ETS params: willing %d, enabled = %d ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n", + p_params->ets_willing, p_params->ets_enabled, + p_params->ets_cbs, p_ets->pri_tc_tbl[0], + p_params->max_ets_tc); + + if (p_params->ets_enabled && !p_params->max_ets_tc) { + p_params->max_ets_tc = QED_MAX_PFC_PRIORITIES; + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "ETS params: max_ets_tc is forced to %d\n", + p_params->max_ets_tc); + } + + /* 8 bit tsa and bw data corresponding to each of the 8 TC's are + * encoded in a type u32 array of size 2. 
+ */
+ cpu_to_be32_array(bw_map, p_ets->tc_bw_tbl, 2);
+ cpu_to_be32_array(tsa_map, p_ets->tc_tsa_tbl, 2);
+ pri_map = p_ets->pri_tc_tbl[0];
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
+{
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+ qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params, false);
+ params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *params)
+{
+ struct qed_dcbx_operational_params *p_operational;
+ struct qed_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If the DCBx version is non-zero, then negotiation
+ * was successfully performed
+ */
+ p_operational = &params->operational;
+ enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx is disabled\n");
+ return;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_STATIC);
+ p_operational->local = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Version support: ieee %d, cee %d, static %d\n",
+ p_operational->ieee, p_operational->cee,
+ p_operational->local);
+
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
+ qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+ err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+
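+ /* Only the nearest-bridge LLDP agent's parameters are reported here. */
+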
memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id, + sizeof(p_local->local_chassis_id)); + memcpy(params->lldp_local.local_port_id, p_local->local_port_id, + sizeof(p_local->local_port_id)); +} + +static void +qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_get *params) +{ + struct lldp_status_params_s *p_remote; + + p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE]; + + memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id, + sizeof(p_remote->peer_chassis_id)); + memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id, + sizeof(p_remote->peer_port_id)); +} + +static int +qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_dcbx_get *p_params, + enum qed_mib_read_type type) +{ + switch (type) { + case QED_DCBX_REMOTE_MIB: + qed_dcbx_get_remote_params(p_hwfn, p_params); + break; + case QED_DCBX_LOCAL_MIB: + qed_dcbx_get_local_params(p_hwfn, p_params); + break; + case QED_DCBX_OPERATIONAL_MIB: + qed_dcbx_get_operational_params(p_hwfn, p_params); + break; + case QED_DCBX_REMOTE_LLDP_MIB: + qed_dcbx_get_remote_lldp_params(p_hwfn, p_params); + break; + case QED_DCBX_LOCAL_LLDP_MIB: + qed_dcbx_get_local_lldp_params(p_hwfn, p_params); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + return -EINVAL; + } + + return 0; +} + +static int +qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dcbx_mib_meta_data data; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_config_params); + data.lldp_local = p_hwfn->p_dcbx_info->lldp_local; + data.size = sizeof(struct lldp_config_params_s); + qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); + + return 0; +} + +static int +qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_status_params); + data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote; + data.size = sizeof(struct lldp_status_params_s); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, operational_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->operational; + data.size = sizeof(struct dcbx_mib); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + struct qed_dcbx_mib_meta_data data; + int rc = 0; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, remote_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->remote; + data.size = sizeof(struct dcbx_mib); + rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static int +qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dcbx_mib_meta_data data; + + memset(&data, 0, sizeof(data)); + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, local_admin_dcbx_mib); + data.local_admin = 
&p_hwfn->p_dcbx_info->local_admin; + data.size = sizeof(struct dcbx_local_params); + qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size); + + return 0; +} + +static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + int rc = -EINVAL; + + switch (type) { + case QED_DCBX_OPERATIONAL_MIB: + rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_REMOTE_MIB: + rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_LOCAL_MIB: + rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt); + break; + case QED_DCBX_REMOTE_LLDP_MIB: + rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type); + break; + case QED_DCBX_LOCAL_LLDP_MIB: + rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + } + + return rc; +} + +static void qed_dcbx_aen(struct qed_hwfn *hwfn, u32 mib_type) +{ + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + void *cookie = hwfn->cdev->ops_cookie; + + if (cookie && op->dcbx_aen) + op->dcbx_aen(cookie, &hwfn->p_dcbx_info->get, mib_type); +} + +/* Read updated MIB. + * Reconfigure QM and invoke PF update ramrod command if operational MIB + * change is detected. + */ +int +qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_mib_read_type type) +{ + int rc = 0; + + rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + return rc; + + if (type == QED_DCBX_OPERATIONAL_MIB) { + rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt); + if (!rc) { + /* reconfigure tcs of QM queues according + * to negotiation results + */ + qed_qm_reconf(p_hwfn, p_ptt); + + /* update storm FW with negotiation results */ + qed_sp_pf_update(p_hwfn); + + /* for roce PFs, we may want to enable/disable DPM + * when DCBx change occurs + */ + if (p_hwfn->hw_info.personality == + QED_PCI_ETH_ROCE) + qed_roce_dpm_dcbx(p_hwfn, p_ptt); + } + } + + qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type); + + if (type == QED_DCBX_OPERATIONAL_MIB) { + struct qed_dcbx_results *p_data; + u16 val; + + /* Configure in NIG which protocols support EDPM and should + * honor PFC. 
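+ * A per-TC enable bitmap is built from the RoCE and RoCE-v2 traffic
+ * classes, shifted into the EDPM TC-enable field and combined with the
+ * global EDPM enable bit before being written to NIG_REG_TX_EDPM_CTRL.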
+ */ + p_data = &p_hwfn->p_dcbx_info->results; + val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) | + (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc); + val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT; + val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN; + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val); + } + + qed_dcbx_aen(p_hwfn, type); + + return rc; +} + +int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn) +{ + p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL); + if (!p_hwfn->p_dcbx_info) + return -ENOMEM; + + return 0; +} + +void qed_dcbx_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->p_dcbx_info); + p_hwfn->p_dcbx_info = NULL; +} + +static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, + struct qed_dcbx_results *p_src, + enum dcbx_protocol_type type) +{ + p_data->dcb_enable_flag = p_src->arr[type].enable; + p_data->dcb_priority = p_src->arr[type].priority; + p_data->dcb_tc = p_src->arr[type].tc; + p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0; +} + +/* Set pf update ramrod command params */ +void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest) +{ + struct protocol_dcb_data *p_dcb_data; + u8 update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update; + p_dest->update_fcoe_dcb_data_mode = update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update; + p_dest->update_roce_dcb_data_mode = update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update; + p_dest->update_rroce_dcb_data_mode = update_flag; + + update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update; + p_dest->update_iscsi_dcb_data_mode = update_flag; + update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update; + p_dest->update_eth_dcb_data_mode = update_flag; + + p_dcb_data = &p_dest->fcoe_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE); + p_dcb_data = &p_dest->roce_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE); + p_dcb_data = &p_dest->rroce_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ROCE_V2); + p_dcb_data = &p_dest->iscsi_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI); + p_dcb_data = &p_dest->eth_dcb_data; + qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); +} + +u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri) +{ + struct qed_dcbx_get *dcbx_info = &p_hwfn->p_dcbx_info->get; + + if (pri >= QED_MAX_PFC_PRIORITIES) { + DP_ERR(p_hwfn, "Invalid priority %d\n", pri); + return QED_DCBX_DEFAULT_TC; + } + + if (!dcbx_info->operational.valid) { + DP_VERBOSE(p_hwfn, QED_MSG_DCB, + "Dcbx parameters not available\n"); + return QED_DCBX_DEFAULT_TC; + } + + return dcbx_info->operational.params.ets_pri_tc_tbl[pri]; +} + +#ifdef CONFIG_DCB +static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_get *p_get, + enum qed_mib_read_type type) +{ + struct qed_ptt *p_ptt; + int rc; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + goto out; + + rc = qed_dcbx_get_params(p_hwfn, p_get, type); + +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +static void +qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn, + u32 *pfc, struct qed_dcbx_params *p_params) +{ + u8 pfc_map = 0; + int i; + + *pfc &= ~DCBX_PFC_ERROR_MASK; + + if (p_params->pfc.willing) + *pfc |= DCBX_PFC_WILLING_MASK; + else + *pfc &= 
~DCBX_PFC_WILLING_MASK; + + if (p_params->pfc.enabled) + *pfc |= DCBX_PFC_ENABLED_MASK; + else + *pfc &= ~DCBX_PFC_ENABLED_MASK; + + *pfc &= ~DCBX_PFC_CAPS_MASK; + *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT; + + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if (p_params->pfc.prio[i]) + pfc_map |= BIT(i); + + *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK; + *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT); + + DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc); +} + +static void +qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct qed_dcbx_params *p_params) +{ + __be32 bw_map[2], tsa_map[2]; + u32 val; + int i; + + if (p_params->ets_willing) + p_ets->flags |= DCBX_ETS_WILLING_MASK; + else + p_ets->flags &= ~DCBX_ETS_WILLING_MASK; + + if (p_params->ets_cbs) + p_ets->flags |= DCBX_ETS_CBS_MASK; + else + p_ets->flags &= ~DCBX_ETS_CBS_MASK; + + if (p_params->ets_enabled) + p_ets->flags |= DCBX_ETS_ENABLED_MASK; + else + p_ets->flags &= ~DCBX_ETS_ENABLED_MASK; + + p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK; + p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT; + + p_ets->pri_tc_tbl[0] = 0; + + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { + ((u8 *)bw_map)[i] = p_params->ets_tc_bw_tbl[i]; + ((u8 *)tsa_map)[i] = p_params->ets_tc_tsa_tbl[i]; + + /* Copy the priority value to the corresponding 4 bits in the + * traffic class table. + */ + val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4)); + p_ets->pri_tc_tbl[0] |= val; + } + + be32_to_cpu_array(p_ets->tc_bw_tbl, bw_map, 2); + be32_to_cpu_array(p_ets->tc_tsa_tbl, tsa_map, 2); +} + +static void +qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct qed_dcbx_params *p_params, bool ieee) +{ + u32 *entry; + int i; + + if (p_params->app_willing) + p_app->flags |= DCBX_APP_WILLING_MASK; + else + p_app->flags &= ~DCBX_APP_WILLING_MASK; + + if (p_params->app_valid) + p_app->flags |= DCBX_APP_ENABLED_MASK; + else + p_app->flags &= ~DCBX_APP_ENABLED_MASK; + + p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK; + p_app->flags |= (u32)p_params->num_app_entries << + DCBX_APP_NUM_ENTRIES_SHIFT; + + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) { + entry = &p_app->app_pri_tbl[i].entry; + *entry = 0; + if (ieee) { + *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK); + switch (p_params->app_entry[i].sf_ieee) { + case QED_DCBX_SF_IEEE_ETHTYPE: + *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + break; + case QED_DCBX_SF_IEEE_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + break; + case QED_DCBX_SF_IEEE_TCP_UDP_PORT: + *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT << + DCBX_APP_SF_IEEE_SHIFT); + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + break; + } + } else { + *entry &= ~DCBX_APP_SF_MASK; + if (p_params->app_entry[i].ethtype) + *entry |= ((u32)DCBX_APP_SF_ETHTYPE << + DCBX_APP_SF_SHIFT); + else + *entry |= ((u32)DCBX_APP_SF_PORT << + DCBX_APP_SF_SHIFT); + } + + *entry &= ~DCBX_APP_PROTOCOL_ID_MASK; + *entry |= ((u32)p_params->app_entry[i].proto_id << + DCBX_APP_PROTOCOL_ID_SHIFT); + *entry &= ~DCBX_APP_PRI_MAP_MASK; + *entry |= ((u32)(p_params->app_entry[i].prio) << + 
DCBX_APP_PRI_MAP_SHIFT);
+ }
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct qed_dcbx_set *params)
+{
+ bool ieee = false;
+
+ local_admin->flags = 0;
+ memcpy(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled) {
+ local_admin->config = params->ver_num;
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Dcbx version = %d\n",
+ local_admin->config);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+ qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+ qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+ qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params, ieee);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_mib_meta_data data;
+ u32 resp = 0, param = 0;
+ int rc = 0;
+
+ if (!hw_commit) {
+ memcpy(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ /* clear the set-params cache */
+ memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+ memset(&local_admin, 0, sizeof(local_admin));
+ qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+ return rc;
+}
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params)
+{
+ struct qed_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ memcpy(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info)
+ return -ENOMEM;
+
+ rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return rc;
+ }
+
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ if (dcbx_info->operational.local)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
+ memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+ enum
qed_mib_read_type type) +{ + struct qed_dcbx_get *dcbx_info; + + dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_ATOMIC); + if (!dcbx_info) + return NULL; + + if (qed_dcbx_query_params(hwfn, dcbx_info, type)) { + kfree(dcbx_info); + return NULL; + } + + if ((type == QED_DCBX_OPERATIONAL_MIB) && + !dcbx_info->operational.enabled) { + DP_INFO(hwfn, "DCBX is not enabled/operational\n"); + kfree(dcbx_info); + return NULL; + } + + return dcbx_info; +} + +static u8 qed_dcbnl_getstate(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + bool enabled; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + enabled = dcbx_info->operational.enabled; + DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled); + kfree(dcbx_info); + + return enabled; +} + +static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + dcbx_set.enabled = !!state; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc ? 1 : 0; +} + +static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type, + u8 *pgid, u8 *bw_pct, u8 *up_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc); + *prio_type = *pgid = *bw_pct = *up_map = 0; + if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid tc %d\n", tc); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc]; + kfree(dcbx_info); +} + +static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + *bw_pct = 0; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid); + if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid pgid %d\n", pgid); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid]; + DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct); + kfree(dcbx_info); +} + +static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio, + u8 *bwg_id, u8 *bw_pct, u8 *up_map) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); + *prio = *bwg_id = *bw_pct = *up_map = 0; +} + +static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev, + int bwg_id, u8 *bw_pct) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); + *bw_pct = 0; +} + +static void qed_dcbnl_getpfccfg(struct qed_dev *cdev, + int priority, u8 *setting) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority); + if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", priority); + return; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return; + + *setting = dcbx_info->operational.params.pfc.prio[priority]; + DP_VERBOSE(hwfn, 
QED_MSG_DCB, "setting = %d\n", *setting); + kfree(dcbx_info); +} + +static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n", + priority, setting); + if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", priority); + return; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.prio[priority] = !!setting; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int rc = 0; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 1; + + switch (capid) { + case DCB_CAP_ATTR_PG: + case DCB_CAP_ATTR_PFC: + case DCB_CAP_ATTR_UP2TC: + case DCB_CAP_ATTR_GSP: + *cap = true; + break; + case DCB_CAP_ATTR_PG_TCS: + case DCB_CAP_ATTR_PFC_TCS: + *cap = 0x80; + break; + case DCB_CAP_ATTR_DCBX: + *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE | + DCB_CAP_DCBX_STATIC); + break; + default: + *cap = false; + rc = 1; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap); + kfree(dcbx_info); + + return rc; +} + +static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int rc = 0; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + *num = dcbx_info->operational.params.max_ets_tc; + break; + case DCB_NUMTCS_ATTR_PFC: + *num = dcbx_info->operational.params.pfc.max_tc; + break; + default: + rc = -EINVAL; + } + + kfree(dcbx_info); + DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num); + + return rc; +} + +static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + bool enabled; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + enabled = dcbx_info->operational.params.pfc.enabled; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled); + kfree(dcbx_info); + + return enabled; +} + +static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + u8 mode = 0; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return 0; + + if (dcbx_info->operational.ieee) + mode |= DCB_CAP_DCBX_VER_IEEE; + if (dcbx_info->operational.cee) + mode |= DCB_CAP_DCBX_VER_CEE; + if (dcbx_info->operational.local) + mode |= DCB_CAP_DCBX_STATIC; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode); + kfree(dcbx_info); + + return mode; +} + +static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev, + int tc, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + 
DP_VERBOSE(hwfn, QED_MSG_DCB, + "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n", + tc, pri_type, pgid, bw_pct, up_map); + + if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid tc %d\n", tc); + return; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio, + u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); +} + +static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct); + if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid pgid %d\n", pgid); + return; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct) +{ + DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n"); +} + +static u8 qed_dcbnl_setall(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num); + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + switch (tcid) { + case DCB_NUMTCS_ATTR_PG: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.max_ets_tc = num; + break; + case DCB_NUMTCS_ATTR_PFC: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.max_tc = num; + break; + default: + DP_INFO(hwfn, "Invalid tcid %d\n", tcid); + return -EINVAL; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return 0; +} + +static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.enabled = !!state; + + 
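+ /* hw_commit is 0 below, so this change is only cached in the set-params
+ * shadow; it is sent to the management FW when a later ->setall() call
+ * commits the cached configuration.
+ */
+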
ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); +} + +static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_app_entry *entry; + bool ethtype; + u8 prio = 0; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_info->operational.params.app_entry[i]; + if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) { + prio = entry->prio; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + return prio; +} + +static int qed_dcbnl_setapp(struct qed_dev *cdev, + u8 idtype, u16 idval, u8 pri_map) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_app_entry *entry; + struct qed_ptt *ptt; + bool ethtype; + int rc, i; + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE); + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_set.config.params.app_entry[i]; + if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) + break; + /* First empty slot */ + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App table is full\n"); + return -EBUSY; + } + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_entry[i].ethtype = ethtype; + dcbx_set.config.params.app_entry[i].proto_id = idval; + dcbx_set.config.params.app_entry[i].prio = pri_map; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode); + + if (!(mode & DCB_CAP_DCBX_VER_IEEE) && + !(mode & DCB_CAP_DCBX_VER_CEE) && !(mode & DCB_CAP_DCBX_STATIC)) { + DP_INFO(hwfn, "Allowed modes are cee, ieee or static\n"); + return 1; + } + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + dcbx_set.ver_num = 0; + if (mode & DCB_CAP_DCBX_VER_CEE) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE; + dcbx_set.enabled = true; + } + + if (mode & DCB_CAP_DCBX_VER_IEEE) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE; + dcbx_set.enabled = true; + } + + if (mode & DCB_CAP_DCBX_STATIC) { + dcbx_set.ver_num |= DCBX_CONFIG_VERSION_STATIC; + dcbx_set.enabled = true; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid); + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if 
(!dcbx_info) + return 1; + + *flags = 0; + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + if (dcbx_info->operational.params.ets_enabled) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_PFC: + if (dcbx_info->operational.params.pfc.enabled) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + case DCB_FEATCFG_ATTR_APP: + if (dcbx_info->operational.params.app_valid) + *flags = DCB_FEATCFG_ENABLE; + else + *flags = DCB_FEATCFG_ERROR; + break; + default: + DP_INFO(hwfn, "Invalid feature-ID %d\n", featid); + kfree(dcbx_info); + return 1; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags); + kfree(dcbx_info); + + return 0; +} + +static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_set dcbx_set; + bool enabled, willing; + struct qed_ptt *ptt; + int rc; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n", + featid, flags); + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return 1; + + enabled = !!(flags & DCB_FEATCFG_ENABLE); + willing = !!(flags & DCB_FEATCFG_WILLING); + switch (featid) { + case DCB_FEATCFG_ATTR_PG: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.ets_enabled = enabled; + dcbx_set.config.params.ets_willing = willing; + break; + case DCB_FEATCFG_ATTR_PFC: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + dcbx_set.config.params.pfc.enabled = enabled; + dcbx_set.config.params.pfc.willing = willing; + break; + case DCB_FEATCFG_ATTR_APP: + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_willing = willing; + break; + default: + DP_INFO(hwfn, "Invalid feature-ID %d\n", featid); + return 1; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return 1; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return 0; +} + +static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev, + struct dcb_peer_app_info *info, + u16 *app_count) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + info->willing = dcbx_info->remote.params.app_willing; + info->error = dcbx_info->remote.params.app_error; + *app_count = dcbx_info->remote.params.num_app_entries; + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev, + struct dcb_app *table) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) { + if (dcbx_info->remote.params.app_entry[i].ethtype) + table[i].selector = DCB_APP_IDTYPE_ETHTYPE; + else + table[i].selector = DCB_APP_IDTYPE_PORTNUM; + table[i].priority = dcbx_info->remote.params.app_entry[i].prio; + table[i].protocol = + dcbx_info->remote.params.app_entry[i].proto_id; + } + + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if 
(dcbx_info->remote.params.pfc.prio[i]) + pfc->pfc_en |= BIT(i); + + pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc; + DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n", + pfc->pfc_en, pfc->tcs_supported); + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + int i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB); + if (!dcbx_info) + return -EINVAL; + + pg->willing = dcbx_info->remote.params.ets_willing; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) { + pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i]; + pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i]; + } + + DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing); + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev, + struct ieee_pfc *pfc, bool remote) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_params *params; + struct qed_dcbx_get *dcbx_info; + int rc, i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + if (remote) { + memset(dcbx_info, 0, sizeof(*dcbx_info)); + rc = qed_dcbx_query_params(hwfn, dcbx_info, + QED_DCBX_REMOTE_MIB); + if (rc) { + kfree(dcbx_info); + return -EINVAL; + } + + params = &dcbx_info->remote.params; + } else { + params = &dcbx_info->operational.params; + } + + pfc->pfc_cap = params->pfc.max_tc; + pfc->pfc_en = 0; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + if (params->pfc.prio[i]) + pfc->pfc_en |= BIT(i); + + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + return qed_dcbnl_get_ieee_pfc(cdev, pfc, false); +} + +static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc, i; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i)); + + dcbx_set.config.params.pfc.max_tc = pfc->pfc_cap; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev, + struct ieee_ets *ets, bool remote) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_params *params; + int rc; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + if (remote) { + memset(dcbx_info, 0, sizeof(*dcbx_info)); + rc 
= qed_dcbx_query_params(hwfn, dcbx_info, + QED_DCBX_REMOTE_MIB); + if (rc) { + kfree(dcbx_info); + return -EINVAL; + } + + params = &dcbx_info->remote.params; + } else { + params = &dcbx_info->operational.params; + } + + ets->ets_cap = params->max_ets_tc; + ets->willing = params->ets_willing; + ets->cbs = params->ets_cbs; + memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa)); + memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc)); + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + return qed_dcbnl_get_ieee_ets(cdev, ets, false); +} + +static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_ptt *ptt; + int rc; + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG; + dcbx_set.config.params.max_ets_tc = ets->ets_cap; + dcbx_set.config.params.ets_willing = ets->willing; + dcbx_set.config.params.ets_cbs = ets->cbs; + memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw, + sizeof(ets->tc_tx_bw)); + memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa, + sizeof(ets->tc_tsa)); + memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc, + sizeof(ets->prio_tc)); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EINVAL; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int +qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev, struct ieee_ets *ets) +{ + return qed_dcbnl_get_ieee_ets(cdev, ets, true); +} + +static int +qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc) +{ + return qed_dcbnl_get_ieee_pfc(cdev, pfc, true); +} + +static int qed_get_sf_ieee_value(u8 selector, u8 *sf_ieee) +{ + switch (selector) { + case IEEE_8021QAZ_APP_SEL_ETHERTYPE: + *sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE; + break; + case IEEE_8021QAZ_APP_SEL_STREAM: + *sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT; + break; + case IEEE_8021QAZ_APP_SEL_DGRAM: + *sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT; + break; + case IEEE_8021QAZ_APP_SEL_ANY: + *sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_app_entry *entry; + u8 prio = 0; + u8 sf_ieee; + int i; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d\n", + app->selector, app->protocol); + + if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) { + DP_INFO(cdev, "Invalid selector field value %d\n", + app->selector); + return -EINVAL; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = 
&dcbx_info->operational.params.app_entry[i]; + if ((entry->sf_ieee == sf_ieee) && + (entry->proto_id == app->protocol)) { + prio = entry->prio; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector, + app->protocol); + kfree(dcbx_info); + return -EINVAL; + } + + app->priority = ffs(prio) - 1; + + kfree(dcbx_info); + + return 0; +} + +static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_dcbx_get *dcbx_info; + struct qed_dcbx_set dcbx_set; + struct qed_app_entry *entry; + struct qed_ptt *ptt; + u8 sf_ieee; + int rc, i; + + DP_VERBOSE(hwfn, QED_MSG_DCB, "selector = %d protocol = %d pri = %d\n", + app->selector, app->protocol, app->priority); + if (app->priority >= QED_MAX_PFC_PRIORITIES) { + DP_INFO(hwfn, "Invalid priority %d\n", app->priority); + return -EINVAL; + } + + if (qed_get_sf_ieee_value(app->selector, &sf_ieee)) { + DP_INFO(cdev, "Invalid selector field value %d\n", + app->selector); + return -EINVAL; + } + + dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB); + if (!dcbx_info) + return -EINVAL; + + if (!dcbx_info->operational.ieee) { + DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n"); + kfree(dcbx_info); + return -EINVAL; + } + + kfree(dcbx_info); + + memset(&dcbx_set, 0, sizeof(dcbx_set)); + rc = qed_dcbx_get_config_params(hwfn, &dcbx_set); + if (rc) + return -EINVAL; + + for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) { + entry = &dcbx_set.config.params.app_entry[i]; + if ((entry->sf_ieee == sf_ieee) && + (entry->proto_id == app->protocol)) + break; + /* First empty slot */ + if (!entry->proto_id) { + dcbx_set.config.params.num_app_entries++; + break; + } + } + + if (i == QED_DCBX_MAX_APP_PROTOCOL) { + DP_ERR(cdev, "App table is full\n"); + return -EBUSY; + } + + dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG; + dcbx_set.config.params.app_entry[i].sf_ieee = sf_ieee; + dcbx_set.config.params.app_entry[i].proto_id = app->protocol; + dcbx_set.config.params.app_entry[i].prio = BIT(app->priority); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = { + .getstate = qed_dcbnl_getstate, + .setstate = qed_dcbnl_setstate, + .getpgtccfgtx = qed_dcbnl_getpgtccfgtx, + .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx, + .getpgtccfgrx = qed_dcbnl_getpgtccfgrx, + .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx, + .getpfccfg = qed_dcbnl_getpfccfg, + .setpfccfg = qed_dcbnl_setpfccfg, + .getcap = qed_dcbnl_getcap, + .getnumtcs = qed_dcbnl_getnumtcs, + .getpfcstate = qed_dcbnl_getpfcstate, + .getdcbx = qed_dcbnl_getdcbx, + .setpgtccfgtx = qed_dcbnl_setpgtccfgtx, + .setpgtccfgrx = qed_dcbnl_setpgtccfgrx, + .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx, + .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx, + .setall = qed_dcbnl_setall, + .setnumtcs = qed_dcbnl_setnumtcs, + .setpfcstate = qed_dcbnl_setpfcstate, + .setapp = qed_dcbnl_setapp, + .setdcbx = qed_dcbnl_setdcbx, + .setfeatcfg = qed_dcbnl_setfeatcfg, + .getfeatcfg = qed_dcbnl_getfeatcfg, + .getapp = qed_dcbnl_getapp, + .peer_getappinfo = qed_dcbnl_peer_getappinfo, + .peer_getapptable = qed_dcbnl_peer_getapptable, + .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc, + .cee_peer_getpg = qed_dcbnl_cee_peer_getpg, + .ieee_getpfc = qed_dcbnl_ieee_getpfc, + .ieee_setpfc = qed_dcbnl_ieee_setpfc, + .ieee_getets = 
qed_dcbnl_ieee_getets, + .ieee_setets = qed_dcbnl_ieee_setets, + .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc, + .ieee_peer_getets = qed_dcbnl_ieee_peer_getets, + .ieee_getapp = qed_dcbnl_ieee_getapp, + .ieee_setapp = qed_dcbnl_ieee_setapp, +}; + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h new file mode 100644 index 0000000000..ea839e6055 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_DCBX_H +#define _QED_DCBX_H +#include <linux/types.h> +#include <linux/slab.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 + +enum qed_mib_read_type { + QED_DCBX_OPERATIONAL_MIB, + QED_DCBX_REMOTE_MIB, + QED_DCBX_LOCAL_MIB, + QED_DCBX_REMOTE_LLDP_MIB, + QED_DCBX_LOCAL_LLDP_MIB +}; + +struct qed_dcbx_app_data { + bool enable; /* DCB enabled */ + u8 update; /* Update indication */ + u8 priority; /* Priority */ + u8 tc; /* Traffic Class */ + bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */ +}; + +#define QED_DCBX_VERSION_DISABLED 0 +#define QED_DCBX_VERSION_IEEE 1 +#define QED_DCBX_VERSION_CEE 2 + +struct qed_dcbx_set { +#define QED_DCBX_OVERRIDE_STATE BIT(0) +#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1) +#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2) +#define QED_DCBX_OVERRIDE_APP_CFG BIT(3) +#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4) + u32 override_flags; + bool enabled; + struct qed_dcbx_admin_params config; + u32 ver_num; +}; + +struct qed_dcbx_results { + bool dcbx_enabled; + u8 pf_id; + struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE]; +}; + +struct qed_dcbx_app_metadata { + enum dcbx_protocol_type id; + char *name; + enum qed_pci_personality personality; +}; + +struct qed_dcbx_info { + struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; + struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; + struct dcbx_local_params local_admin; + struct qed_dcbx_results results; + struct dcbx_mib operational; + struct dcbx_mib remote; + struct qed_dcbx_set set; + struct qed_dcbx_get get; + u8 dcbx_cap; +}; + +struct qed_dcbx_mib_meta_data { + struct lldp_config_params_s *lldp_local; + struct lldp_status_params_s *lldp_remote; + struct dcbx_local_params *local_admin; + struct dcbx_mib *mib; + size_t size; + u32 addr; +}; + +extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass; + +#ifdef CONFIG_DCB +int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn, + struct qed_dcbx_set *params); + +int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_dcbx_set *params, bool hw_commit); +#endif + +/* QED local interface routines */ +int +qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_mib_read_type type); + +int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn); +void qed_dcbx_info_free(struct qed_hwfn *p_hwfn); +void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest); + +#define QED_DCBX_DEFAULT_TC 0 + +u8 qed_dcbx_get_priority_tc(struct qed_hwfn *p_hwfn, u8 pri); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c new file mode 100644 index 0000000000..cdcead614e --- /dev/null +++ 
b/drivers/net/ethernet/qlogic/qed/qed_debug.c @@ -0,0 +1,8712 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/crc32.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_hsi.h" +#include "qed_dbg_hsi.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +/* Memory groups enum */ +enum mem_groups { + MEM_GROUP_PXP_MEM, + MEM_GROUP_DMAE_MEM, + MEM_GROUP_CM_MEM, + MEM_GROUP_QM_MEM, + MEM_GROUP_DORQ_MEM, + MEM_GROUP_BRB_RAM, + MEM_GROUP_BRB_MEM, + MEM_GROUP_PRS_MEM, + MEM_GROUP_SDM_MEM, + MEM_GROUP_PBUF, + MEM_GROUP_IOR, + MEM_GROUP_RAM, + MEM_GROUP_BTB_RAM, + MEM_GROUP_RDIF_CTX, + MEM_GROUP_TDIF_CTX, + MEM_GROUP_CFC_MEM, + MEM_GROUP_CONN_CFC_MEM, + MEM_GROUP_CAU_PI, + MEM_GROUP_CAU_MEM, + MEM_GROUP_CAU_MEM_EXT, + MEM_GROUP_PXP_ILT, + MEM_GROUP_MULD_MEM, + MEM_GROUP_BTB_MEM, + MEM_GROUP_IGU_MEM, + MEM_GROUP_IGU_MSIX, + MEM_GROUP_CAU_SB, + MEM_GROUP_BMB_RAM, + MEM_GROUP_BMB_MEM, + MEM_GROUP_TM_MEM, + MEM_GROUP_TASK_CFC_MEM, + MEM_GROUPS_NUM +}; + +/* Memory groups names */ +static const char * const s_mem_group_names[] = { + "PXP_MEM", + "DMAE_MEM", + "CM_MEM", + "QM_MEM", + "DORQ_MEM", + "BRB_RAM", + "BRB_MEM", + "PRS_MEM", + "SDM_MEM", + "PBUF", + "IOR", + "RAM", + "BTB_RAM", + "RDIF_CTX", + "TDIF_CTX", + "CFC_MEM", + "CONN_CFC_MEM", + "CAU_PI", + "CAU_MEM", + "CAU_MEM_EXT", + "PXP_ILT", + "MULD_MEM", + "BTB_MEM", + "IGU_MEM", + "IGU_MSIX", + "CAU_SB", + "BMB_RAM", + "BMB_MEM", + "TM_MEM", + "TASK_CFC_MEM", +}; + +/* Idle check conditions */ + +static u32 cond5(const u32 *r, const u32 *imm) +{ + return ((r[0] & imm[0]) != imm[1]) && ((r[1] & imm[2]) != imm[3]); +} + +static u32 cond7(const u32 *r, const u32 *imm) +{ + return ((r[0] >> imm[0]) & imm[1]) != imm[2]; +} + +static u32 cond6(const u32 *r, const u32 *imm) +{ + return (r[0] & imm[0]) != imm[1]; +} + +static u32 cond9(const u32 *r, const u32 *imm) +{ + return ((r[0] & imm[0]) >> imm[1]) != + (((r[0] & imm[2]) >> imm[3]) | ((r[1] & imm[4]) << imm[5])); +} + +static u32 cond10(const u32 *r, const u32 *imm) +{ + return ((r[0] & imm[0]) >> imm[1]) != (r[0] & imm[2]); +} + +static u32 cond4(const u32 *r, const u32 *imm) +{ + return (r[0] & ~imm[0]) != imm[1]; +} + +static u32 cond0(const u32 *r, const u32 *imm) +{ + return (r[0] & ~r[1]) != imm[0]; +} + +static u32 cond14(const u32 *r, const u32 *imm) +{ + return (r[0] | imm[0]) != imm[1]; +} + +static u32 cond1(const u32 *r, const u32 *imm) +{ + return r[0] != imm[0]; +} + +static u32 cond11(const u32 *r, const u32 *imm) +{ + return r[0] != r[1] && r[2] == imm[0]; +} + +static u32 cond12(const u32 *r, const u32 *imm) +{ + return r[0] != r[1] && r[2] > imm[0]; +} + +static u32 cond3(const u32 *r, const u32 *imm) +{ + return r[0] != r[1]; +} + +static u32 cond13(const u32 *r, const u32 *imm) +{ + return r[0] & imm[0]; +} + +static u32 cond8(const u32 *r, const u32 *imm) +{ + return r[0] < (r[1] - imm[0]); +} + +static u32 cond2(const u32 *r, const u32 *imm) +{ + return r[0] > imm[0]; +} + +/* Array of Idle Check conditions */ +static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = { + cond0, + cond1, + cond2, + cond3, + cond4, + cond5, + cond6, + cond7, + cond8, + cond9, + cond10, + cond11, + cond12, + cond13, + cond14, +}; + +#define NUM_PHYS_BLOCKS 84 + +#define NUM_DBG_RESET_REGS 8 + +/******************************* Data Types 
**********************************/ + +enum hw_types { + HW_TYPE_ASIC, + PLATFORM_RESERVED, + PLATFORM_RESERVED2, + PLATFORM_RESERVED3, + PLATFORM_RESERVED4, + MAX_HW_TYPES +}; + +/* CM context types */ +enum cm_ctx_types { + CM_CTX_CONN_AG, + CM_CTX_CONN_ST, + CM_CTX_TASK_AG, + CM_CTX_TASK_ST, + NUM_CM_CTX_TYPES +}; + +/* Debug bus frame modes */ +enum dbg_bus_frame_modes { + DBG_BUS_FRAME_MODE_4ST = 0, /* 4 Storm dwords (no HW) */ + DBG_BUS_FRAME_MODE_2ST_2HW = 1, /* 2 Storm dwords, 2 HW dwords */ + DBG_BUS_FRAME_MODE_1ST_3HW = 2, /* 1 Storm dwords, 3 HW dwords */ + DBG_BUS_FRAME_MODE_4HW = 3, /* 4 HW dwords (no Storms) */ + DBG_BUS_FRAME_MODE_8HW = 4, /* 8 HW dwords (no Storms) */ + DBG_BUS_NUM_FRAME_MODES +}; + +/* Debug bus SEMI frame modes */ +enum dbg_bus_semi_frame_modes { + DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */ + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw,3 slow dw */ + DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */ + DBG_BUS_SEMI_NUM_FRAME_MODES +}; + +/* Debug bus filter types */ +enum dbg_bus_filter_types { + DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */ + DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */ + DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */ + DBG_BUS_FILTER_TYPE_ON /* Filter always on */ +}; + +/* Debug bus pre-trigger recording types */ +enum dbg_bus_pre_trigger_types { + DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */ + DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */ + DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */ +}; + +/* Debug bus post-trigger recording types */ +enum dbg_bus_post_trigger_types { + DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */ + DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */ +}; + +/* Debug bus other engine mode */ +enum dbg_bus_other_engine_modes { + DBG_BUS_OTHER_ENGINE_MODE_NONE, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX +}; + +/* DBG block Framing mode definitions */ +struct framing_mode_defs { + u8 id; + u8 blocks_dword_mask; + u8 storms_dword_mask; + u8 semi_framing_mode_id; + u8 full_buf_thr; +}; + +/* Chip constant definitions */ +struct chip_defs { + const char *name; + u8 dwords_per_cycle; + u8 num_framing_modes; + u32 num_ilt_pages; + struct framing_mode_defs *framing_modes; +}; + +/* HW type constant definitions */ +struct hw_type_defs { + const char *name; + u32 delay_factor; + u32 dmae_thresh; + u32 log_thresh; +}; + +/* RBC reset definitions */ +struct rbc_reset_defs { + u32 reset_reg_addr; + u32 reset_val[MAX_CHIP_IDS]; +}; + +/* Storm constant definitions. + * Addresses are in bytes, sizes are in quad-regs. 
+ */ +struct storm_defs { + char letter; + enum block_id sem_block_id; + enum dbg_bus_clients dbg_client_id[MAX_CHIP_IDS]; + bool has_vfc; + u32 sem_fast_mem_addr; + u32 sem_frame_mode_addr; + u32 sem_slow_enable_addr; + u32 sem_slow_mode_addr; + u32 sem_slow_mode1_conf_addr; + u32 sem_sync_dbg_empty_addr; + u32 sem_gpre_vect_addr; + u32 cm_ctx_wr_addr; + u32 cm_ctx_rd_addr[NUM_CM_CTX_TYPES]; + u32 cm_ctx_lid_sizes[MAX_CHIP_IDS][NUM_CM_CTX_TYPES]; +}; + +/* Debug Bus Constraint operation constant definitions */ +struct dbg_bus_constraint_op_defs { + u8 hw_op_val; + bool is_cyclic; +}; + +/* Storm Mode definitions */ +struct storm_mode_defs { + const char *name; + bool is_fast_dbg; + u8 id_in_hw; + u32 src_disable_reg_addr; + u32 src_enable_val; + bool exists[MAX_CHIP_IDS]; +}; + +struct grc_param_defs { + u32 default_val[MAX_CHIP_IDS]; + u32 min; + u32 max; + bool is_preset; + bool is_persistent; + u32 exclude_all_preset_val; + u32 crash_preset_val[MAX_CHIP_IDS]; +}; + +/* Address is in 128b units. Width is in bits. */ +struct rss_mem_defs { + const char *mem_name; + const char *type_name; + u32 addr; + u32 entry_width; + u32 num_entries[MAX_CHIP_IDS]; +}; + +struct vfc_ram_defs { + const char *mem_name; + const char *type_name; + u32 base_row; + u32 num_rows; +}; + +struct big_ram_defs { + const char *instance_name; + enum mem_groups mem_group_id; + enum mem_groups ram_mem_group_id; + enum dbg_grc_params grc_param; + u32 addr_reg_addr; + u32 data_reg_addr; + u32 is_256b_reg_addr; + u32 is_256b_bit_offset[MAX_CHIP_IDS]; + u32 ram_size[MAX_CHIP_IDS]; /* In dwords */ +}; + +struct phy_defs { + const char *phy_name; + + /* PHY base GRC address */ + u32 base_addr; + + /* Relative address of indirect TBUS address register (bits 0..7) */ + u32 tbus_addr_lo_addr; + + /* Relative address of indirect TBUS address register (bits 8..10) */ + u32 tbus_addr_hi_addr; + + /* Relative address of indirect TBUS data register (bits 0..7) */ + u32 tbus_data_lo_addr; + + /* Relative address of indirect TBUS data register (bits 8..11) */ + u32 tbus_data_hi_addr; +}; + +/* Split type definitions */ +struct split_type_defs { + const char *name; +}; + +/******************************** Constants **********************************/ + +#define BYTES_IN_DWORD sizeof(u32) +/* In the macros below, size and offset are specified in bits */ +#define CEIL_DWORDS(size) DIV_ROUND_UP(size, 32) +#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET +#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE +#define FIELD_DWORD_OFFSET(type, field) \ + ((int)(FIELD_BIT_OFFSET(type, field) / 32)) +#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32) +#define FIELD_BIT_MASK(type, field) \ + (((1 << FIELD_BIT_SIZE(type, field)) - 1) << \ + FIELD_DWORD_SHIFT(type, field)) + +#define SET_VAR_FIELD(var, type, field, val) \ + do { \ + var[FIELD_DWORD_OFFSET(type, field)] &= \ + (~FIELD_BIT_MASK(type, field)); \ + var[FIELD_DWORD_OFFSET(type, field)] |= \ + (val) << FIELD_DWORD_SHIFT(type, field); \ + } while (0) + +#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \ + do { \ + for (i = 0; i < (arr_size); i++) \ + qed_wr(dev, ptt, addr, (arr)[i]); \ + } while (0) + +#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD) +#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD) + +/* extra lines include a signature line + optional latency events line */ +#define NUM_EXTRA_DBG_LINES(block) \ + (GET_FIELD((block)->flags, DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS) ? 
2 : 1) +#define NUM_DBG_LINES(block) \ + ((block)->num_of_dbg_bus_lines + NUM_EXTRA_DBG_LINES(block)) + +#define USE_DMAE true +#define PROTECT_WIDE_BUS true + +#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2) +#define RAM_LINES_TO_BYTES(lines) \ + DWORDS_TO_BYTES(RAM_LINES_TO_DWORDS(lines)) + +#define REG_DUMP_LEN_SHIFT 24 +#define MEM_DUMP_ENTRY_SIZE_DWORDS \ + BYTES_TO_DWORDS(sizeof(struct dbg_dump_mem)) + +#define IDLE_CHK_RULE_SIZE_DWORDS \ + BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_rule)) + +#define IDLE_CHK_RESULT_HDR_DWORDS \ + BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_hdr)) + +#define IDLE_CHK_RESULT_REG_HDR_DWORDS \ + BYTES_TO_DWORDS(sizeof(struct dbg_idle_chk_result_reg_hdr)) + +#define PAGE_MEM_DESC_SIZE_DWORDS \ + BYTES_TO_DWORDS(sizeof(struct phys_mem_desc)) + +#define IDLE_CHK_MAX_ENTRIES_SIZE 32 + +/* The sizes and offsets below are specified in bits */ +#define VFC_CAM_CMD_STRUCT_SIZE 64 +#define VFC_CAM_CMD_ROW_OFFSET 48 +#define VFC_CAM_CMD_ROW_SIZE 9 +#define VFC_CAM_ADDR_STRUCT_SIZE 16 +#define VFC_CAM_ADDR_OP_OFFSET 0 +#define VFC_CAM_ADDR_OP_SIZE 4 +#define VFC_CAM_RESP_STRUCT_SIZE 256 +#define VFC_RAM_ADDR_STRUCT_SIZE 16 +#define VFC_RAM_ADDR_OP_OFFSET 0 +#define VFC_RAM_ADDR_OP_SIZE 2 +#define VFC_RAM_ADDR_ROW_OFFSET 2 +#define VFC_RAM_ADDR_ROW_SIZE 10 +#define VFC_RAM_RESP_STRUCT_SIZE 256 + +#define VFC_CAM_CMD_DWORDS CEIL_DWORDS(VFC_CAM_CMD_STRUCT_SIZE) +#define VFC_CAM_ADDR_DWORDS CEIL_DWORDS(VFC_CAM_ADDR_STRUCT_SIZE) +#define VFC_CAM_RESP_DWORDS CEIL_DWORDS(VFC_CAM_RESP_STRUCT_SIZE) +#define VFC_RAM_CMD_DWORDS VFC_CAM_CMD_DWORDS +#define VFC_RAM_ADDR_DWORDS CEIL_DWORDS(VFC_RAM_ADDR_STRUCT_SIZE) +#define VFC_RAM_RESP_DWORDS CEIL_DWORDS(VFC_RAM_RESP_STRUCT_SIZE) + +#define NUM_VFC_RAM_TYPES 4 + +#define VFC_CAM_NUM_ROWS 512 + +#define VFC_OPCODE_CAM_RD 14 +#define VFC_OPCODE_RAM_RD 0 + +#define NUM_RSS_MEM_TYPES 5 + +#define NUM_BIG_RAM_TYPES 3 +#define BIG_RAM_NAME_LEN 3 + +#define NUM_PHY_TBUS_ADDRESSES 2048 +#define PHY_DUMP_SIZE_DWORDS (NUM_PHY_TBUS_ADDRESSES / 2) + +#define RESET_REG_UNRESET_OFFSET 4 + +#define STALL_DELAY_MS 500 + +#define STATIC_DEBUG_LINE_DWORDS 9 + +#define NUM_COMMON_GLOBAL_PARAMS 10 + +#define MAX_RECURSION_DEPTH 10 + +#define FW_IMG_KUKU 0 +#define FW_IMG_MAIN 1 +#define FW_IMG_L2B 2 + +#define REG_FIFO_ELEMENT_DWORDS 2 +#define REG_FIFO_DEPTH_ELEMENTS 32 +#define REG_FIFO_DEPTH_DWORDS \ + (REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS) + +#define IGU_FIFO_ELEMENT_DWORDS 4 +#define IGU_FIFO_DEPTH_ELEMENTS 64 +#define IGU_FIFO_DEPTH_DWORDS \ + (IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS) + +#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2 +#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20 +#define PROTECTION_OVERRIDE_DEPTH_DWORDS \ + (PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \ + PROTECTION_OVERRIDE_ELEMENT_DWORDS) + +#define MCP_SPAD_TRACE_OFFSIZE_ADDR \ + (MCP_REG_SCRATCH + \ + offsetof(struct static_init, sections[SPAD_SECTION_TRACE])) + +#define MAX_SW_PLTAFORM_STR_SIZE 64 + +#define EMPTY_FW_VERSION_STR "???_???_???_???" +#define EMPTY_FW_IMAGE_STR "???????????????" 
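+/* Editor's note, not part of the original source: a minimal usage sketch of
+ * the bit-field helpers defined above, using the VFC CAM command layout from
+ * this file (VFC_CAM_CMD_ROW_OFFSET = 48, VFC_CAM_CMD_ROW_SIZE = 9). The
+ * macros paste "<type>_<field>_OFFSET" / "_SIZE" together, so a field is
+ * addressed purely by name:
+ *
+ *	u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 };
+ *	u32 row = 100;	/@ must already fit in the 9-bit field @/
+ *
+ *	SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row);
+ *
+ * This resolves to dword index 48 / 32 = 1, shift 48 % 32 = 16 and mask
+ * ((1 << 9) - 1) << 16 = 0x1ff0000, i.e. it clears bits 16..24 of cam_cmd[1]
+ * and ORs in (row << 16). Note that SET_VAR_FIELD does not mask "val" itself,
+ * and a field is assumed not to straddle a dword boundary. ("/@ @/" stands in
+ * for a nested comment, which C does not allow.)
+ */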
+ +/***************************** Constant Arrays *******************************/ + +/* DBG block framing mode definitions, in descending preference order */ +static struct framing_mode_defs s_framing_mode_defs[4] = { + {DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf, + DBG_BUS_SEMI_FRAME_MODE_4FAST, + 10}, + {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW, + 10}, + {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc, + DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10}, + {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8, + DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10} +}; + +/* Chip constant definitions array */ +static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = { + {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2, + s_framing_mode_defs}, + {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2, + s_framing_mode_defs} +}; + +/* Storm constant definitions array */ +static struct storm_defs s_storm_defs[] = { + /* Tstorm */ + {'T', BLOCK_TSEM, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT}, + true, + TSEM_REG_FAST_MEMORY, + TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE, + TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG, + TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT, + TCM_REG_CTX_RBC_ACCS, + {TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX, + TCM_REG_SM_TASK_CTX}, + {{4, 16, 2, 4}, {4, 16, 2, 4}} /* {bb} {k2} */ + }, + + /* Mstorm */ + {'M', BLOCK_MSEM, + {DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM}, + false, + MSEM_REG_FAST_MEMORY, + MSEM_REG_DBG_FRAME_MODE, + MSEM_REG_SLOW_DBG_ACTIVE, + MSEM_REG_SLOW_DBG_MODE, + MSEM_REG_DBG_MODE1_CFG, + MSEM_REG_SYNC_DBG_EMPTY, + MSEM_REG_DBG_GPRE_VECT, + MCM_REG_CTX_RBC_ACCS, + {MCM_REG_AGG_CON_CTX, MCM_REG_SM_CON_CTX, MCM_REG_AGG_TASK_CTX, + MCM_REG_SM_TASK_CTX }, + {{1, 10, 2, 7}, {1, 10, 2, 7}} /* {bb} {k2}*/ + }, + + /* Ustorm */ + {'U', BLOCK_USEM, + {DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU}, + false, + USEM_REG_FAST_MEMORY, + USEM_REG_DBG_FRAME_MODE, + USEM_REG_SLOW_DBG_ACTIVE, + USEM_REG_SLOW_DBG_MODE, + USEM_REG_DBG_MODE1_CFG, + USEM_REG_SYNC_DBG_EMPTY, + USEM_REG_DBG_GPRE_VECT, + UCM_REG_CTX_RBC_ACCS, + {UCM_REG_AGG_CON_CTX, UCM_REG_SM_CON_CTX, UCM_REG_AGG_TASK_CTX, + UCM_REG_SM_TASK_CTX}, + {{2, 13, 3, 3}, {2, 13, 3, 3}} /* {bb} {k2} */ + }, + + /* Xstorm */ + {'X', BLOCK_XSEM, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX}, + false, + XSEM_REG_FAST_MEMORY, + XSEM_REG_DBG_FRAME_MODE, + XSEM_REG_SLOW_DBG_ACTIVE, + XSEM_REG_SLOW_DBG_MODE, + XSEM_REG_DBG_MODE1_CFG, + XSEM_REG_SYNC_DBG_EMPTY, + XSEM_REG_DBG_GPRE_VECT, + XCM_REG_CTX_RBC_ACCS, + {XCM_REG_AGG_CON_CTX, XCM_REG_SM_CON_CTX, 0, 0}, + {{9, 15, 0, 0}, {9, 15, 0, 0}} /* {bb} {k2} */ + }, + + /* Ystorm */ + {'Y', BLOCK_YSEM, + {DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY}, + false, + YSEM_REG_FAST_MEMORY, + YSEM_REG_DBG_FRAME_MODE, + YSEM_REG_SLOW_DBG_ACTIVE, + YSEM_REG_SLOW_DBG_MODE, + YSEM_REG_DBG_MODE1_CFG, + YSEM_REG_SYNC_DBG_EMPTY, + YSEM_REG_DBG_GPRE_VECT, + YCM_REG_CTX_RBC_ACCS, + {YCM_REG_AGG_CON_CTX, YCM_REG_SM_CON_CTX, YCM_REG_AGG_TASK_CTX, + YCM_REG_SM_TASK_CTX}, + {{2, 3, 2, 12}, {2, 3, 2, 12}} /* {bb} {k2} */ + }, + + /* Pstorm */ + {'P', BLOCK_PSEM, + {DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS}, + true, + PSEM_REG_FAST_MEMORY, + PSEM_REG_DBG_FRAME_MODE, + PSEM_REG_SLOW_DBG_ACTIVE, + PSEM_REG_SLOW_DBG_MODE, + PSEM_REG_DBG_MODE1_CFG, + PSEM_REG_SYNC_DBG_EMPTY, + PSEM_REG_DBG_GPRE_VECT, + PCM_REG_CTX_RBC_ACCS, + {0, PCM_REG_SM_CON_CTX, 0, 0}, + {{0, 10, 0, 0}, {0, 10, 0, 0}} /* {bb} {k2} */ + }, +}; + +static struct hw_type_defs s_hw_type_defs[] = { + /* HW_TYPE_ASIC 
*/ + {"asic", 1, 256, 32768}, + {"reserved", 0, 0, 0}, + {"reserved2", 0, 0, 0}, + {"reserved3", 0, 0, 0}, + {"reserved4", 0, 0, 0} +}; + +static struct grc_param_defs s_grc_param_defs[] = { + /* DBG_GRC_PARAM_DUMP_TSTORM */ + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_MSTORM */ + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_USTORM */ + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_XSTORM */ + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_YSTORM */ + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_PSTORM */ + {{1, 1}, 0, 1, false, false, 1, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_REGS */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_RAM */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_PBUF */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_IOR */ + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_VFC */ + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_CM_CTX */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_ILT */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_RSS */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_CAU */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_QM */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_MCP */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_DORQ */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_CFC */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_IGU */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_BRB */ + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_BTB */ + {{0, 0}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_BMB */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_RESERVED1 */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_MULD */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_PRS */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_DMAE */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_TM */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_SDM */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_DIF */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_STATIC */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_UNSTALL */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_RESERVED2 */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_MCP_TRACE_META_SIZE */ + {{0, 0}, 1, 0xffffffff, false, true, 0, {0, 0}}, + + /* DBG_GRC_PARAM_EXCLUDE_ALL */ + {{0, 0}, 0, 1, true, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_CRASH */ + {{0, 0}, 0, 1, true, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_PARITY_SAFE */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_CM */ + {{1, 1}, 0, 1, false, false, 0, {1, 1}}, + + /* DBG_GRC_PARAM_DUMP_PHY */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_NO_MCP */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_NO_FW_VER */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_RESERVED3 */ + {{0, 0}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_MCP_HW_DUMP */ + {{0, 1}, 0, 1, false, false, 0, {0, 1}}, + + /* DBG_GRC_PARAM_DUMP_ILT_CDUC */ + 
{{1, 1}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_ILT_CDUT */ + {{1, 1}, 0, 1, false, false, 0, {0, 0}}, + + /* DBG_GRC_PARAM_DUMP_CAU_EXT */ + {{0, 0}, 0, 1, false, false, 0, {1, 1}} +}; + +static struct rss_mem_defs s_rss_mem_defs[] = { + {"rss_mem_cid", "rss_cid", 0, 32, + {256, 320}}, + + {"rss_mem_key_msb", "rss_key", 1024, 256, + {128, 208}}, + + {"rss_mem_key_lsb", "rss_key", 2048, 64, + {128, 208}}, + + {"rss_mem_info", "rss_info", 3072, 16, + {128, 208}}, + + {"rss_mem_ind", "rss_ind", 4096, 16, + {16384, 26624}} +}; + +static struct vfc_ram_defs s_vfc_ram_defs[] = { + {"vfc_ram_tt1", "vfc_ram", 0, 512}, + {"vfc_ram_mtt2", "vfc_ram", 512, 128}, + {"vfc_ram_stt2", "vfc_ram", 640, 32}, + {"vfc_ram_ro_vect", "vfc_ram", 672, 32} +}; + +static struct big_ram_defs s_big_ram_defs[] = { + {"BRB", MEM_GROUP_BRB_MEM, MEM_GROUP_BRB_RAM, DBG_GRC_PARAM_DUMP_BRB, + BRB_REG_BIG_RAM_ADDRESS, BRB_REG_BIG_RAM_DATA, + MISC_REG_BLOCK_256B_EN, {0, 0}, + {153600, 180224}}, + + {"BTB", MEM_GROUP_BTB_MEM, MEM_GROUP_BTB_RAM, DBG_GRC_PARAM_DUMP_BTB, + BTB_REG_BIG_RAM_ADDRESS, BTB_REG_BIG_RAM_DATA, + MISC_REG_BLOCK_256B_EN, {0, 1}, + {92160, 117760}}, + + {"BMB", MEM_GROUP_BMB_MEM, MEM_GROUP_BMB_RAM, DBG_GRC_PARAM_DUMP_BMB, + BMB_REG_BIG_RAM_ADDRESS, BMB_REG_BIG_RAM_DATA, + MISCS_REG_BLOCK_256B_EN, {0, 0}, + {36864, 36864}} +}; + +static struct rbc_reset_defs s_rbc_reset_defs[] = { + {MISCS_REG_RESET_PL_HV, + {0x0, 0x400}}, + {MISC_REG_RESET_PL_PDA_VMAIN_1, + {0x4404040, 0x4404040}}, + {MISC_REG_RESET_PL_PDA_VMAIN_2, + {0x7, 0x7c00007}}, + {MISC_REG_RESET_PL_PDA_VAUX, + {0x2, 0x2}}, +}; + +static struct phy_defs s_phy_defs[] = { + {"nw_phy", NWS_REG_NWS_CMU_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2, + PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2}, + {"sgmii_phy", MS_REG_MS_CMU_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy0", PHY_PCIE_REG_PHY0_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, + {"pcie_phy1", PHY_PCIE_REG_PHY1_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2, + PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2}, +}; + +static struct split_type_defs s_split_type_defs[] = { + /* SPLIT_TYPE_NONE */ + {"eng"}, + + /* SPLIT_TYPE_PORT */ + {"port"}, + + /* SPLIT_TYPE_PF */ + {"pf"}, + + /* SPLIT_TYPE_PORT_PF */ + {"port"}, + + /* SPLIT_TYPE_VF */ + {"vf"} +}; + +/******************************** Variables **********************************/ + +/* The version of the calling app */ +static u32 s_app_ver; + +/**************************** Private Functions ******************************/ + +static void qed_static_asserts(void) +{ +} + +/* Reads and returns a single dword from the specified unaligned buffer */ +static u32 qed_read_unaligned_dword(u8 *buf) +{ + u32 dword; + + memcpy((u8 *)&dword, buf, sizeof(dword)); + return dword; +} + +/* Sets the value of the specified GRC param */ +static void qed_grc_set_param(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + dev_data->grc.param_val[grc_param] = val; +} + +/* Returns the value of the specified GRC param */ +static u32 qed_grc_get_param(struct qed_hwfn 
*p_hwfn, + enum dbg_grc_params grc_param) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + return dev_data->grc.param_val[grc_param]; +} + +/* Initializes the GRC parameters */ +static void qed_dbg_grc_init_params(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + if (!dev_data->grc.params_initialized) { + qed_dbg_grc_set_params_default(p_hwfn); + dev_data->grc.params_initialized = 1; + } +} + +/* Sets pointer and size for the specified binary buffer type */ +static void qed_set_dbg_bin_buf(struct qed_hwfn *p_hwfn, + enum bin_dbg_buffer_type buf_type, + const u32 *ptr, u32 size) +{ + struct virt_mem_desc *buf = &p_hwfn->dbg_arrays[buf_type]; + + buf->ptr = (void *)ptr; + buf->size = size; +} + +/* Initializes debug data for the specified device */ +static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 num_pfs = 0, max_pfs_per_port = 0; + + if (dev_data->initialized) + return DBG_STATUS_OK; + + if (!s_app_ver) + return DBG_STATUS_APP_VERSION_NOT_SET; + + /* Set chip */ + if (QED_IS_K2(p_hwfn->cdev)) { + dev_data->chip_id = CHIP_K2; + dev_data->mode_enable[MODE_K2] = 1; + dev_data->num_vfs = MAX_NUM_VFS_K2; + num_pfs = MAX_NUM_PFS_K2; + max_pfs_per_port = MAX_NUM_PFS_K2 / 2; + } else if (QED_IS_BB_B0(p_hwfn->cdev)) { + dev_data->chip_id = CHIP_BB; + dev_data->mode_enable[MODE_BB] = 1; + dev_data->num_vfs = MAX_NUM_VFS_BB; + num_pfs = MAX_NUM_PFS_BB; + max_pfs_per_port = MAX_NUM_PFS_BB; + } else { + return DBG_STATUS_UNKNOWN_CHIP; + } + + /* Set HW type */ + dev_data->hw_type = HW_TYPE_ASIC; + dev_data->mode_enable[MODE_ASIC] = 1; + + /* Set port mode */ + switch (p_hwfn->cdev->num_ports_in_engine) { + case 1: + dev_data->mode_enable[MODE_PORTS_PER_ENG_1] = 1; + break; + case 2: + dev_data->mode_enable[MODE_PORTS_PER_ENG_2] = 1; + break; + case 4: + dev_data->mode_enable[MODE_PORTS_PER_ENG_4] = 1; + break; + } + + /* Set 100G mode */ + if (QED_IS_CMT(p_hwfn->cdev)) + dev_data->mode_enable[MODE_100G] = 1; + + /* Set number of ports */ + if (dev_data->mode_enable[MODE_PORTS_PER_ENG_1] || + dev_data->mode_enable[MODE_100G]) + dev_data->num_ports = 1; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_2]) + dev_data->num_ports = 2; + else if (dev_data->mode_enable[MODE_PORTS_PER_ENG_4]) + dev_data->num_ports = 4; + + /* Set number of PFs per port */ + dev_data->num_pfs_per_port = min_t(u32, + num_pfs / dev_data->num_ports, + max_pfs_per_port); + + /* Initializes the GRC parameters */ + qed_dbg_grc_init_params(p_hwfn); + + dev_data->use_dmae = true; + dev_data->initialized = 1; + + return DBG_STATUS_OK; +} + +static const struct dbg_block *get_dbg_block(struct qed_hwfn *p_hwfn, + enum block_id block_id) +{ + const struct dbg_block *dbg_block; + + dbg_block = p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS].ptr; + return dbg_block + block_id; +} + +static const struct dbg_block_chip *qed_get_dbg_block_per_chip(struct qed_hwfn + *p_hwfn, + enum block_id + block_id) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + return (const struct dbg_block_chip *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_CHIP_DATA].ptr + + block_id * MAX_CHIP_IDS + dev_data->chip_id; +} + +static const struct dbg_reset_reg *qed_get_dbg_reset_reg(struct qed_hwfn + *p_hwfn, + u8 reset_reg_id) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + + return (const struct dbg_reset_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_RESET_REGS].ptr + + reset_reg_id * MAX_CHIP_IDS + dev_data->chip_id; +} + +/* Reads 
the FW info structure for the specified Storm from the chip, + * and writes it to the specified fw_info pointer. + */ +static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 storm_id, struct fw_info *fw_info) +{ + struct storm_defs *storm = &s_storm_defs[storm_id]; + struct fw_info_location fw_info_location; + u32 addr, i, size, *dest; + + memset(&fw_info_location, 0, sizeof(fw_info_location)); + memset(fw_info, 0, sizeof(*fw_info)); + + /* Read first the address that points to fw_info location. + * The address is located in the last line of the Storm RAM. + */ + addr = storm->sem_fast_mem_addr + SEM_FAST_REG_INT_RAM + + DWORDS_TO_BYTES(SEM_FAST_REG_INT_RAM_SIZE) - + sizeof(fw_info_location); + + dest = (u32 *)&fw_info_location; + size = BYTES_TO_DWORDS(sizeof(fw_info_location)); + + for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) + dest[i] = qed_rd(p_hwfn, p_ptt, addr); + + /* Read FW version info from Storm RAM */ + size = le32_to_cpu(fw_info_location.size); + if (!size || size > sizeof(*fw_info)) + return; + + addr = le32_to_cpu(fw_info_location.grc_addr); + dest = (u32 *)fw_info; + size = BYTES_TO_DWORDS(size); + + for (i = 0; i < size; i++, addr += BYTES_IN_DWORD) + dest[i] = qed_rd(p_hwfn, p_ptt, addr); +} + +/* Dumps the specified string to the specified buffer. + * Returns the dumped size in bytes. + */ +static u32 qed_dump_str(char *dump_buf, bool dump, const char *str) +{ + if (dump) + strcpy(dump_buf, str); + + return (u32)strlen(str) + 1; +} + +/* Dumps zeros to align the specified buffer to dwords. + * Returns the dumped size in bytes. + */ +static u32 qed_dump_align(char *dump_buf, bool dump, u32 byte_offset) +{ + u8 offset_in_dword, align_size; + + offset_in_dword = (u8)(byte_offset & 0x3); + align_size = offset_in_dword ? BYTES_IN_DWORD - offset_in_dword : 0; + + if (dump && align_size) + memset(dump_buf, 0, align_size); + + return align_size; +} + +/* Writes the specified string param to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_dump_str_param(u32 *dump_buf, + bool dump, + const char *param_name, const char *param_val) +{ + char *char_buf = (char *)dump_buf; + u32 offset = 0; + + /* Dump param name */ + offset += qed_dump_str(char_buf + offset, dump, param_name); + + /* Indicate a string param value */ + if (dump) + *(char_buf + offset) = 1; + offset++; + + /* Dump param value */ + offset += qed_dump_str(char_buf + offset, dump, param_val); + + /* Align buffer to next dword */ + offset += qed_dump_align(char_buf + offset, dump, offset); + + return BYTES_TO_DWORDS(offset); +} + +/* Writes the specified numeric param to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_dump_num_param(u32 *dump_buf, + bool dump, const char *param_name, u32 param_val) +{ + char *char_buf = (char *)dump_buf; + u32 offset = 0; + + /* Dump param name */ + offset += qed_dump_str(char_buf + offset, dump, param_name); + + /* Indicate a numeric param value */ + if (dump) + *(char_buf + offset) = 0; + offset++; + + /* Align buffer to next dword */ + offset += qed_dump_align(char_buf + offset, dump, offset); + + /* Dump param value (and change offset from bytes to dwords) */ + offset = BYTES_TO_DWORDS(offset); + if (dump) + *(dump_buf + offset) = param_val; + offset++; + + return offset; +} + +/* Reads the FW version and writes it as a param to the specified buffer. + * Returns the dumped size in dwords. 
+ */ +static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + char fw_ver_str[16] = EMPTY_FW_VERSION_STR; + char fw_img_str[16] = EMPTY_FW_IMAGE_STR; + struct fw_info fw_info = { {0}, {0} }; + u32 offset = 0; + + if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { + /* Read FW info from chip */ + qed_read_fw_info(p_hwfn, p_ptt, &fw_info); + + /* Create FW version/image strings */ + if (snprintf(fw_ver_str, sizeof(fw_ver_str), + "%d_%d_%d_%d", fw_info.ver.num.major, + fw_info.ver.num.minor, fw_info.ver.num.rev, + fw_info.ver.num.eng) < 0) + DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid FW version string\n"); + switch (fw_info.ver.image_id) { + case FW_IMG_KUKU: + strcpy(fw_img_str, "kuku"); + break; + case FW_IMG_MAIN: + strcpy(fw_img_str, "main"); + break; + case FW_IMG_L2B: + strcpy(fw_img_str, "l2b"); + break; + default: + strcpy(fw_img_str, "unknown"); + break; + } + } + + /* Dump FW version, image and timestamp */ + offset += qed_dump_str_param(dump_buf + offset, + dump, "fw-version", fw_ver_str); + offset += qed_dump_str_param(dump_buf + offset, + dump, "fw-image", fw_img_str); + offset += qed_dump_num_param(dump_buf + offset, dump, "fw-timestamp", + le32_to_cpu(fw_info.ver.timestamp)); + + return offset; +} + +/* Reads the MFW version and writes it as a param to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_dump_mfw_ver_param(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + char mfw_ver_str[16] = EMPTY_FW_VERSION_STR; + + if (dump && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_FW_VER)) { + u32 global_section_offsize, global_section_addr, mfw_ver; + u32 public_data_addr, global_section_offsize_addr; + + /* Find MCP public data GRC address. Needs to be ORed with + * MCP_REG_SCRATCH due to a HW bug. + */ + public_data_addr = qed_rd(p_hwfn, + p_ptt, + MISC_REG_SHARED_MEM_ADDR) | + MCP_REG_SCRATCH; + + /* Find MCP public global section offset */ + global_section_offsize_addr = public_data_addr + + offsetof(struct mcp_public_data, + sections) + + sizeof(offsize_t) * PUBLIC_GLOBAL; + global_section_offsize = qed_rd(p_hwfn, p_ptt, + global_section_offsize_addr); + global_section_addr = + MCP_REG_SCRATCH + + (global_section_offsize & OFFSIZE_OFFSET_MASK) * 4; + + /* Read MFW version from MCP public global section */ + mfw_ver = qed_rd(p_hwfn, p_ptt, + global_section_addr + + offsetof(struct public_global, mfw_ver)); + + /* Dump MFW version param */ + if (snprintf(mfw_ver_str, sizeof(mfw_ver_str), "%d_%d_%d_%d", + (u8)(mfw_ver >> 24), (u8)(mfw_ver >> 16), + (u8)(mfw_ver >> 8), (u8)mfw_ver) < 0) + DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid MFW version string\n"); + } + + return qed_dump_str_param(dump_buf, dump, "mfw-version", mfw_ver_str); +} + +/* Reads the chip revision from the chip and writes it as a param to the + * specified buffer. Returns the dumped size in dwords. 
+ */ +static u32 qed_dump_chip_revision_param(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + char param_str[3] = "??"; + + if (dev_data->hw_type == HW_TYPE_ASIC) { + u32 chip_rev, chip_metal; + + chip_rev = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); + chip_metal = qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); + + param_str[0] = 'a' + (u8)chip_rev; + param_str[1] = '0' + (u8)chip_metal; + } + + return qed_dump_str_param(dump_buf, dump, "chip-revision", param_str); +} + +/* Writes a section header to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_dump_section_hdr(u32 *dump_buf, + bool dump, const char *name, u32 num_params) +{ + return qed_dump_num_param(dump_buf, dump, name, num_params); +} + +/* Writes the common global params to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u8 num_specific_global_params) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 offset = 0; + u8 num_params; + + /* Dump global params section header */ + num_params = NUM_COMMON_GLOBAL_PARAMS + num_specific_global_params + + (dev_data->chip_id == CHIP_BB ? 1 : 0); + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "global_params", num_params); + + /* Store params */ + offset += qed_dump_fw_ver_param(p_hwfn, p_ptt, dump_buf + offset, dump); + offset += qed_dump_mfw_ver_param(p_hwfn, + p_ptt, dump_buf + offset, dump); + offset += qed_dump_chip_revision_param(p_hwfn, + p_ptt, dump_buf + offset, dump); + offset += qed_dump_num_param(dump_buf + offset, + dump, "tools-version", TOOLS_VERSION); + offset += qed_dump_str_param(dump_buf + offset, + dump, + "chip", + s_chip_defs[dev_data->chip_id].name); + offset += qed_dump_str_param(dump_buf + offset, + dump, + "platform", + s_hw_type_defs[dev_data->hw_type].name); + offset += qed_dump_num_param(dump_buf + offset, + dump, "pci-func", p_hwfn->abs_pf_id); + offset += qed_dump_num_param(dump_buf + offset, + dump, "epoch", qed_get_epoch_time()); + if (dev_data->chip_id == CHIP_BB) + offset += qed_dump_num_param(dump_buf + offset, + dump, "path", QED_PATH_ID(p_hwfn)); + + return offset; +} + +/* Writes the "last" section (including CRC) to the specified buffer at the + * given offset. Returns the dumped size in dwords. 
+ */ +static u32 qed_dump_last_section(u32 *dump_buf, u32 offset, bool dump) +{ + u32 start_offset = offset; + + /* Dump CRC section header */ + offset += qed_dump_section_hdr(dump_buf + offset, dump, "last", 0); + + /* Calculate CRC32 and add it to the dword after the "last" section */ + if (dump) + *(dump_buf + offset) = ~crc32(0xffffffff, + (u8 *)dump_buf, + DWORDS_TO_BYTES(offset)); + + offset++; + + return offset - start_offset; +} + +/* Update blocks reset state */ +static void qed_update_blocks_reset_state(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 reg_val[NUM_DBG_RESET_REGS] = { 0 }; + u8 rst_reg_id; + u32 blk_id; + + /* Read reset registers */ + for (rst_reg_id = 0; rst_reg_id < NUM_DBG_RESET_REGS; rst_reg_id++) { + const struct dbg_reset_reg *rst_reg; + bool rst_reg_removed; + u32 rst_reg_addr; + + rst_reg = qed_get_dbg_reset_reg(p_hwfn, rst_reg_id); + rst_reg_removed = GET_FIELD(rst_reg->data, + DBG_RESET_REG_IS_REMOVED); + rst_reg_addr = DWORDS_TO_BYTES(GET_FIELD(rst_reg->data, + DBG_RESET_REG_ADDR)); + + if (!rst_reg_removed) + reg_val[rst_reg_id] = qed_rd(p_hwfn, p_ptt, + rst_reg_addr); + } + + /* Check if blocks are in reset */ + for (blk_id = 0; blk_id < NUM_PHYS_BLOCKS; blk_id++) { + const struct dbg_block_chip *blk; + bool has_rst_reg; + bool is_removed; + + blk = qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)blk_id); + is_removed = GET_FIELD(blk->flags, DBG_BLOCK_CHIP_IS_REMOVED); + has_rst_reg = GET_FIELD(blk->flags, + DBG_BLOCK_CHIP_HAS_RESET_REG); + + if (!is_removed && has_rst_reg) + dev_data->block_in_reset[blk_id] = + !(reg_val[blk->reset_reg_id] & + BIT(blk->reset_reg_bit_offset)); + } +} + +/* is_mode_match recursive function */ +static bool qed_is_mode_match_rec(struct qed_hwfn *p_hwfn, + u16 *modes_buf_offset, u8 rec_depth) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 *dbg_array; + bool arg1, arg2; + u8 tree_val; + + if (rec_depth > MAX_RECURSION_DEPTH) { + DP_NOTICE(p_hwfn, + "Unexpected error: is_mode_match_rec exceeded the max recursion depth. This is probably due to a corrupt init/debug buffer.\n"); + return false; + } + + /* Get next element from modes tree buffer */ + dbg_array = p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr; + tree_val = dbg_array[(*modes_buf_offset)++]; + + switch (tree_val) { + case INIT_MODE_OP_NOT: + return !qed_is_mode_match_rec(p_hwfn, + modes_buf_offset, rec_depth + 1); + case INIT_MODE_OP_OR: + case INIT_MODE_OP_AND: + arg1 = qed_is_mode_match_rec(p_hwfn, + modes_buf_offset, rec_depth + 1); + arg2 = qed_is_mode_match_rec(p_hwfn, + modes_buf_offset, rec_depth + 1); + return (tree_val == INIT_MODE_OP_OR) ? (arg1 || + arg2) : (arg1 && arg2); + default: + return dev_data->mode_enable[tree_val - MAX_INIT_MODE_OPS] > 0; + } +} + +/* Returns true if the mode (specified using modes_buf_offset) is enabled */ +static bool qed_is_mode_match(struct qed_hwfn *p_hwfn, u16 *modes_buf_offset) +{ + return qed_is_mode_match_rec(p_hwfn, modes_buf_offset, 0); +} + +/* Enable / disable the Debug block */ +static void qed_bus_enable_dbg_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool enable) +{ + qed_wr(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON, enable ? 
1 : 0); +} + +/* Resets the Debug block */ +static void qed_bus_reset_dbg_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 reset_reg_addr, old_reset_reg_val, new_reset_reg_val; + const struct dbg_reset_reg *reset_reg; + const struct dbg_block_chip *block; + + block = qed_get_dbg_block_per_chip(p_hwfn, BLOCK_DBG); + reset_reg = qed_get_dbg_reset_reg(p_hwfn, block->reset_reg_id); + reset_reg_addr = + DWORDS_TO_BYTES(GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR)); + + old_reset_reg_val = qed_rd(p_hwfn, p_ptt, reset_reg_addr); + new_reset_reg_val = + old_reset_reg_val & ~BIT(block->reset_reg_bit_offset); + + qed_wr(p_hwfn, p_ptt, reset_reg_addr, new_reset_reg_val); + qed_wr(p_hwfn, p_ptt, reset_reg_addr, old_reset_reg_val); +} + +/* Enable / disable Debug Bus clients according to the specified mask + * (1 = enable, 0 = disable). + */ +static void qed_bus_enable_clients(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 client_mask) +{ + qed_wr(p_hwfn, p_ptt, DBG_REG_CLIENT_ENABLE, client_mask); +} + +static void qed_bus_config_dbg_line(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block_id, + u8 line_id, + u8 enable_mask, + u8 right_shift, + u8 force_valid_mask, u8 force_frame_mask) +{ + const struct dbg_block_chip *block = + qed_get_dbg_block_per_chip(p_hwfn, block_id); + + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_select_reg_addr), + line_id); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_dword_enable_reg_addr), + enable_mask); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_shift_reg_addr), + right_shift); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_valid_reg_addr), + force_valid_mask); + qed_wr(p_hwfn, p_ptt, DWORDS_TO_BYTES(block->dbg_force_frame_reg_addr), + force_frame_mask); +} + +/* Disable debug bus in all blocks */ +static void qed_bus_disable_blocks(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 block_id; + + /* Disable all blocks */ + for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { + const struct dbg_block_chip *block_per_chip = + qed_get_dbg_block_per_chip(p_hwfn, + (enum block_id)block_id); + + if (GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_IS_REMOVED) || + dev_data->block_in_reset[block_id]) + continue; + + /* Disable debug bus */ + if (GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_HAS_DBG_BUS)) { + u32 dbg_en_addr = + block_per_chip->dbg_dword_enable_reg_addr; + u16 modes_buf_offset = + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + bool eval_mode = + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + + if (!eval_mode || + qed_is_mode_match(p_hwfn, &modes_buf_offset)) + qed_wr(p_hwfn, p_ptt, + DWORDS_TO_BYTES(dbg_en_addr), + 0); + } + } +} + +/* Returns true if the specified entity (indicated by GRC param) should be + * included in the dump, false otherwise. + */ +static bool qed_grc_is_included(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param) +{ + return qed_grc_get_param(p_hwfn, grc_param) > 0; +} + +/* Returns the storm_id that matches the specified Storm letter, + * or MAX_DBG_STORMS if invalid storm letter. 
+ */ +static enum dbg_storms qed_get_id_from_letter(char storm_letter) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) + if (s_storm_defs[storm_id].letter == storm_letter) + return (enum dbg_storms)storm_id; + + return MAX_DBG_STORMS; +} + +/* Returns true of the specified Storm should be included in the dump, false + * otherwise. + */ +static bool qed_grc_is_storm_included(struct qed_hwfn *p_hwfn, + enum dbg_storms storm) +{ + return qed_grc_get_param(p_hwfn, (enum dbg_grc_params)storm) > 0; +} + +/* Returns true if the specified memory should be included in the dump, false + * otherwise. + */ +static bool qed_grc_is_mem_included(struct qed_hwfn *p_hwfn, + enum block_id block_id, u8 mem_group_id) +{ + const struct dbg_block *block; + u8 i; + + block = get_dbg_block(p_hwfn, block_id); + + /* If the block is associated with a Storm, check Storm match */ + if (block->associated_storm_letter) { + enum dbg_storms associated_storm_id = + qed_get_id_from_letter(block->associated_storm_letter); + + if (associated_storm_id == MAX_DBG_STORMS || + !qed_grc_is_storm_included(p_hwfn, associated_storm_id)) + return false; + } + + for (i = 0; i < NUM_BIG_RAM_TYPES; i++) { + struct big_ram_defs *big_ram = &s_big_ram_defs[i]; + + if (mem_group_id == big_ram->mem_group_id || + mem_group_id == big_ram->ram_mem_group_id) + return qed_grc_is_included(p_hwfn, big_ram->grc_param); + } + + switch (mem_group_id) { + case MEM_GROUP_PXP_ILT: + case MEM_GROUP_PXP_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PXP); + case MEM_GROUP_RAM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RAM); + case MEM_GROUP_PBUF: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PBUF); + case MEM_GROUP_CAU_MEM: + case MEM_GROUP_CAU_SB: + case MEM_GROUP_CAU_PI: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU); + case MEM_GROUP_CAU_MEM_EXT: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CAU_EXT); + case MEM_GROUP_QM_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_QM); + case MEM_GROUP_CFC_MEM: + case MEM_GROUP_CONN_CFC_MEM: + case MEM_GROUP_TASK_CFC_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CFC) || + qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX); + case MEM_GROUP_DORQ_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DORQ); + case MEM_GROUP_IGU_MEM: + case MEM_GROUP_IGU_MSIX: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IGU); + case MEM_GROUP_MULD_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MULD); + case MEM_GROUP_PRS_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_PRS); + case MEM_GROUP_DMAE_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DMAE); + case MEM_GROUP_TM_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_TM); + case MEM_GROUP_SDM_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_SDM); + case MEM_GROUP_TDIF_CTX: + case MEM_GROUP_RDIF_CTX: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_DIF); + case MEM_GROUP_CM_MEM: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM); + case MEM_GROUP_IOR: + return qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_IOR); + default: + return true; + } +} + +/* Stalls all Storms */ +static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool stall) +{ + u32 reg_addr; + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id)) + continue; + + reg_addr = 
s_storm_defs[storm_id].sem_fast_mem_addr + + SEM_FAST_REG_STALL_0; + qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0); + } + + msleep(STALL_DELAY_MS); +} + +/* Takes all blocks out of reset. If rbc_only is true, only RBC clients are + * taken out of reset. + */ +static void qed_grc_unreset_blocks(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool rbc_only) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 chip_id = dev_data->chip_id; + u32 i; + + /* Take RBCs out of reset */ + for (i = 0; i < ARRAY_SIZE(s_rbc_reset_defs); i++) + if (s_rbc_reset_defs[i].reset_val[dev_data->chip_id]) + qed_wr(p_hwfn, + p_ptt, + s_rbc_reset_defs[i].reset_reg_addr + + RESET_REG_UNRESET_OFFSET, + s_rbc_reset_defs[i].reset_val[chip_id]); + + if (!rbc_only) { + u32 reg_val[NUM_DBG_RESET_REGS] = { 0 }; + u8 reset_reg_id; + u32 block_id; + + /* Fill reset regs values */ + for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { + bool is_removed, has_reset_reg, unreset_before_dump; + const struct dbg_block_chip *block; + + block = qed_get_dbg_block_per_chip(p_hwfn, + (enum block_id) + block_id); + is_removed = + GET_FIELD(block->flags, DBG_BLOCK_CHIP_IS_REMOVED); + has_reset_reg = + GET_FIELD(block->flags, + DBG_BLOCK_CHIP_HAS_RESET_REG); + unreset_before_dump = + GET_FIELD(block->flags, + DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP); + + if (!is_removed && has_reset_reg && unreset_before_dump) + reg_val[block->reset_reg_id] |= + BIT(block->reset_reg_bit_offset); + } + + /* Write reset registers */ + for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS; + reset_reg_id++) { + const struct dbg_reset_reg *reset_reg; + u32 reset_reg_addr; + + reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id); + + if (GET_FIELD + (reset_reg->data, DBG_RESET_REG_IS_REMOVED)) + continue; + + if (reg_val[reset_reg_id]) { + reset_reg_addr = + GET_FIELD(reset_reg->data, + DBG_RESET_REG_ADDR); + qed_wr(p_hwfn, + p_ptt, + DWORDS_TO_BYTES(reset_reg_addr) + + RESET_REG_UNRESET_OFFSET, + reg_val[reset_reg_id]); + } + } + } +} + +/* Returns the attention block data of the specified block */ +static const struct dbg_attn_block_type_data * +qed_get_block_attn_data(struct qed_hwfn *p_hwfn, + enum block_id block_id, enum dbg_attn_type attn_type) +{ + const struct dbg_attn_block *base_attn_block_arr = + (const struct dbg_attn_block *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr; + + return &base_attn_block_arr[block_id].per_type_data[attn_type]; +} + +/* Returns the attention registers of the specified block */ +static const struct dbg_attn_reg * +qed_get_block_attn_regs(struct qed_hwfn *p_hwfn, + enum block_id block_id, enum dbg_attn_type attn_type, + u8 *num_attn_regs) +{ + const struct dbg_attn_block_type_data *block_type_data = + qed_get_block_attn_data(p_hwfn, block_id, attn_type); + + *num_attn_regs = block_type_data->num_regs; + + return (const struct dbg_attn_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr + + block_type_data->regs_offset; +} + +/* For each block, clear the status of all parities */ +static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + const struct dbg_attn_reg *attn_reg_arr; + u32 block_id, sts_clr_address; + u8 reg_idx, num_attn_regs; + + for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { + if (dev_data->block_in_reset[block_id]) + continue; + + attn_reg_arr = qed_get_block_attn_regs(p_hwfn, + (enum block_id)block_id, + ATTN_TYPE_PARITY, + &num_attn_regs); + + for (reg_idx = 0; reg_idx < 
num_attn_regs; reg_idx++) { + const struct dbg_attn_reg *reg_data = + &attn_reg_arr[reg_idx]; + u16 modes_buf_offset; + bool eval_mode; + + /* Check mode */ + eval_mode = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + modes_buf_offset = + GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + + sts_clr_address = reg_data->sts_clr_address; + /* If Mode match: clear parity status */ + if (!eval_mode || + qed_is_mode_match(p_hwfn, &modes_buf_offset)) + qed_rd(p_hwfn, p_ptt, + DWORDS_TO_BYTES(sts_clr_address)); + } + } +} + +/* Finds the meta data image in NVRAM */ +static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 image_type, + u32 *nvram_offset_bytes, + u32 *nvram_size_bytes, + bool b_can_sleep) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_txn_size; + struct mcp_file_att file_att; + int nvm_result; + + /* Call NVRAM get file command */ + nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn, + p_ptt, + DRV_MSG_CODE_NVM_GET_FILE_ATT, + image_type, + &ret_mcp_resp, + &ret_mcp_param, + &ret_txn_size, + (u32 *)&file_att, + b_can_sleep); + + /* Check response */ + if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) != + FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_GET_IMAGE_FAILED; + + /* Update return values */ + *nvram_offset_bytes = file_att.nvm_start_addr; + *nvram_size_bytes = file_att.len; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n", + image_type, *nvram_offset_bytes, *nvram_size_bytes); + + /* Check alignment */ + if (*nvram_size_bytes & 0x3) + return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE; + + return DBG_STATUS_OK; +} + +/* Reads data from NVRAM */ +static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 nvram_offset_bytes, + u32 nvram_size_bytes, + u32 *ret_buf, + bool b_can_sleep) +{ + u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy; + s32 bytes_left = nvram_size_bytes; + u32 read_offset = 0, param = 0; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "nvram_read: reading image of size %d bytes from NVRAM\n", + nvram_size_bytes); + + do { + bytes_to_copy = + (bytes_left > + MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left; + + /* Call NVRAM read command */ + SET_MFW_FIELD(param, + DRV_MB_PARAM_NVM_OFFSET, + nvram_offset_bytes + read_offset); + SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy); + if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, param, + &ret_mcp_resp, + &ret_mcp_param, &ret_read_size, + (u32 *)((u8 *)ret_buf + read_offset), + b_can_sleep)) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Check response */ + if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK) + return DBG_STATUS_NVRAM_READ_FAILED; + + /* Update read offset */ + read_offset += ret_read_size; + bytes_left -= ret_read_size; + } while (bytes_left > 0); + + return DBG_STATUS_OK; +} + +/* Dumps GRC registers section header. Returns the dumped size in dwords. + * the following parameters are dumped: + * - count: no. of dumped entries + * - split_type: split type + * - split_id: split ID (dumped only if split_id != SPLIT_TYPE_NONE) + * - reg_type_name: register type name (dumped only if reg_type_name != NULL) + */ +static u32 qed_grc_dump_regs_hdr(u32 *dump_buf, + bool dump, + u32 num_reg_entries, + enum init_split_types split_type, + u8 split_id, const char *reg_type_name) +{ + u8 num_params = 2 + + (split_type != SPLIT_TYPE_NONE ? 1 : 0) + (reg_type_name ? 
1 : 0); + u32 offset = 0; + + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "grc_regs", num_params); + offset += qed_dump_num_param(dump_buf + offset, + dump, "count", num_reg_entries); + offset += qed_dump_str_param(dump_buf + offset, + dump, "split", + s_split_type_defs[split_type].name); + if (split_type != SPLIT_TYPE_NONE) + offset += qed_dump_num_param(dump_buf + offset, + dump, "id", split_id); + if (reg_type_name) + offset += qed_dump_str_param(dump_buf + offset, + dump, "type", reg_type_name); + + return offset; +} + +/* Reads the specified registers into the specified buffer. + * The addr and len arguments are specified in dwords. + */ +void qed_read_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len) +{ + u32 i; + + for (i = 0; i < len; i++) + buf[i] = qed_rd(p_hwfn, p_ptt, DWORDS_TO_BYTES(addr + i)); +} + +/* Dumps the GRC registers in the specified address range. + * Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ +static u32 qed_grc_dump_addr_range(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, + u8 split_id) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 port_id = 0, pf_id = 0, vf_id = 0; + bool read_using_dmae = false; + u32 thresh; + u16 fid; + + if (!dump) + return len; + + switch (split_type) { + case SPLIT_TYPE_PORT: + port_id = split_id; + break; + case SPLIT_TYPE_PF: + pf_id = split_id; + break; + case SPLIT_TYPE_PORT_PF: + port_id = split_id / dev_data->num_pfs_per_port; + pf_id = port_id + dev_data->num_ports * + (split_id % dev_data->num_pfs_per_port); + break; + case SPLIT_TYPE_VF: + vf_id = split_id; + break; + default: + break; + } + + /* Try reading using DMAE */ + if (dev_data->use_dmae && split_type != SPLIT_TYPE_VF && + (len >= s_hw_type_defs[dev_data->hw_type].dmae_thresh || + (PROTECT_WIDE_BUS && wide_bus))) { + struct qed_dmae_params dmae_params; + + /* Set DMAE params */ + memset(&dmae_params, 0, sizeof(dmae_params)); + SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1); + switch (split_type) { + case SPLIT_TYPE_PORT: + SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID, + 1); + dmae_params.port_id = port_id; + break; + case SPLIT_TYPE_PF: + SET_FIELD(dmae_params.flags, + QED_DMAE_PARAMS_SRC_PF_VALID, 1); + dmae_params.src_pfid = pf_id; + break; + case SPLIT_TYPE_PORT_PF: + SET_FIELD(dmae_params.flags, QED_DMAE_PARAMS_PORT_VALID, + 1); + SET_FIELD(dmae_params.flags, + QED_DMAE_PARAMS_SRC_PF_VALID, 1); + dmae_params.port_id = port_id; + dmae_params.src_pfid = pf_id; + break; + default: + break; + } + + /* Execute DMAE command */ + read_using_dmae = !qed_dmae_grc2host(p_hwfn, + p_ptt, + DWORDS_TO_BYTES(addr), + (u64)(uintptr_t)(dump_buf), + len, &dmae_params); + if (!read_using_dmae) { + dev_data->use_dmae = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Failed reading from chip using DMAE, using GRC instead\n"); + } + } + + if (read_using_dmae) + goto print_log; + + /* If not read using DMAE, read using GRC */ + + /* Set pretend */ + if (split_type != dev_data->pretend.split_type || + split_id != dev_data->pretend.split_id) { + switch (split_type) { + case SPLIT_TYPE_PORT: + qed_port_pretend(p_hwfn, p_ptt, port_id); + break; + case SPLIT_TYPE_PF: + fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, + pf_id); + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + case SPLIT_TYPE_PORT_PF: + fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, + 
pf_id); + qed_port_fid_pretend(p_hwfn, p_ptt, port_id, fid); + break; + case SPLIT_TYPE_VF: + fid = FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFVALID, 1) + | FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_VFID, + vf_id); + qed_fid_pretend(p_hwfn, p_ptt, fid); + break; + default: + break; + } + + dev_data->pretend.split_type = (u8)split_type; + dev_data->pretend.split_id = split_id; + } + + /* Read registers using GRC */ + qed_read_regs(p_hwfn, p_ptt, dump_buf, addr, len); + +print_log: + /* Print log */ + dev_data->num_regs_read += len; + thresh = s_hw_type_defs[dev_data->hw_type].log_thresh; + if ((dev_data->num_regs_read / thresh) > + ((dev_data->num_regs_read - len) / thresh)) + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Dumped %d registers...\n", dev_data->num_regs_read); + + return len; +} + +/* Dumps GRC registers sequence header. Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ +static u32 qed_grc_dump_reg_entry_hdr(u32 *dump_buf, + bool dump, u32 addr, u32 len) +{ + if (dump) + *dump_buf = addr | (len << REG_DUMP_LEN_SHIFT); + + return 1; +} + +/* Dumps GRC registers sequence. Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ +static u32 qed_grc_dump_reg_entry(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 addr, u32 len, bool wide_bus, + enum init_split_types split_type, u8 split_id) +{ + u32 offset = 0; + + offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, len); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, addr, len, wide_bus, + split_type, split_id); + + return offset; +} + +/* Dumps GRC registers sequence with skip cycle. + * Returns the dumped size in dwords. + * - addr: start GRC address in dwords + * - total_len: total no. of dwords to dump + * - read_len: no. consecutive dwords to read + * - skip_len: no. of dwords to skip (and fill with zeros) + */ +static u32 qed_grc_dump_reg_entry_skip(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 addr, + u32 total_len, + u32 read_len, u32 skip_len) +{ + u32 offset = 0, reg_offset = 0; + + offset += qed_grc_dump_reg_entry_hdr(dump_buf, dump, addr, total_len); + + if (!dump) + return offset + total_len; + + while (reg_offset < total_len) { + u32 curr_len = min_t(u32, read_len, total_len - reg_offset); + + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, addr, curr_len, false, + SPLIT_TYPE_NONE, 0); + reg_offset += curr_len; + addr += curr_len; + + if (reg_offset < total_len) { + curr_len = min_t(u32, skip_len, total_len - skip_len); + memset(dump_buf + offset, 0, DWORDS_TO_BYTES(curr_len)); + offset += curr_len; + reg_offset += curr_len; + addr += curr_len; + } + } + + return offset; +} + +/* Dumps GRC registers entries. Returns the dumped size in dwords. 
*/ +static u32 qed_grc_dump_regs_entries(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct virt_mem_desc input_regs_arr, + u32 *dump_buf, + bool dump, + enum init_split_types split_type, + u8 split_id, + bool block_enable[MAX_BLOCK_ID], + u32 *num_dumped_reg_entries) +{ + u32 i, offset = 0, input_offset = 0; + bool mode_match = true; + + *num_dumped_reg_entries = 0; + + while (input_offset < BYTES_TO_DWORDS(input_regs_arr.size)) { + const struct dbg_dump_cond_hdr *cond_hdr = + (const struct dbg_dump_cond_hdr *) + input_regs_arr.ptr + input_offset++; + u16 modes_buf_offset; + bool eval_mode; + + /* Check mode/block */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + if (eval_mode) { + modes_buf_offset = + GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + mode_match = qed_is_mode_match(p_hwfn, + &modes_buf_offset); + } + + if (!mode_match || !block_enable[cond_hdr->block_id]) { + input_offset += cond_hdr->data_size; + continue; + } + + for (i = 0; i < cond_hdr->data_size; i++, input_offset++) { + const struct dbg_dump_reg *reg = + (const struct dbg_dump_reg *) + input_regs_arr.ptr + input_offset; + u32 addr, len; + bool wide_bus; + + addr = GET_FIELD(reg->data, DBG_DUMP_REG_ADDRESS); + len = GET_FIELD(reg->data, DBG_DUMP_REG_LENGTH); + wide_bus = GET_FIELD(reg->data, DBG_DUMP_REG_WIDE_BUS); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len, + wide_bus, + split_type, split_id); + (*num_dumped_reg_entries)++; + } + } + + return offset; +} + +/* Dumps GRC registers entries. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_split_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct virt_mem_desc input_regs_arr, + u32 *dump_buf, + bool dump, + bool block_enable[MAX_BLOCK_ID], + enum init_split_types split_type, + u8 split_id, const char *reg_type_name) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + enum init_split_types hdr_split_type = split_type; + u32 num_dumped_reg_entries, offset; + u8 hdr_split_id = split_id; + + /* In PORT_PF split type, print a port split header */ + if (split_type == SPLIT_TYPE_PORT_PF) { + hdr_split_type = SPLIT_TYPE_PORT; + hdr_split_id = split_id / dev_data->num_pfs_per_port; + } + + /* Calculate register dump header size (and skip it for now) */ + offset = qed_grc_dump_regs_hdr(dump_buf, + false, + 0, + hdr_split_type, + hdr_split_id, reg_type_name); + + /* Dump registers */ + offset += qed_grc_dump_regs_entries(p_hwfn, + p_ptt, + input_regs_arr, + dump_buf + offset, + dump, + split_type, + split_id, + block_enable, + &num_dumped_reg_entries); + + /* Write register dump header */ + if (dump && num_dumped_reg_entries > 0) + qed_grc_dump_regs_hdr(dump_buf, + dump, + num_dumped_reg_entries, + hdr_split_type, + hdr_split_id, reg_type_name); + + return num_dumped_reg_entries > 0 ? offset : 0; +} + +/* Dumps registers according to the input registers array. Returns the dumped + * size in dwords. 
+ */ +static u32 qed_grc_dump_registers(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + bool block_enable[MAX_BLOCK_ID], + const char *reg_type_name) +{ + struct virt_mem_desc *dbg_buf = + &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG]; + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 offset = 0, input_offset = 0; + + while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { + const struct dbg_dump_split_hdr *split_hdr; + struct virt_mem_desc curr_input_regs_arr; + enum init_split_types split_type; + u16 split_count = 0; + u32 split_data_size; + u8 split_id; + + split_hdr = + (const struct dbg_dump_split_hdr *) + dbg_buf->ptr + input_offset++; + split_type = + GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); + split_data_size = GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_DATA_SIZE); + curr_input_regs_arr.ptr = + (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr + + input_offset; + curr_input_regs_arr.size = DWORDS_TO_BYTES(split_data_size); + + switch (split_type) { + case SPLIT_TYPE_NONE: + split_count = 1; + break; + case SPLIT_TYPE_PORT: + split_count = dev_data->num_ports; + break; + case SPLIT_TYPE_PF: + case SPLIT_TYPE_PORT_PF: + split_count = dev_data->num_ports * + dev_data->num_pfs_per_port; + break; + case SPLIT_TYPE_VF: + split_count = dev_data->num_vfs; + break; + default: + return 0; + } + + for (split_id = 0; split_id < split_count; split_id++) + offset += qed_grc_dump_split_data(p_hwfn, p_ptt, + curr_input_regs_arr, + dump_buf + offset, + dump, block_enable, + split_type, + split_id, + reg_type_name); + + input_offset += split_data_size; + } + + /* Cancel pretends (pretend to original PF) */ + if (dump) { + qed_fid_pretend(p_hwfn, p_ptt, + FIELD_VALUE(PXP_PRETEND_CONCRETE_FID_PFID, + p_hwfn->rel_pf_id)); + dev_data->pretend.split_type = SPLIT_TYPE_NONE; + dev_data->pretend.split_id = 0; + } + + return offset; +} + +/* Dump reset registers. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_reset_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + u32 offset = 0, num_regs = 0; + u8 reset_reg_id; + + /* Calculate header size */ + offset += qed_grc_dump_regs_hdr(dump_buf, + false, + 0, SPLIT_TYPE_NONE, 0, "RESET_REGS"); + + /* Write reset registers */ + for (reset_reg_id = 0; reset_reg_id < NUM_DBG_RESET_REGS; + reset_reg_id++) { + const struct dbg_reset_reg *reset_reg; + u32 reset_reg_addr; + + reset_reg = qed_get_dbg_reset_reg(p_hwfn, reset_reg_id); + + if (GET_FIELD(reset_reg->data, DBG_RESET_REG_IS_REMOVED)) + continue; + + reset_reg_addr = GET_FIELD(reset_reg->data, DBG_RESET_REG_ADDR); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + reset_reg_addr, + 1, false, SPLIT_TYPE_NONE, 0); + num_regs++; + } + + /* Write header */ + if (dump) + qed_grc_dump_regs_hdr(dump_buf, + true, num_regs, SPLIT_TYPE_NONE, + 0, "RESET_REGS"); + + return offset; +} + +/* Dump registers that are modified during GRC Dump and therefore must be + * dumped first. Returns the dumped size in dwords. 
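
In qed_grc_dump_registers() the number of per-split passes depends only on the split type and the device topology (ports, PFs per port, VFs). A condensed user-space sketch of that mapping; the enum and struct here are illustrative stand-ins for the driver's init_split_types and dbg_tools_data fields:

#include <stdint.h>
#include <stdio.h>

enum example_split_type {
	EX_SPLIT_NONE, EX_SPLIT_PORT, EX_SPLIT_PF, EX_SPLIT_PORT_PF, EX_SPLIT_VF
};

struct example_topology {
	uint8_t num_ports;
	uint8_t num_pfs_per_port;
	uint8_t num_vfs;
};

static uint16_t split_count(enum example_split_type type,
			    const struct example_topology *topo)
{
	switch (type) {
	case EX_SPLIT_NONE:
		return 1;
	case EX_SPLIT_PORT:
		return topo->num_ports;
	case EX_SPLIT_PF:
	case EX_SPLIT_PORT_PF:
		return (uint16_t)topo->num_ports * topo->num_pfs_per_port;
	case EX_SPLIT_VF:
		return topo->num_vfs;
	default:
		return 0;	/* unknown split type: dump nothing */
	}
}

int main(void)
{
	struct example_topology topo = {
		.num_ports = 2, .num_pfs_per_port = 4, .num_vfs = 16
	};

	printf("PF splits: %u, VF splits: %u\n",
	       (unsigned)split_count(EX_SPLIT_PF, &topo),
	       (unsigned)split_count(EX_SPLIT_VF, &topo));
	return 0;
}
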
+ */ +static u32 qed_grc_dump_modified_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 block_id, offset = 0, stall_regs_offset; + const struct dbg_attn_reg *attn_reg_arr; + u8 storm_id, reg_idx, num_attn_regs; + u32 num_reg_entries = 0; + + /* Write empty header for attention registers */ + offset += qed_grc_dump_regs_hdr(dump_buf, + false, + 0, SPLIT_TYPE_NONE, 0, "ATTN_REGS"); + + /* Write parity registers */ + for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) { + if (dev_data->block_in_reset[block_id] && dump) + continue; + + attn_reg_arr = qed_get_block_attn_regs(p_hwfn, + (enum block_id)block_id, + ATTN_TYPE_PARITY, + &num_attn_regs); + + for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { + const struct dbg_attn_reg *reg_data = + &attn_reg_arr[reg_idx]; + u16 modes_buf_offset; + bool eval_mode; + u32 addr; + + /* Check mode */ + eval_mode = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + modes_buf_offset = + GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + if (eval_mode && + !qed_is_mode_match(p_hwfn, &modes_buf_offset)) + continue; + + /* Mode match: read & dump registers */ + addr = reg_data->mask_address; + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, false, + SPLIT_TYPE_NONE, 0); + addr = GET_FIELD(reg_data->data, + DBG_ATTN_REG_STS_ADDRESS); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, false, + SPLIT_TYPE_NONE, 0); + num_reg_entries += 2; + } + } + + /* Overwrite header for attention registers */ + if (dump) + qed_grc_dump_regs_hdr(dump_buf, + true, + num_reg_entries, + SPLIT_TYPE_NONE, 0, "ATTN_REGS"); + + /* Write empty header for stall registers */ + stall_regs_offset = offset; + offset += qed_grc_dump_regs_hdr(dump_buf, + false, 0, SPLIT_TYPE_NONE, 0, "REGS"); + + /* Write Storm stall status registers */ + for (storm_id = 0, num_reg_entries = 0; storm_id < MAX_DBG_STORMS; + storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + u32 addr; + + if (dev_data->block_in_reset[storm->sem_block_id] && dump) + continue; + + addr = + BYTES_TO_DWORDS(storm->sem_fast_mem_addr + + SEM_FAST_REG_STALLED); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, + false, SPLIT_TYPE_NONE, 0); + num_reg_entries++; + } + + /* Overwrite header for stall registers */ + if (dump) + qed_grc_dump_regs_hdr(dump_buf + stall_regs_offset, + true, + num_reg_entries, + SPLIT_TYPE_NONE, 0, "REGS"); + + return offset; +} + +/* Dumps registers that can't be represented in the debug arrays */ +static u32 qed_grc_dump_special_regs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + u32 offset = 0, addr; + + offset += qed_grc_dump_regs_hdr(dump_buf, + dump, 2, SPLIT_TYPE_NONE, 0, "REGS"); + + /* Dump R/TDIF_REG_DEBUG_ERROR_INFO_SIZE (every 8'th register should be + * skipped). + */ + addr = BYTES_TO_DWORDS(RDIF_REG_DEBUG_ERROR_INFO); + offset += qed_grc_dump_reg_entry_skip(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + RDIF_REG_DEBUG_ERROR_INFO_SIZE, + 7, + 1); + addr = BYTES_TO_DWORDS(TDIF_REG_DEBUG_ERROR_INFO); + offset += + qed_grc_dump_reg_entry_skip(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + TDIF_REG_DEBUG_ERROR_INFO_SIZE, + 7, + 1); + + return offset; +} + +/* Dumps a GRC memory header (section and params). Returns the dumped size in + * dwords. 
The following parameters are dumped: + * - name: dumped only if it's not NULL. + * - addr: in dwords, dumped only if name is NULL. + * - len: in dwords, always dumped. + * - width: dumped if it's not zero. + * - packed: dumped only if it's not false. + * - mem_group: always dumped. + * - is_storm: true only if the memory is related to a Storm. + * - storm_letter: valid only if is_storm is true. + * + */ +static u32 qed_grc_dump_mem_hdr(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, + const char *name, + u32 addr, + u32 len, + u32 bit_width, + bool packed, + const char *mem_group, char storm_letter) +{ + u8 num_params = 3; + u32 offset = 0; + char buf[64]; + + if (!len) + DP_NOTICE(p_hwfn, + "Unexpected GRC Dump error: dumped memory size must be non-zero\n"); + + if (bit_width) + num_params++; + if (packed) + num_params++; + + /* Dump section header */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "grc_mem", num_params); + + if (name) { + /* Dump name */ + if (storm_letter) { + strcpy(buf, "?STORM_"); + buf[0] = storm_letter; + strcpy(buf + strlen(buf), name); + } else { + strcpy(buf, name); + } + + offset += qed_dump_str_param(dump_buf + offset, + dump, "name", buf); + } else { + /* Dump address */ + u32 addr_in_bytes = DWORDS_TO_BYTES(addr); + + offset += qed_dump_num_param(dump_buf + offset, + dump, "addr", addr_in_bytes); + } + + /* Dump len */ + offset += qed_dump_num_param(dump_buf + offset, dump, "len", len); + + /* Dump bit width */ + if (bit_width) + offset += qed_dump_num_param(dump_buf + offset, + dump, "width", bit_width); + + /* Dump packed */ + if (packed) + offset += qed_dump_num_param(dump_buf + offset, + dump, "packed", 1); + + /* Dump reg type */ + if (storm_letter) { + strcpy(buf, "?STORM_"); + buf[0] = storm_letter; + strcpy(buf + strlen(buf), mem_group); + } else { + strcpy(buf, mem_group); + } + + offset += qed_dump_str_param(dump_buf + offset, dump, "type", buf); + + return offset; +} + +/* Dumps a single GRC memory. If name is NULL, the memory is stored by address. + * Returns the dumped size in dwords. + * The addr and len arguments are specified in dwords. + */ +static u32 qed_grc_dump_mem(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + const char *name, + u32 addr, + u32 len, + bool wide_bus, + u32 bit_width, + bool packed, + const char *mem_group, char storm_letter) +{ + u32 offset = 0; + + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + name, + addr, + len, + bit_width, + packed, mem_group, storm_letter); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, addr, len, wide_bus, + SPLIT_TYPE_NONE, 0); + + return offset; +} + +/* Dumps GRC memories entries. Returns the dumped size in dwords. 
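
qed_grc_dump_mem_hdr() builds names such as "TSTORM_<name>" by overwriting the '?' placeholder of "?STORM_" with the Storm letter. The same result, shown with a bounds-checked snprintf() in a tiny standalone sketch:

#include <stdio.h>
#include <string.h>

/* Build "<letter>STORM_<name>", e.g. 'T' + "CONN_AG_CTX" -> "TSTORM_CONN_AG_CTX". */
static void build_storm_name(char *buf, size_t buf_size, char storm_letter,
			     const char *name)
{
	snprintf(buf, buf_size, "%cSTORM_%s", storm_letter, name);
}

int main(void)
{
	char buf[64];

	build_storm_name(buf, sizeof(buf), 'T', "CONN_AG_CTX");
	printf("%s\n", buf);	/* TSTORM_CONN_AG_CTX */
	return 0;
}
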
*/ +static u32 qed_grc_dump_mem_entries(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct virt_mem_desc input_mems_arr, + u32 *dump_buf, bool dump) +{ + u32 i, offset = 0, input_offset = 0; + bool mode_match = true; + + while (input_offset < BYTES_TO_DWORDS(input_mems_arr.size)) { + const struct dbg_dump_cond_hdr *cond_hdr; + u16 modes_buf_offset; + u32 num_entries; + bool eval_mode; + + cond_hdr = + (const struct dbg_dump_cond_hdr *)input_mems_arr.ptr + + input_offset++; + num_entries = cond_hdr->data_size / MEM_DUMP_ENTRY_SIZE_DWORDS; + + /* Check required mode */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + if (eval_mode) { + modes_buf_offset = + GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + mode_match = qed_is_mode_match(p_hwfn, + &modes_buf_offset); + } + + if (!mode_match) { + input_offset += cond_hdr->data_size; + continue; + } + + for (i = 0; i < num_entries; + i++, input_offset += MEM_DUMP_ENTRY_SIZE_DWORDS) { + const struct dbg_dump_mem *mem = + (const struct dbg_dump_mem *)((u32 *) + input_mems_arr.ptr + + input_offset); + const struct dbg_block *block; + char storm_letter = 0; + u32 mem_addr, mem_len; + bool mem_wide_bus; + u8 mem_group_id; + + mem_group_id = GET_FIELD(mem->dword0, + DBG_DUMP_MEM_MEM_GROUP_ID); + if (mem_group_id >= MEM_GROUPS_NUM) { + DP_NOTICE(p_hwfn, "Invalid mem_group_id\n"); + return 0; + } + + if (!qed_grc_is_mem_included(p_hwfn, + (enum block_id) + cond_hdr->block_id, + mem_group_id)) + continue; + + mem_addr = GET_FIELD(mem->dword0, DBG_DUMP_MEM_ADDRESS); + mem_len = GET_FIELD(mem->dword1, DBG_DUMP_MEM_LENGTH); + mem_wide_bus = GET_FIELD(mem->dword1, + DBG_DUMP_MEM_WIDE_BUS); + + block = get_dbg_block(p_hwfn, + cond_hdr->block_id); + + /* If memory is associated with Storm, + * update storm details + */ + if (block->associated_storm_letter) + storm_letter = block->associated_storm_letter; + + /* Dump memory */ + offset += qed_grc_dump_mem(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + NULL, + mem_addr, + mem_len, + mem_wide_bus, + 0, + false, + s_mem_group_names[mem_group_id], + storm_letter); + } + } + + return offset; +} + +/* Dumps GRC memories according to the input array dump_mem. + * Returns the dumped size in dwords. + */ +static u32 qed_grc_dump_memories(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + struct virt_mem_desc *dbg_buf = + &p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM]; + u32 offset = 0, input_offset = 0; + + while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { + const struct dbg_dump_split_hdr *split_hdr; + struct virt_mem_desc curr_input_mems_arr; + enum init_split_types split_type; + u32 split_data_size; + + split_hdr = + (const struct dbg_dump_split_hdr *)dbg_buf->ptr + + input_offset++; + split_type = GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID); + split_data_size = GET_FIELD(split_hdr->hdr, + DBG_DUMP_SPLIT_HDR_DATA_SIZE); + curr_input_mems_arr.ptr = (u32 *)dbg_buf->ptr + input_offset; + curr_input_mems_arr.size = DWORDS_TO_BYTES(split_data_size); + + if (split_type == SPLIT_TYPE_NONE) + offset += qed_grc_dump_mem_entries(p_hwfn, + p_ptt, + curr_input_mems_arr, + dump_buf + offset, + dump); + else + DP_NOTICE(p_hwfn, + "Dumping split memories is currently not supported\n"); + + input_offset += split_data_size; + } + + return offset; +} + +/* Dumps GRC context data for the specified Storm. + * Returns the dumped size in dwords. + * The lid_size argument is specified in quad-regs. 
+ */ +static u32 qed_grc_dump_ctx_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + const char *name, + u32 num_lids, + enum cm_ctx_types ctx_type, u8 storm_id) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + struct storm_defs *storm = &s_storm_defs[storm_id]; + u32 i, lid, lid_size, total_size; + u32 rd_reg_addr, offset = 0; + + /* Convert quad-regs to dwords */ + lid_size = storm->cm_ctx_lid_sizes[dev_data->chip_id][ctx_type] * 4; + + if (!lid_size) + return 0; + + total_size = num_lids * lid_size; + + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + name, + 0, + total_size, + lid_size * 32, + false, name, storm->letter); + + if (!dump) + return offset + total_size; + + rd_reg_addr = BYTES_TO_DWORDS(storm->cm_ctx_rd_addr[ctx_type]); + + /* Dump context data */ + for (lid = 0; lid < num_lids; lid++) { + for (i = 0; i < lid_size; i++) { + qed_wr(p_hwfn, + p_ptt, storm->cm_ctx_wr_addr, (i << 9) | lid); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + rd_reg_addr, + 1, + false, + SPLIT_TYPE_NONE, 0); + } + } + + return offset; +} + +/* Dumps GRC contexts. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_ctx(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + u32 offset = 0; + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id)) + continue; + + /* Dump Conn AG context size */ + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "CONN_AG_CTX", + NUM_OF_LCIDS, + CM_CTX_CONN_AG, storm_id); + + /* Dump Conn ST context size */ + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "CONN_ST_CTX", + NUM_OF_LCIDS, + CM_CTX_CONN_ST, storm_id); + + /* Dump Task AG context size */ + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "TASK_AG_CTX", + NUM_OF_LTIDS, + CM_CTX_TASK_AG, storm_id); + + /* Dump Task ST context size */ + offset += qed_grc_dump_ctx_data(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + "TASK_ST_CTX", + NUM_OF_LTIDS, + CM_CTX_TASK_ST, storm_id); + } + + return offset; +} + +#define VFC_STATUS_RESP_READY_BIT 0 +#define VFC_STATUS_BUSY_BIT 1 +#define VFC_STATUS_SENDING_CMD_BIT 2 + +#define VFC_POLLING_DELAY_MS 1 +#define VFC_POLLING_COUNT 20 + +/* Reads data from VFC. Returns the number of dwords read (0 on error). + * Sizes are specified in dwords. 
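
Context data is read indirectly in qed_grc_dump_ctx_data(): the dword index and LID are combined into one select value written to the Storm's context write-address register, and each dword is then fetched from the fixed read address. A stripped-down sketch of that windowed read; only the (i << 9) | lid encoding mirrors the code above, and the register accessors are stubs:

#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for the driver's qed_wr()/qed_rd() register accessors. */
static uint32_t last_select;
static void ctx_select_wr(uint32_t val) { last_select = val; }
static uint32_t ctx_data_rd(void) { return last_select; }

/* Read lid_size dwords of context for each of num_lids LIDs. */
static uint32_t dump_ctx(uint32_t *out, uint32_t num_lids, uint32_t lid_size)
{
	uint32_t lid, i, n = 0;

	for (lid = 0; lid < num_lids; lid++)
		for (i = 0; i < lid_size; i++) {
			ctx_select_wr((i << 9) | lid);	/* select LID + dword */
			out[n++] = ctx_data_rd();	/* fetch one dword */
		}
	return n;
}

int main(void)
{
	uint32_t buf[8];

	printf("read %u dwords\n", dump_ctx(buf, 2, 4));
	return 0;
}
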
+ */ +static u32 qed_grc_dump_read_from_vfc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct storm_defs *storm, + u32 *cmd_data, + u32 cmd_size, + u32 *addr_data, + u32 addr_size, + u32 resp_size, u32 *dump_buf) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 vfc_status, polling_ms, polling_count = 0, i; + u32 reg_addr, sem_base; + bool is_ready = false; + + sem_base = storm->sem_fast_mem_addr; + polling_ms = VFC_POLLING_DELAY_MS * + s_hw_type_defs[dev_data->hw_type].delay_factor; + + /* Write VFC command */ + ARR_REG_WR(p_hwfn, + p_ptt, + sem_base + SEM_FAST_REG_VFC_DATA_WR, + cmd_data, cmd_size); + + /* Write VFC address */ + ARR_REG_WR(p_hwfn, + p_ptt, + sem_base + SEM_FAST_REG_VFC_ADDR, + addr_data, addr_size); + + /* Read response */ + for (i = 0; i < resp_size; i++) { + /* Poll until ready */ + do { + reg_addr = sem_base + SEM_FAST_REG_VFC_STATUS; + qed_grc_dump_addr_range(p_hwfn, + p_ptt, + &vfc_status, + true, + BYTES_TO_DWORDS(reg_addr), + 1, + false, SPLIT_TYPE_NONE, 0); + is_ready = vfc_status & BIT(VFC_STATUS_RESP_READY_BIT); + + if (!is_ready) { + if (polling_count++ == VFC_POLLING_COUNT) + return 0; + + msleep(polling_ms); + } + } while (!is_ready); + + reg_addr = sem_base + SEM_FAST_REG_VFC_DATA_RD; + qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + i, + true, + BYTES_TO_DWORDS(reg_addr), + 1, false, SPLIT_TYPE_NONE, 0); + } + + return resp_size; +} + +/* Dump VFC CAM. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_vfc_cam(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump, u8 storm_id) +{ + u32 total_size = VFC_CAM_NUM_ROWS * VFC_CAM_RESP_DWORDS; + struct storm_defs *storm = &s_storm_defs[storm_id]; + u32 cam_addr[VFC_CAM_ADDR_DWORDS] = { 0 }; + u32 cam_cmd[VFC_CAM_CMD_DWORDS] = { 0 }; + u32 row, offset = 0; + + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + "vfc_cam", + 0, + total_size, + 256, + false, "vfc_cam", storm->letter); + + if (!dump) + return offset + total_size; + + /* Prepare CAM address */ + SET_VAR_FIELD(cam_addr, VFC_CAM_ADDR, OP, VFC_OPCODE_CAM_RD); + + /* Read VFC CAM data */ + for (row = 0; row < VFC_CAM_NUM_ROWS; row++) { + SET_VAR_FIELD(cam_cmd, VFC_CAM_CMD, ROW, row); + offset += qed_grc_dump_read_from_vfc(p_hwfn, + p_ptt, + storm, + cam_cmd, + VFC_CAM_CMD_DWORDS, + cam_addr, + VFC_CAM_ADDR_DWORDS, + VFC_CAM_RESP_DWORDS, + dump_buf + offset); + } + + return offset; +} + +/* Dump VFC RAM. Returns the dumped size in dwords. 
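
qed_grc_dump_read_from_vfc() bounds its status polling to VFC_POLLING_COUNT retries, returning 0 (error) if the response-ready bit never appears. A generic user-space sketch of that bounded-poll shape; the status source and the ready condition are simulated:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RESP_READY_BIT	0
#define POLL_MAX	20

/* Stand-in for reading the VFC status register: ready on the 3rd poll. */
static uint32_t read_status(void)
{
	static int calls;

	return ++calls >= 3 ? (1u << RESP_READY_BIT) : 0;
}

/* Returns true once the ready bit is observed, false after POLL_MAX tries. */
static bool poll_until_ready(void)
{
	int tries = 0;

	while (!(read_status() & (1u << RESP_READY_BIT))) {
		if (++tries == POLL_MAX)
			return false;
		/* the driver sleeps VFC_POLLING_DELAY_MS (scaled) here */
	}
	return true;
}

int main(void)
{
	printf("ready: %d\n", poll_until_ready());
	return 0;
}
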
*/ +static u32 qed_grc_dump_vfc_ram(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u8 storm_id, struct vfc_ram_defs *ram_defs) +{ + u32 total_size = ram_defs->num_rows * VFC_RAM_RESP_DWORDS; + struct storm_defs *storm = &s_storm_defs[storm_id]; + u32 ram_addr[VFC_RAM_ADDR_DWORDS] = { 0 }; + u32 ram_cmd[VFC_RAM_CMD_DWORDS] = { 0 }; + u32 row, offset = 0; + + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + ram_defs->mem_name, + 0, + total_size, + 256, + false, + ram_defs->type_name, + storm->letter); + + if (!dump) + return offset + total_size; + + /* Prepare RAM address */ + SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, OP, VFC_OPCODE_RAM_RD); + + /* Read VFC RAM data */ + for (row = ram_defs->base_row; + row < ram_defs->base_row + ram_defs->num_rows; row++) { + SET_VAR_FIELD(ram_addr, VFC_RAM_ADDR, ROW, row); + offset += qed_grc_dump_read_from_vfc(p_hwfn, + p_ptt, + storm, + ram_cmd, + VFC_RAM_CMD_DWORDS, + ram_addr, + VFC_RAM_ADDR_DWORDS, + VFC_RAM_RESP_DWORDS, + dump_buf + offset); + } + + return offset; +} + +/* Dumps GRC VFC data. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_vfc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + u8 storm_id, i; + u32 offset = 0; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + if (!qed_grc_is_storm_included(p_hwfn, + (enum dbg_storms)storm_id) || + !s_storm_defs[storm_id].has_vfc) + continue; + + /* Read CAM */ + offset += qed_grc_dump_vfc_cam(p_hwfn, + p_ptt, + dump_buf + offset, + dump, storm_id); + + /* Read RAM */ + for (i = 0; i < NUM_VFC_RAM_TYPES; i++) + offset += qed_grc_dump_vfc_ram(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + storm_id, + &s_vfc_ram_defs[i]); + } + + return offset; +} + +/* Dumps GRC RSS data. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_rss(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 offset = 0; + u8 rss_mem_id; + + for (rss_mem_id = 0; rss_mem_id < NUM_RSS_MEM_TYPES; rss_mem_id++) { + u32 rss_addr, num_entries, total_dwords; + struct rss_mem_defs *rss_defs; + u32 addr, num_dwords_to_read; + bool packed; + + rss_defs = &s_rss_mem_defs[rss_mem_id]; + rss_addr = rss_defs->addr; + num_entries = rss_defs->num_entries[dev_data->chip_id]; + total_dwords = (num_entries * rss_defs->entry_width) / 32; + packed = (rss_defs->entry_width == 16); + + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + rss_defs->mem_name, + 0, + total_dwords, + rss_defs->entry_width, + packed, + rss_defs->type_name, 0); + + /* Dump RSS data */ + if (!dump) { + offset += total_dwords; + continue; + } + + addr = BYTES_TO_DWORDS(RSS_REG_RSS_RAM_DATA); + while (total_dwords) { + num_dwords_to_read = min_t(u32, + RSS_REG_RSS_RAM_DATA_SIZE, + total_dwords); + qed_wr(p_hwfn, p_ptt, RSS_REG_RSS_RAM_ADDR, rss_addr); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + num_dwords_to_read, + false, + SPLIT_TYPE_NONE, 0); + total_dwords -= num_dwords_to_read; + rss_addr++; + } + } + + return offset; +} + +/* Dumps GRC Big RAM. Returns the dumped size in dwords. 
*/ +static u32 qed_grc_dump_big_ram(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump, u8 big_ram_id) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 block_size, ram_size, offset = 0, reg_val, i; + char mem_name[12] = "???_BIG_RAM"; + char type_name[8] = "???_RAM"; + struct big_ram_defs *big_ram; + + big_ram = &s_big_ram_defs[big_ram_id]; + ram_size = big_ram->ram_size[dev_data->chip_id]; + + reg_val = qed_rd(p_hwfn, p_ptt, big_ram->is_256b_reg_addr); + block_size = reg_val & + BIT(big_ram->is_256b_bit_offset[dev_data->chip_id]) ? 256 + : 128; + + strncpy(type_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + strncpy(mem_name, big_ram->instance_name, BIG_RAM_NAME_LEN); + + /* Dump memory header */ + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + mem_name, + 0, + ram_size, + block_size * 8, + false, type_name, 0); + + /* Read and dump Big RAM data */ + if (!dump) + return offset + ram_size; + + /* Dump Big RAM */ + for (i = 0; i < DIV_ROUND_UP(ram_size, BRB_REG_BIG_RAM_DATA_SIZE); + i++) { + u32 addr, len; + + qed_wr(p_hwfn, p_ptt, big_ram->addr_reg_addr, i); + addr = BYTES_TO_DWORDS(big_ram->data_reg_addr); + len = BRB_REG_BIG_RAM_DATA_SIZE; + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len, + false, SPLIT_TYPE_NONE, 0); + } + + return offset; +} + +/* Dumps MCP scratchpad. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_mcp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + bool block_enable[MAX_BLOCK_ID] = { 0 }; + u32 offset = 0, addr; + bool halted = false; + + /* Halt MCP */ + if (dump && !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { + halted = !qed_mcp_halt(p_hwfn, p_ptt); + if (!halted) + DP_NOTICE(p_hwfn, "MCP halt failed!\n"); + } + + /* Dump MCP scratchpad */ + offset += qed_grc_dump_mem(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + NULL, + BYTES_TO_DWORDS(MCP_REG_SCRATCH), + MCP_REG_SCRATCH_SIZE, + false, 0, false, "MCP", 0); + + /* Dump MCP cpu_reg_file */ + offset += qed_grc_dump_mem(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + NULL, + BYTES_TO_DWORDS(MCP_REG_CPU_REG_FILE), + MCP_REG_CPU_REG_FILE_SIZE, + false, 0, false, "MCP", 0); + + /* Dump MCP registers */ + block_enable[BLOCK_MCP] = true; + offset += qed_grc_dump_registers(p_hwfn, + p_ptt, + dump_buf + offset, + dump, block_enable, "MCP"); + + /* Dump required non-MCP registers */ + offset += qed_grc_dump_regs_hdr(dump_buf + offset, + dump, 1, SPLIT_TYPE_NONE, 0, + "MCP"); + addr = BYTES_TO_DWORDS(MISC_REG_SHARED_MEM_ADDR); + offset += qed_grc_dump_reg_entry(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + 1, + false, SPLIT_TYPE_NONE, 0); + + /* Release MCP */ + if (halted && qed_mcp_resume(p_hwfn, p_ptt)) + DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); + + return offset; +} + +/* Dumps the tbus indirect memory for all PHYs. + * Returns the dumped size in dwords. 
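
qed_grc_dump_mcp() halts the MCP only as a best effort: the scratchpad is dumped even if the halt fails, and resume is attempted only when the halt actually succeeded. A small sketch of that guard pattern with stubbed halt/resume calls (note that the real qed_mcp_halt()/qed_mcp_resume() return 0 on success, hence the negations in the driver code):

#include <stdbool.h>
#include <stdio.h>

/* Stubs: return true on success. */
static bool mcp_halt(void) { return true; }
static bool mcp_resume(void) { return true; }

static void dump_scratchpad(void) { puts("dumping scratchpad"); }

int main(void)
{
	bool halted = mcp_halt();

	if (!halted)
		puts("MCP halt failed, dumping anyway");

	dump_scratchpad();

	/* Resume only if we actually halted it. */
	if (halted && !mcp_resume())
		puts("failed to resume MCP");

	return 0;
}
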
+ */ +static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + u32 offset = 0, tbus_lo_offset, tbus_hi_offset; + char mem_name[32]; + u8 phy_id; + + for (phy_id = 0; phy_id < ARRAY_SIZE(s_phy_defs); phy_id++) { + u32 addr_lo_addr, addr_hi_addr, data_lo_addr, data_hi_addr; + struct phy_defs *phy_defs; + u8 *bytes_buf; + + phy_defs = &s_phy_defs[phy_id]; + addr_lo_addr = phy_defs->base_addr + + phy_defs->tbus_addr_lo_addr; + addr_hi_addr = phy_defs->base_addr + + phy_defs->tbus_addr_hi_addr; + data_lo_addr = phy_defs->base_addr + + phy_defs->tbus_data_lo_addr; + data_hi_addr = phy_defs->base_addr + + phy_defs->tbus_data_hi_addr; + + if (snprintf(mem_name, sizeof(mem_name), "tbus_%s", + phy_defs->phy_name) < 0) + DP_NOTICE(p_hwfn, + "Unexpected debug error: invalid PHY memory name\n"); + + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + mem_name, + 0, + PHY_DUMP_SIZE_DWORDS, + 16, true, mem_name, 0); + + if (!dump) { + offset += PHY_DUMP_SIZE_DWORDS; + continue; + } + + bytes_buf = (u8 *)(dump_buf + offset); + for (tbus_hi_offset = 0; + tbus_hi_offset < (NUM_PHY_TBUS_ADDRESSES >> 8); + tbus_hi_offset++) { + qed_wr(p_hwfn, p_ptt, addr_hi_addr, tbus_hi_offset); + for (tbus_lo_offset = 0; tbus_lo_offset < 256; + tbus_lo_offset++) { + qed_wr(p_hwfn, + p_ptt, addr_lo_addr, tbus_lo_offset); + *(bytes_buf++) = (u8)qed_rd(p_hwfn, + p_ptt, + data_lo_addr); + *(bytes_buf++) = (u8)qed_rd(p_hwfn, + p_ptt, + data_hi_addr); + } + } + + offset += PHY_DUMP_SIZE_DWORDS; + } + + return offset; +} + +/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */ +static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + u32 hw_dump_offset_bytes = 0, hw_dump_size_bytes = 0; + u32 hw_dump_size_dwords = 0, offset = 0; + enum dbg_status status; + + /* Read HW dump image from NVRAM */ + status = qed_find_nvram_image(p_hwfn, + p_ptt, + NVM_TYPE_HW_DUMP_OUT, + &hw_dump_offset_bytes, + &hw_dump_size_bytes, + false); + if (status != DBG_STATUS_OK) + return 0; + + hw_dump_size_dwords = BYTES_TO_DWORDS(hw_dump_size_bytes); + + /* Dump HW dump image section */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "mcp_hw_dump", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", hw_dump_size_dwords); + + /* Read MCP HW dump image into dump buffer */ + if (dump && hw_dump_size_dwords) { + status = qed_nvram_read(p_hwfn, + p_ptt, + hw_dump_offset_bytes, + hw_dump_size_bytes, + dump_buf + offset, + false); + if (status != DBG_STATUS_OK) { + DP_NOTICE(p_hwfn, + "Failed to read MCP HW Dump image from NVRAM\n"); + return 0; + } + } + offset += hw_dump_size_dwords; + + return offset; +} + +/* Dumps Static Debug data. Returns the dumped size in dwords. 
*/ +static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 block_id, line_id, offset = 0, addr, len; + + /* Don't dump static debug if a debug bus recording is in progress */ + if (dump && qed_rd(p_hwfn, p_ptt, DBG_REG_DBG_BLOCK_ON)) + return 0; + + if (dump) { + /* Disable debug bus in all blocks */ + qed_bus_disable_blocks(p_hwfn, p_ptt); + + qed_bus_reset_dbg_block(p_hwfn, p_ptt); + qed_wr(p_hwfn, + p_ptt, DBG_REG_FRAMING_MODE, DBG_BUS_FRAME_MODE_8HW); + qed_wr(p_hwfn, + p_ptt, DBG_REG_DEBUG_TARGET, DBG_BUS_TARGET_ID_INT_BUF); + qed_wr(p_hwfn, p_ptt, DBG_REG_FULL_MODE, 1); + qed_bus_enable_dbg_block(p_hwfn, p_ptt, true); + } + + /* Dump all static debug lines for each relevant block */ + for (block_id = 0; block_id < MAX_BLOCK_ID; block_id++) { + const struct dbg_block_chip *block_per_chip; + const struct dbg_block *block; + bool is_removed, has_dbg_bus; + u16 modes_buf_offset; + u32 block_dwords; + + block_per_chip = + qed_get_dbg_block_per_chip(p_hwfn, (enum block_id)block_id); + is_removed = GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_IS_REMOVED); + has_dbg_bus = GET_FIELD(block_per_chip->flags, + DBG_BLOCK_CHIP_HAS_DBG_BUS); + + if (!is_removed && has_dbg_bus && + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0) { + modes_buf_offset = + GET_FIELD(block_per_chip->dbg_bus_mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + if (!qed_is_mode_match(p_hwfn, &modes_buf_offset)) + has_dbg_bus = false; + } + + if (is_removed || !has_dbg_bus) + continue; + + block_dwords = NUM_DBG_LINES(block_per_chip) * + STATIC_DEBUG_LINE_DWORDS; + + /* Dump static section params */ + block = get_dbg_block(p_hwfn, (enum block_id)block_id); + offset += qed_grc_dump_mem_hdr(p_hwfn, + dump_buf + offset, + dump, + block->name, + 0, + block_dwords, + 32, false, "STATIC", 0); + + if (!dump) { + offset += block_dwords; + continue; + } + + /* If all lines are invalid - dump zeros */ + if (dev_data->block_in_reset[block_id]) { + memset(dump_buf + offset, 0, + DWORDS_TO_BYTES(block_dwords)); + offset += block_dwords; + continue; + } + + /* Enable block's client */ + qed_bus_enable_clients(p_hwfn, + p_ptt, + BIT(block_per_chip->dbg_client_id)); + + addr = BYTES_TO_DWORDS(DBG_REG_CALENDAR_OUT_DATA); + len = STATIC_DEBUG_LINE_DWORDS; + for (line_id = 0; line_id < (u32)NUM_DBG_LINES(block_per_chip); + line_id++) { + /* Configure debug line ID */ + qed_bus_config_dbg_line(p_hwfn, + p_ptt, + (enum block_id)block_id, + (u8)line_id, 0xf, 0, 0, 0); + + /* Read debug line info */ + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + len, + true, SPLIT_TYPE_NONE, + 0); + } + + /* Disable block's client and debug output */ + qed_bus_enable_clients(p_hwfn, p_ptt, 0); + qed_bus_config_dbg_line(p_hwfn, p_ptt, + (enum block_id)block_id, 0, 0, 0, 0, 0); + } + + if (dump) { + qed_bus_enable_dbg_block(p_hwfn, p_ptt, false); + qed_bus_enable_clients(p_hwfn, p_ptt, 0); + } + + return offset; +} + +/* Performs GRC Dump to the specified buffer. + * Returns the dumped size in dwords. 
+ */ +static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 *num_dumped_dwords) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + bool parities_masked = false; + u32 dwords_read, offset = 0; + u8 i; + + *num_dumped_dwords = 0; + dev_data->num_regs_read = 0; + + /* Update reset state */ + if (dump) + qed_update_blocks_reset_state(p_hwfn, p_ptt); + + /* Dump global params */ + offset += qed_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, dump, 4); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "grc-dump"); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "num-lcids", + NUM_OF_LCIDS); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "num-ltids", + NUM_OF_LTIDS); + offset += qed_dump_num_param(dump_buf + offset, + dump, "num-ports", dev_data->num_ports); + + /* Dump reset registers (dumped before taking blocks out of reset ) */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) + offset += qed_grc_dump_reset_regs(p_hwfn, + p_ptt, + dump_buf + offset, dump); + + /* Take all blocks out of reset (using reset registers) */ + if (dump) { + qed_grc_unreset_blocks(p_hwfn, p_ptt, false); + qed_update_blocks_reset_state(p_hwfn, p_ptt); + } + + /* Disable all parities using MFW command */ + if (dump && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP)) { + parities_masked = !qed_mcp_mask_parities(p_hwfn, p_ptt, 1); + if (!parities_masked) { + DP_NOTICE(p_hwfn, + "Failed to mask parities using MFW\n"); + if (qed_grc_get_param + (p_hwfn, DBG_GRC_PARAM_PARITY_SAFE)) + return DBG_STATUS_MCP_COULD_NOT_MASK_PRTY; + } + } + + /* Dump modified registers (dumped before modifying them) */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) + offset += qed_grc_dump_modified_regs(p_hwfn, + p_ptt, + dump_buf + offset, dump); + + /* Stall storms */ + if (dump && + (qed_grc_is_included(p_hwfn, + DBG_GRC_PARAM_DUMP_IOR) || + qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC))) + qed_grc_stall_storms(p_hwfn, p_ptt, true); + + /* Dump all regs */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_REGS)) { + bool block_enable[MAX_BLOCK_ID]; + + /* Dump all blocks except MCP */ + for (i = 0; i < MAX_BLOCK_ID; i++) + block_enable[i] = true; + block_enable[BLOCK_MCP] = false; + offset += qed_grc_dump_registers(p_hwfn, + p_ptt, + dump_buf + + offset, + dump, + block_enable, NULL); + + /* Dump special registers */ + offset += qed_grc_dump_special_regs(p_hwfn, + p_ptt, + dump_buf + offset, dump); + } + + /* Dump memories */ + offset += qed_grc_dump_memories(p_hwfn, p_ptt, dump_buf + offset, dump); + + /* Dump MCP */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP)) + offset += qed_grc_dump_mcp(p_hwfn, + p_ptt, dump_buf + offset, dump); + + /* Dump context */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_CM_CTX)) + offset += qed_grc_dump_ctx(p_hwfn, + p_ptt, dump_buf + offset, dump); + + /* Dump RSS memories */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_RSS)) + offset += qed_grc_dump_rss(p_hwfn, + p_ptt, dump_buf + offset, dump); + + /* Dump Big RAM */ + for (i = 0; i < NUM_BIG_RAM_TYPES; i++) + if (qed_grc_is_included(p_hwfn, s_big_ram_defs[i].grc_param)) + offset += qed_grc_dump_big_ram(p_hwfn, + p_ptt, + dump_buf + offset, + dump, i); + + /* Dump VFC */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_VFC)) { + dwords_read = qed_grc_dump_vfc(p_hwfn, + p_ptt, dump_buf + offset, dump); + offset += dwords_read; + if (!dwords_read) + return 
DBG_STATUS_VFC_READ_ERROR; + } + + /* Dump PHY tbus */ + if (qed_grc_is_included(p_hwfn, + DBG_GRC_PARAM_DUMP_PHY) && dev_data->chip_id == + CHIP_K2 && dev_data->hw_type == HW_TYPE_ASIC) + offset += qed_grc_dump_phy(p_hwfn, + p_ptt, dump_buf + offset, dump); + + /* Dump MCP HW Dump */ + if (qed_grc_is_included(p_hwfn, DBG_GRC_PARAM_DUMP_MCP_HW_DUMP) && + !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP) && 1) + offset += qed_grc_dump_mcp_hw_dump(p_hwfn, + p_ptt, + dump_buf + offset, dump); + + /* Dump static debug data (only if not during debug bus recording) */ + if (qed_grc_is_included(p_hwfn, + DBG_GRC_PARAM_DUMP_STATIC) && + (!dump || dev_data->bus.state == DBG_BUS_STATE_IDLE)) + offset += qed_grc_dump_static_debug(p_hwfn, + p_ptt, + dump_buf + offset, dump); + + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + if (dump) { + /* Unstall storms */ + if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_UNSTALL)) + qed_grc_stall_storms(p_hwfn, p_ptt, false); + + /* Clear parity status */ + qed_grc_clear_all_prty(p_hwfn, p_ptt); + + /* Enable all parities using MFW command */ + if (parities_masked) + qed_mcp_mask_parities(p_hwfn, p_ptt, 0); + } + + *num_dumped_dwords = offset; + + return DBG_STATUS_OK; +} + +/* Writes the specified failing Idle Check rule to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u16 rule_id, + const struct dbg_idle_chk_rule *rule, + u16 fail_entry_id, u32 *cond_reg_values) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + const struct dbg_idle_chk_cond_reg *cond_regs; + const struct dbg_idle_chk_info_reg *info_regs; + u32 i, next_reg_offset = 0, offset = 0; + struct dbg_idle_chk_result_hdr *hdr; + const union dbg_idle_chk_reg *regs; + u8 reg_id; + + hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; + regs = (const union dbg_idle_chk_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr + + rule->reg_offset; + cond_regs = ®s[0].cond_reg; + info_regs = ®s[rule->num_cond_regs].info_reg; + + /* Dump rule data */ + if (dump) { + memset(hdr, 0, sizeof(*hdr)); + hdr->rule_id = rule_id; + hdr->mem_entry_id = fail_entry_id; + hdr->severity = rule->severity; + hdr->num_dumped_cond_regs = rule->num_cond_regs; + } + + offset += IDLE_CHK_RESULT_HDR_DWORDS; + + /* Dump condition register values */ + for (reg_id = 0; reg_id < rule->num_cond_regs; reg_id++) { + const struct dbg_idle_chk_cond_reg *reg = &cond_regs[reg_id]; + struct dbg_idle_chk_result_reg_hdr *reg_hdr; + + reg_hdr = + (struct dbg_idle_chk_result_reg_hdr *)(dump_buf + offset); + + /* Write register header */ + if (!dump) { + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + + reg->entry_size; + continue; + } + + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; + memset(reg_hdr, 0, sizeof(*reg_hdr)); + reg_hdr->start_entry = reg->start_entry; + reg_hdr->size = reg->entry_size; + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM, + reg->num_entries > 1 || reg->start_entry > 0 ? 
1 : 0); + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, reg_id); + + /* Write register values */ + for (i = 0; i < reg_hdr->size; i++, next_reg_offset++, offset++) + dump_buf[offset] = cond_reg_values[next_reg_offset]; + } + + /* Dump info register values */ + for (reg_id = 0; reg_id < rule->num_info_regs; reg_id++) { + const struct dbg_idle_chk_info_reg *reg = &info_regs[reg_id]; + u32 block_id; + + /* Check if register's block is in reset */ + if (!dump) { + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS + reg->size; + continue; + } + + block_id = GET_FIELD(reg->data, DBG_IDLE_CHK_INFO_REG_BLOCK_ID); + if (block_id >= MAX_BLOCK_ID) { + DP_NOTICE(p_hwfn, "Invalid block_id\n"); + return 0; + } + + if (!dev_data->block_in_reset[block_id]) { + struct dbg_idle_chk_result_reg_hdr *reg_hdr; + bool wide_bus, eval_mode, mode_match = true; + u16 modes_buf_offset; + u32 addr; + + reg_hdr = (struct dbg_idle_chk_result_reg_hdr *) + (dump_buf + offset); + + /* Check mode */ + eval_mode = GET_FIELD(reg->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + if (eval_mode) { + modes_buf_offset = + GET_FIELD(reg->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + mode_match = + qed_is_mode_match(p_hwfn, + &modes_buf_offset); + } + + if (!mode_match) + continue; + + addr = GET_FIELD(reg->data, + DBG_IDLE_CHK_INFO_REG_ADDRESS); + wide_bus = GET_FIELD(reg->data, + DBG_IDLE_CHK_INFO_REG_WIDE_BUS); + + /* Write register header */ + offset += IDLE_CHK_RESULT_REG_HDR_DWORDS; + hdr->num_dumped_info_regs++; + memset(reg_hdr, 0, sizeof(*reg_hdr)); + reg_hdr->size = reg->size; + SET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID, + rule->num_cond_regs + reg_id); + + /* Write register values */ + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + addr, + reg->size, wide_bus, + SPLIT_TYPE_NONE, 0); + } + } + + return offset; +} + +/* Dumps idle check rule entries. Returns the dumped size in dwords. */ +static u32 +qed_idle_chk_dump_rule_entries(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 *dump_buf, bool dump, + const struct dbg_idle_chk_rule *input_rules, + u32 num_input_rules, u32 *num_failing_rules) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 cond_reg_values[IDLE_CHK_MAX_ENTRIES_SIZE]; + u32 i, offset = 0; + u16 entry_id; + u8 reg_id; + + *num_failing_rules = 0; + + for (i = 0; i < num_input_rules; i++) { + const struct dbg_idle_chk_cond_reg *cond_regs; + const struct dbg_idle_chk_rule *rule; + const union dbg_idle_chk_reg *regs; + u16 num_reg_entries = 1; + bool check_rule = true; + const u32 *imm_values; + + rule = &input_rules[i]; + regs = (const union dbg_idle_chk_reg *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr + + rule->reg_offset; + cond_regs = ®s[0].cond_reg; + imm_values = + (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr + + rule->imm_offset; + + /* Check if all condition register blocks are out of reset, and + * find maximal number of entries (all condition registers that + * are memories must have the same size, which is > 1). 
+ */ + for (reg_id = 0; reg_id < rule->num_cond_regs && check_rule; + reg_id++) { + u32 block_id = + GET_FIELD(cond_regs[reg_id].data, + DBG_IDLE_CHK_COND_REG_BLOCK_ID); + + if (block_id >= MAX_BLOCK_ID) { + DP_NOTICE(p_hwfn, "Invalid block_id\n"); + return 0; + } + + check_rule = !dev_data->block_in_reset[block_id]; + if (cond_regs[reg_id].num_entries > num_reg_entries) + num_reg_entries = cond_regs[reg_id].num_entries; + } + + if (!check_rule && dump) + continue; + + if (!dump) { + u32 entry_dump_size = + qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + false, + rule->rule_id, + rule, + 0, + NULL); + + offset += num_reg_entries * entry_dump_size; + (*num_failing_rules) += num_reg_entries; + continue; + } + + /* Go over all register entries (number of entries is the same + * for all condition registers). + */ + for (entry_id = 0; entry_id < num_reg_entries; entry_id++) { + u32 next_reg_offset = 0; + + /* Read current entry of all condition registers */ + for (reg_id = 0; reg_id < rule->num_cond_regs; + reg_id++) { + const struct dbg_idle_chk_cond_reg *reg = + &cond_regs[reg_id]; + u32 padded_entry_size, addr; + bool wide_bus; + + /* Find GRC address (if it's a memory, the + * address of the specific entry is calculated). + */ + addr = GET_FIELD(reg->data, + DBG_IDLE_CHK_COND_REG_ADDRESS); + wide_bus = + GET_FIELD(reg->data, + DBG_IDLE_CHK_COND_REG_WIDE_BUS); + if (reg->num_entries > 1 || + reg->start_entry > 0) { + padded_entry_size = + reg->entry_size > 1 ? + roundup_pow_of_two(reg->entry_size) : + 1; + addr += (reg->start_entry + entry_id) * + padded_entry_size; + } + + /* Read registers */ + if (next_reg_offset + reg->entry_size >= + IDLE_CHK_MAX_ENTRIES_SIZE) { + DP_NOTICE(p_hwfn, + "idle check registers entry is too large\n"); + return 0; + } + + next_reg_offset += + qed_grc_dump_addr_range(p_hwfn, p_ptt, + cond_reg_values + + next_reg_offset, + dump, addr, + reg->entry_size, + wide_bus, + SPLIT_TYPE_NONE, 0); + } + + /* Call rule condition function. + * If returns true, it's a failure. + */ + if ((*cond_arr[rule->cond_id]) (cond_reg_values, + imm_values)) { + offset += qed_idle_chk_dump_failure(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + rule->rule_id, + rule, + entry_id, + cond_reg_values); + (*num_failing_rules)++; + } + } + } + + return offset; +} + +/* Performs Idle Check Dump to the specified buffer. + * Returns the dumped size in dwords. 
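
When an idle-check condition register is a memory, the per-entry GRC address is computed with the entry size padded up to a power of two. A standalone sketch of that address arithmetic, with a local stand-in for roundup_pow_of_two():

#include <stdint.h>
#include <stdio.h>

/* Smallest power of two >= n (n >= 1); stand-in for roundup_pow_of_two(). */
static uint32_t roundup_pow2(uint32_t n)
{
	uint32_t p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* GRC address (in dwords) of a given entry of a memory-type condition reg. */
static uint32_t entry_addr(uint32_t base_addr, uint32_t start_entry,
			   uint32_t entry_id, uint32_t entry_size)
{
	uint32_t padded = entry_size > 1 ? roundup_pow2(entry_size) : 1;

	return base_addr + (start_entry + entry_id) * padded;
}

int main(void)
{
	/* entry_size 3 is padded to 4 dwords per entry */
	printf("0x%x\n", entry_addr(0x1000, 0, 5, 3));	/* 0x1000 + 5 * 4 */
	return 0;
}
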
+ */ +static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + struct virt_mem_desc *dbg_buf = + &p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES]; + u32 num_failing_rules_offset, offset = 0, + input_offset = 0, num_failing_rules = 0; + + /* Dump global params - 1 must match below amount of params */ + offset += qed_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, dump, 1); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "idle-chk"); + + /* Dump idle check section header with a single parameter */ + offset += qed_dump_section_hdr(dump_buf + offset, dump, "idle_chk", 1); + num_failing_rules_offset = offset; + offset += qed_dump_num_param(dump_buf + offset, dump, "num_rules", 0); + + while (input_offset < BYTES_TO_DWORDS(dbg_buf->size)) { + const struct dbg_idle_chk_cond_hdr *cond_hdr = + (const struct dbg_idle_chk_cond_hdr *)dbg_buf->ptr + + input_offset++; + bool eval_mode, mode_match = true; + u32 curr_failing_rules; + u16 modes_buf_offset; + + /* Check mode */ + eval_mode = GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + if (eval_mode) { + modes_buf_offset = + GET_FIELD(cond_hdr->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + mode_match = qed_is_mode_match(p_hwfn, + &modes_buf_offset); + } + + if (mode_match) { + const struct dbg_idle_chk_rule *rule = + (const struct dbg_idle_chk_rule *)((u32 *) + dbg_buf->ptr + + input_offset); + u32 num_input_rules = + cond_hdr->data_size / IDLE_CHK_RULE_SIZE_DWORDS; + offset += + qed_idle_chk_dump_rule_entries(p_hwfn, + p_ptt, + dump_buf + + offset, + dump, + rule, + num_input_rules, + &curr_failing_rules); + num_failing_rules += curr_failing_rules; + } + + input_offset += cond_hdr->data_size; + } + + /* Overwrite num_rules parameter */ + if (dump) + qed_dump_num_param(dump_buf + num_failing_rules_offset, + dump, "num_rules", num_failing_rules); + + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + return offset; +} + +/* Get info on the MCP Trace data in the scratchpad: + * - trace_data_grc_addr (OUT): trace data GRC address in bytes + * - trace_data_size (OUT): trace data size in bytes (without the header) + */ +static enum dbg_status qed_mcp_trace_get_data_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *trace_data_grc_addr, + u32 *trace_data_size) +{ + u32 spad_trace_offsize, signature; + + /* Read trace section offsize structure from MCP scratchpad */ + spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); + + /* Extract trace section address from offsize (in scratchpad) */ + *trace_data_grc_addr = + MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize); + + /* Read signature from MCP trace section */ + signature = qed_rd(p_hwfn, p_ptt, + *trace_data_grc_addr + + offsetof(struct mcp_trace, signature)); + + if (signature != MFW_TRACE_SIGNATURE) + return DBG_STATUS_INVALID_TRACE_SIGNATURE; + + /* Read trace size from MCP trace section */ + *trace_data_size = qed_rd(p_hwfn, + p_ptt, + *trace_data_grc_addr + + offsetof(struct mcp_trace, size)); + + return DBG_STATUS_OK; +} + +/* Reads MCP trace meta data image from NVRAM + * - running_bundle_id (OUT): running bundle ID (invalid when loaded from file) + * - trace_meta_offset (OUT): trace meta offset in NVRAM in bytes (invalid when + * loaded from file). + * - trace_meta_size (OUT): size in bytes of the trace meta data. 
+ */ +static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 trace_data_size_bytes, + u32 *running_bundle_id, + u32 *trace_meta_offset, + u32 *trace_meta_size) +{ + u32 spad_trace_offsize, nvram_image_type, running_mfw_addr; + + /* Read MCP trace section offsize structure from MCP scratchpad */ + spad_trace_offsize = qed_rd(p_hwfn, p_ptt, MCP_SPAD_TRACE_OFFSIZE_ADDR); + + /* Find running bundle ID */ + running_mfw_addr = + MCP_REG_SCRATCH + SECTION_OFFSET(spad_trace_offsize) + + QED_SECTION_SIZE(spad_trace_offsize) + trace_data_size_bytes; + *running_bundle_id = qed_rd(p_hwfn, p_ptt, running_mfw_addr); + if (*running_bundle_id > 1) + return DBG_STATUS_INVALID_NVRAM_BUNDLE; + + /* Find image in NVRAM */ + nvram_image_type = + (*running_bundle_id == + DIR_ID_1) ? NVM_TYPE_MFW_TRACE1 : NVM_TYPE_MFW_TRACE2; + return qed_find_nvram_image(p_hwfn, + p_ptt, + nvram_image_type, + trace_meta_offset, + trace_meta_size, + true); +} + +/* Reads the MCP Trace meta data from NVRAM into the specified buffer */ +static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 nvram_offset_in_bytes, + u32 size_in_bytes, u32 *buf) +{ + u8 modules_num, module_len, i, *byte_buf = (u8 *)buf; + enum dbg_status status; + u32 signature; + + /* Read meta data from NVRAM */ + status = qed_nvram_read(p_hwfn, + p_ptt, + nvram_offset_in_bytes, + size_in_bytes, + buf, + true); + if (status != DBG_STATUS_OK) + return status; + + /* Extract and check first signature */ + signature = qed_read_unaligned_dword(byte_buf); + byte_buf += sizeof(signature); + if (signature != NVM_MAGIC_VALUE) + return DBG_STATUS_INVALID_TRACE_SIGNATURE; + + /* Extract number of modules */ + modules_num = *(byte_buf++); + + /* Skip all modules */ + for (i = 0; i < modules_num; i++) { + module_len = *(byte_buf++); + byte_buf += module_len; + } + + /* Extract and check second signature */ + signature = qed_read_unaligned_dword(byte_buf); + byte_buf += sizeof(signature); + if (signature != NVM_MAGIC_VALUE) + return DBG_STATUS_INVALID_TRACE_SIGNATURE; + + return DBG_STATUS_OK; +} + +/* Dump MCP Trace */ +static enum dbg_status qed_mcp_trace_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 *num_dumped_dwords) +{ + u32 trace_data_grc_addr, trace_data_size_bytes, trace_data_size_dwords; + u32 trace_meta_size_dwords = 0, running_bundle_id, offset = 0; + u32 trace_meta_offset_bytes = 0, trace_meta_size_bytes = 0; + enum dbg_status status; + int halted = 0; + bool use_mfw; + + *num_dumped_dwords = 0; + + use_mfw = !qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_NO_MCP); + + /* Get trace data info */ + status = qed_mcp_trace_get_data_info(p_hwfn, + p_ptt, + &trace_data_grc_addr, + &trace_data_size_bytes); + if (status != DBG_STATUS_OK) + return status; + + /* Dump global params */ + offset += qed_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, dump, 1); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "mcp-trace"); + + /* Halt MCP while reading from scratchpad so the read data will be + * consistent. if halt fails, MCP trace is taken anyway, with a small + * risk that it may be corrupt. 
+ */ + if (dump && use_mfw) { + halted = !qed_mcp_halt(p_hwfn, p_ptt); + if (!halted) + DP_NOTICE(p_hwfn, "MCP halt failed!\n"); + } + + /* Find trace data size */ + trace_data_size_dwords = + DIV_ROUND_UP(trace_data_size_bytes + sizeof(struct mcp_trace), + BYTES_IN_DWORD); + + /* Dump trace data section header and param */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "mcp_trace_data", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", trace_data_size_dwords); + + /* Read trace data from scratchpad into dump buffer */ + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + dump, + BYTES_TO_DWORDS(trace_data_grc_addr), + trace_data_size_dwords, false, + SPLIT_TYPE_NONE, 0); + + /* Resume MCP (only if halt succeeded) */ + if (halted && qed_mcp_resume(p_hwfn, p_ptt)) + DP_NOTICE(p_hwfn, "Failed to resume MCP after halt!\n"); + + /* Dump trace meta section header */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "mcp_trace_meta", 1); + + /* If MCP Trace meta size parameter was set, use it. + * Otherwise, read trace meta. + * trace_meta_size_bytes is dword-aligned. + */ + trace_meta_size_bytes = + qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_MCP_TRACE_META_SIZE); + if ((!trace_meta_size_bytes || dump) && use_mfw) + status = qed_mcp_trace_get_meta_info(p_hwfn, + p_ptt, + trace_data_size_bytes, + &running_bundle_id, + &trace_meta_offset_bytes, + &trace_meta_size_bytes); + if (status == DBG_STATUS_OK) + trace_meta_size_dwords = BYTES_TO_DWORDS(trace_meta_size_bytes); + + /* Dump trace meta size param */ + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", trace_meta_size_dwords); + + /* Read trace meta image into dump buffer */ + if (dump && trace_meta_size_dwords) + status = qed_mcp_trace_read_meta(p_hwfn, + p_ptt, + trace_meta_offset_bytes, + trace_meta_size_bytes, + dump_buf + offset); + if (status == DBG_STATUS_OK) + offset += trace_meta_size_dwords; + + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + *num_dumped_dwords = offset; + + /* If no mcp access, indicate that the dump doesn't contain the meta + * data from NVRAM. + */ + return use_mfw ? status : DBG_STATUS_NVRAM_GET_IMAGE_FAILED; +} + +/* Dump GRC FIFO */ +static enum dbg_status qed_reg_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 *num_dumped_dwords) +{ + u32 dwords_read, size_param_offset, offset = 0, addr, len; + bool fifo_has_data; + + *num_dumped_dwords = 0; + + /* Dump global params */ + offset += qed_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, dump, 1); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "reg-fifo"); + + /* Dump fifo data section header and param. The size param is 0 for + * now, and is overwritten after reading the FIFO. + */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "reg_fifo_data", 1); + size_param_offset = offset; + offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); + + if (!dump) { + /* FIFO max size is REG_FIFO_DEPTH_DWORDS. There is no way to + * test how much data is available, except for reading it. + */ + offset += REG_FIFO_DEPTH_DWORDS; + goto out; + } + + fifo_has_data = qed_rd(p_hwfn, p_ptt, + GRC_REG_TRACE_FIFO_VALID_DATA) > 0; + + /* Pull available data from fifo. Use DMAE since this is widebus memory + * and must be accessed atomically. Test for dwords_read not passing + * buffer size since more entries could be added to the buffer as we are + * emptying it. 
+ */ + addr = BYTES_TO_DWORDS(GRC_REG_TRACE_FIFO); + len = REG_FIFO_ELEMENT_DWORDS; + for (dwords_read = 0; + fifo_has_data && dwords_read < REG_FIFO_DEPTH_DWORDS; + dwords_read += REG_FIFO_ELEMENT_DWORDS) { + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + len, + true, SPLIT_TYPE_NONE, + 0); + fifo_has_data = qed_rd(p_hwfn, p_ptt, + GRC_REG_TRACE_FIFO_VALID_DATA) > 0; + } + + qed_dump_num_param(dump_buf + size_param_offset, dump, "size", + dwords_read); +out: + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + *num_dumped_dwords = offset; + + return DBG_STATUS_OK; +} + +/* Dump IGU FIFO */ +static enum dbg_status qed_igu_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, u32 *num_dumped_dwords) +{ + u32 dwords_read, size_param_offset, offset = 0, addr, len; + bool fifo_has_data; + + *num_dumped_dwords = 0; + + /* Dump global params */ + offset += qed_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, dump, 1); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "igu-fifo"); + + /* Dump fifo data section header and param. The size param is 0 for + * now, and is overwritten after reading the FIFO. + */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "igu_fifo_data", 1); + size_param_offset = offset; + offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); + + if (!dump) { + /* FIFO max size is IGU_FIFO_DEPTH_DWORDS. There is no way to + * test how much data is available, except for reading it. + */ + offset += IGU_FIFO_DEPTH_DWORDS; + goto out; + } + + fifo_has_data = qed_rd(p_hwfn, p_ptt, + IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; + + /* Pull available data from fifo. Use DMAE since this is widebus memory + * and must be accessed atomically. Test for dwords_read not passing + * buffer size since more entries could be added to the buffer as we are + * emptying it. + */ + addr = BYTES_TO_DWORDS(IGU_REG_ERROR_HANDLING_MEMORY); + len = IGU_FIFO_ELEMENT_DWORDS; + for (dwords_read = 0; + fifo_has_data && dwords_read < IGU_FIFO_DEPTH_DWORDS; + dwords_read += IGU_FIFO_ELEMENT_DWORDS) { + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + len, + true, SPLIT_TYPE_NONE, + 0); + fifo_has_data = qed_rd(p_hwfn, p_ptt, + IGU_REG_ERROR_HANDLING_DATA_VALID) > 0; + } + + qed_dump_num_param(dump_buf + size_param_offset, dump, "size", + dwords_read); +out: + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + *num_dumped_dwords = offset; + + return DBG_STATUS_OK; +} + +/* Protection Override dump */ +static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 *num_dumped_dwords) +{ + u32 size_param_offset, override_window_dwords, offset = 0, addr; + + *num_dumped_dwords = 0; + + /* Dump global params */ + offset += qed_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, dump, 1); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "protection-override"); + + /* Dump data section header and param. The size param is 0 for now, + * and is overwritten after reading the data. 
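
The reg and IGU FIFO dumps share one drain loop: fixed-size elements are pulled while the hardware still reports valid data, bounded by the FIFO depth because new entries can arrive while draining. A compact sketch of that loop against a simulated FIFO (sizes and helpers are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ELEMENT_DWORDS	2
#define DEPTH_DWORDS	(32 * ELEMENT_DWORDS)

static int pending = 5;				/* simulated element count */
static bool fifo_has_data(void) { return pending > 0; }
static void read_element(uint32_t *out) { out[0] = out[1] = (uint32_t)pending--; }

static uint32_t drain_fifo(uint32_t *buf)
{
	uint32_t dwords_read = 0;

	for (; fifo_has_data() && dwords_read < DEPTH_DWORDS;
	     dwords_read += ELEMENT_DWORDS)
		read_element(buf + dwords_read);

	return dwords_read;	/* patched into the "size" param afterwards */
}

int main(void)
{
	uint32_t buf[DEPTH_DWORDS];

	printf("read %u dwords\n", drain_fifo(buf));
	return 0;
}
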
+ */ + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "protection_override_data", 1); + size_param_offset = offset; + offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0); + + if (!dump) { + offset += PROTECTION_OVERRIDE_DEPTH_DWORDS; + goto out; + } + + /* Add override window info to buffer */ + override_window_dwords = + qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) * + PROTECTION_OVERRIDE_ELEMENT_DWORDS; + if (override_window_dwords) { + addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW); + offset += qed_grc_dump_addr_range(p_hwfn, + p_ptt, + dump_buf + offset, + true, + addr, + override_window_dwords, + true, SPLIT_TYPE_NONE, 0); + qed_dump_num_param(dump_buf + size_param_offset, dump, "size", + override_window_dwords); + } +out: + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + *num_dumped_dwords = offset; + + return DBG_STATUS_OK; +} + +/* Performs FW Asserts Dump to the specified buffer. + * Returns the dumped size in dwords. + */ +static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, bool dump) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + struct fw_asserts_ram_section *asserts; + char storm_letter_str[2] = "?"; + struct fw_info fw_info; + u32 offset = 0; + u8 storm_id; + + /* Dump global params */ + offset += qed_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, dump, 1); + offset += qed_dump_str_param(dump_buf + offset, + dump, "dump-type", "fw-asserts"); + + /* Find Storm dump size */ + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + u32 fw_asserts_section_addr, next_list_idx_addr, next_list_idx; + struct storm_defs *storm = &s_storm_defs[storm_id]; + u32 last_list_idx, addr; + + if (dev_data->block_in_reset[storm->sem_block_id]) + continue; + + /* Read FW info for the current Storm */ + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, &fw_info); + + asserts = &fw_info.fw_asserts_section; + + /* Dump FW Asserts section header and params */ + storm_letter_str[0] = storm->letter; + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "fw_asserts", 2); + offset += qed_dump_str_param(dump_buf + offset, + dump, "storm", storm_letter_str); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "size", + asserts->list_element_dword_size); + + /* Read and dump FW Asserts data */ + if (!dump) { + offset += asserts->list_element_dword_size; + continue; + } + + addr = le16_to_cpu(asserts->section_ram_line_offset); + fw_asserts_section_addr = storm->sem_fast_mem_addr + + SEM_FAST_REG_INT_RAM + + RAM_LINES_TO_BYTES(addr); + + next_list_idx_addr = fw_asserts_section_addr + + DWORDS_TO_BYTES(asserts->list_next_index_dword_offset); + next_list_idx = qed_rd(p_hwfn, p_ptt, next_list_idx_addr); + last_list_idx = (next_list_idx > 0 ? + next_list_idx : + asserts->list_num_elements) - 1; + addr = BYTES_TO_DWORDS(fw_asserts_section_addr) + + asserts->list_dword_offset + + last_list_idx * asserts->list_element_dword_size; + offset += + qed_grc_dump_addr_range(p_hwfn, p_ptt, + dump_buf + offset, + dump, addr, + asserts->list_element_dword_size, + false, SPLIT_TYPE_NONE, 0); + } + + /* Dump last section */ + offset += qed_dump_last_section(dump_buf, offset, dump); + + return offset; +} + +/* Dumps the specified ILT pages to the specified buffer. + * Returns the dumped size in dwords. 
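
qed_fw_asserts_dump() reads only the most recently written list element; since list_next_index points at the slot to be written next, the last written slot wraps back to the end of the list when that index is zero. A one-function sketch of that index computation:

#include <stdint.h>
#include <stdio.h>

/* Index of the most recently written element, given the next write index. */
static uint32_t last_written(uint32_t next_idx, uint32_t num_elements)
{
	return (next_idx > 0 ? next_idx : num_elements) - 1;
}

int main(void)
{
	printf("%u %u\n", last_written(3, 8), last_written(0, 8));	/* 2 7 */
	return 0;
}
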
+ */ +static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset, + bool *dump, u32 start_page_id, + u32 num_pages, + struct phys_mem_desc *ilt_pages, + bool dump_page_ids, u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) +{ + u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; + u32 page_id, end_page_id, offset = *given_offset; + struct phys_mem_desc *mem_desc = NULL; + bool continue_dump = *dump; + u32 partial_page_size = 0; + + if (num_pages == 0) + return offset; + + end_page_id = start_page_id + num_pages - 1; + + for (page_id = start_page_id; page_id <= end_page_id; page_id++) { + mem_desc = &ilt_pages[page_id]; + if (!ilt_pages[page_id].virt_addr) + continue; + + if (dump_page_ids) { + /* Copy page ID to dump buffer + * (if dump is needed and buffer is not full) + */ + if ((continue_dump) && + (offset + 1 > buf_size_in_dwords)) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + if (continue_dump) + *(dump_buf + offset) = page_id; + offset++; + } else { + /* Copy page memory to dump buffer */ + if ((continue_dump) && + (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords)) { + if (offset + BYTES_TO_DWORDS(mem_desc->size) > + buf_size_in_dwords) { + partial_page_size = + buf_size_in_dwords - offset; + memcpy(dump_buf + offset, + mem_desc->virt_addr, + partial_page_size); + continue_dump = false; + actual_dump_size_in_dwords = + offset + partial_page_size; + } + } + + if (continue_dump) + memcpy(dump_buf + offset, + mem_desc->virt_addr, mem_desc->size); + offset += BYTES_TO_DWORDS(mem_desc->size); + } + } + + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; + + return offset; +} + +/* Dumps a section containing the dumped ILT pages. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 *given_offset, + bool *dump, + u32 valid_conn_pf_pages, + u32 valid_conn_vf_pages, + struct phys_mem_desc *ilt_pages, + bool dump_page_ids, + u32 buf_size_in_dwords, + u32 *given_actual_dump_size_in_dwords) +{ + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; + u32 pf_start_line, start_page_id, offset = *given_offset; + u32 cdut_pf_init_pages, cdut_vf_init_pages; + u32 cdut_pf_work_pages, cdut_vf_work_pages; + u32 base_data_offset, size_param_offset; + u32 src_pages; + u32 section_header_and_param_size; + u32 cdut_pf_pages, cdut_vf_pages; + u32 actual_dump_size_in_dwords; + bool continue_dump = *dump; + bool update_size = *dump; + const char *section_name; + u32 i; + + actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords; + section_name = dump_page_ids ? 
"ilt_page_ids" : "ilt_page_mem"; + cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn); + cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn); + cdut_pf_work_pages = qed_get_cdut_num_pf_work_pages(p_hwfn); + cdut_vf_work_pages = qed_get_cdut_num_vf_work_pages(p_hwfn); + cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages; + cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages; + pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line; + section_header_and_param_size = qed_dump_section_hdr(NULL, + false, + section_name, + 1) + + qed_dump_num_param(NULL, false, "size", 0); + + if ((continue_dump) && + (offset + section_header_and_param_size > buf_size_in_dwords)) { + continue_dump = false; + update_size = false; + actual_dump_size_in_dwords = offset; + } + + offset += qed_dump_section_hdr(dump_buf + offset, + continue_dump, section_name, 1); + + /* Dump size parameter (0 for now, overwritten with real size later) */ + size_param_offset = offset; + offset += qed_dump_num_param(dump_buf + offset, + continue_dump, "size", 0); + base_data_offset = offset; + + /* CDUC pages are ordered as follows: + * - PF pages - valid section (included in PF connection type mapping) + * - PF pages - invalid section (not dumped) + * - For each VF in the PF: + * - VF pages - valid section (included in VF connection type mapping) + * - VF pages - invalid section (not dumped) + */ + if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) { + /* Dump connection PF pages */ + start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line; + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, valid_conn_pf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); + + /* Dump connection VF pages */ + start_page_id += clients[ILT_CLI_CDUC].pf_total_lines; + for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; + i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines) + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + valid_conn_vf_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); + } + + /* CDUT pages are ordered as follows: + * - PF init pages (not dumped) + * - PF work pages + * - For each VF in the PF: + * - VF init pages (not dumped) + * - VF work pages + */ + if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUT)) { + /* Dump task PF pages */ + start_page_id = clients[ILT_CLI_CDUT].first.val + + cdut_pf_init_pages - pf_start_line; + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, cdut_pf_work_pages, + ilt_pages, dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); + + /* Dump task VF pages */ + start_page_id = clients[ILT_CLI_CDUT].first.val + + cdut_pf_pages + cdut_vf_init_pages - pf_start_line; + for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count; + i++, start_page_id += cdut_vf_pages) + qed_ilt_dump_pages_range(dump_buf, &offset, + &continue_dump, start_page_id, + cdut_vf_work_pages, ilt_pages, + dump_page_ids, + buf_size_in_dwords, + &actual_dump_size_in_dwords); + } + + /*Dump Searcher pages */ + if (clients[ILT_CLI_SRC].active) { + start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line; + src_pages = clients[ILT_CLI_SRC].last.val - + clients[ILT_CLI_SRC].first.val + 1; + qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump, + start_page_id, src_pages, ilt_pages, + dump_page_ids, buf_size_in_dwords, + &actual_dump_size_in_dwords); + } + + /* Overwrite size param */ + if (update_size) { + u32 section_size = (*dump 
== continue_dump) ? + offset - base_data_offset : + actual_dump_size_in_dwords - base_data_offset; + if (section_size > 0) + qed_dump_num_param(dump_buf + size_param_offset, + *dump, "size", section_size); + else if ((section_size == 0) && (*dump != continue_dump)) + actual_dump_size_in_dwords -= + section_header_and_param_size; + } + + *dump = continue_dump; + *given_offset = offset; + *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords; + + return offset; +} + +/* Dumps a section containing the global parameters. + * Part of ilt dump process + * Returns the dumped size in dwords. + */ +static u32 +qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + bool dump, + u32 cduc_page_size, + u32 conn_ctx_size, + u32 cdut_page_size, + u32 *full_dump_size_param_offset, + u32 *actual_dump_size_param_offset) +{ + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; + u32 offset = 0; + + offset += qed_dump_common_global_params(p_hwfn, p_ptt, + dump_buf + offset, + dump, 30); + offset += qed_dump_str_param(dump_buf + offset, + dump, + "dump-type", "ilt-dump"); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-page-size", + cduc_page_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-first-page-id", + clients[ILT_CLI_CDUC].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-last-page-id", + clients[ILT_CLI_CDUC].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-num-pf-pages", + clients[ILT_CLI_CDUC].pf_total_lines); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cduc-num-vf-pages", + clients[ILT_CLI_CDUC].vf_total_lines); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "max-conn-ctx-size", + conn_ctx_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-page-size", + cdut_page_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-first-page-id", + clients[ILT_CLI_CDUT].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-last-page-id", + clients[ILT_CLI_CDUT].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-pf-init-pages", + qed_get_cdut_num_pf_init_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-vf-init-pages", + qed_get_cdut_num_vf_init_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-pf-work-pages", + qed_get_cdut_num_pf_work_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "cdut-num-vf-work-pages", + qed_get_cdut_num_vf_work_pages(p_hwfn)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "max-task-ctx-size", + p_hwfn->p_cxt_mngr->task_ctx_size); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "first-vf-id-in-pf", + p_hwfn->p_cxt_mngr->first_vf_in_pf); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "num-vfs-in-pf", + p_hwfn->p_cxt_mngr->vf_count); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "ptr-size-bytes", + sizeof(void *)); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "pf-start-line", + p_hwfn->p_cxt_mngr->pf_start_line); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "page-mem-desc-size-dwords", + PAGE_MEM_DESC_SIZE_DWORDS); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "ilt-shadow-size", + p_hwfn->p_cxt_mngr->ilt_shadow_size); + + *full_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + 
offset, + dump, "dump-size-full", 0); + + *actual_dump_size_param_offset = offset; + + offset += qed_dump_num_param(dump_buf + offset, + dump, + "dump-size-actual", 0); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "iscsi_task_pages", + p_hwfn->p_cxt_mngr->iscsi_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "fcoe_task_pages", + p_hwfn->p_cxt_mngr->fcoe_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "roce_task_pages", + p_hwfn->p_cxt_mngr->roce_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "eth_task_pages", + p_hwfn->p_cxt_mngr->eth_task_pages); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-first-page-id", + clients[ILT_CLI_SRC].first.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-last-page-id", + clients[ILT_CLI_SRC].last.val); + offset += qed_dump_num_param(dump_buf + offset, + dump, + "src-is-active", + clients[ILT_CLI_SRC].active); + + /* Additional/Less parameters require matching of number in call to + * dump_common_global_params() + */ + + return offset; +} + +/* Dump section containing number of PF CIDs per connection type. + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_pf_cids) +{ + u32 num_pf_cids = 0; + u32 offset = 0; + u8 conn_type; + + offset += qed_dump_section_hdr(dump_buf + offset, + dump, "num_pf_cids_per_conn_type", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_pf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count; + if (dump) + *(dump_buf + offset) = num_pf_cids; + *valid_conn_pf_cids += num_pf_cids; + } + + return offset; +} + +/* Dump section containing number of VF CIDs per connection type + * Part of ilt dump process. + * Returns the dumped size in dwords. + */ +static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + bool dump, u32 *valid_conn_vf_cids) +{ + u32 num_vf_cids = 0; + u32 offset = 0; + u8 conn_type; + + offset += qed_dump_section_hdr(dump_buf + offset, dump, + "num_vf_cids_per_conn_type", 1); + offset += qed_dump_num_param(dump_buf + offset, + dump, "size", NUM_OF_CONNECTION_TYPES); + for (conn_type = 0, *valid_conn_vf_cids = 0; + conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) { + num_vf_cids = + p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf; + if (dump) + *(dump_buf + offset) = num_vf_cids; + *valid_conn_vf_cids += num_vf_cids; + } + + return offset; +} + +/* Performs ILT Dump to the specified buffer. + * buf_size_in_dwords - The dumped buffer size. + * Returns the dumped size in dwords. 
+ */ +static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, u32 buf_size_in_dwords, bool dump) +{ +#if ((!defined VMWARE) && (!defined UEFI)) + struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients; +#endif + u32 valid_conn_vf_cids = 0, + valid_conn_vf_pages, offset = 0, real_dumped_size = 0; + u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages; + u32 num_cids_per_page, conn_ctx_size; + u32 cduc_page_size, cdut_page_size; + u32 actual_dump_size_in_dwords = 0; + struct phys_mem_desc *ilt_pages; + u32 actul_dump_off = 0; + u32 last_section_size; + u32 full_dump_off = 0; + u32 section_size = 0; + bool continue_dump; + u32 page_id; + + last_section_size = qed_dump_last_section(NULL, 0, false); + cduc_page_size = 1 << + (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + cdut_page_size = 1 << + (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN); + conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size; + num_cids_per_page = (int)(cduc_page_size / conn_ctx_size); + ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow; + continue_dump = dump; + + /* if need to dump then save memory for the last section + * (last section calculates CRC of dumped data) + */ + if (dump) { + if (buf_size_in_dwords >= last_section_size) { + buf_size_in_dwords -= last_section_size; + } else { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + /* Dump global params */ + + /* if need to dump then first check that there is enough memory + * in dumped buffer for this section calculate the size of this + * section without dumping. if there is not enough memory - then + * stop the dumping. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + NULL, + false, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_common_global_params(p_hwfn, + p_ptt, + dump_buf + offset, + continue_dump, + cduc_page_size, + conn_ctx_size, + cdut_page_size, + &full_dump_off, + &actul_dump_off); + + /* Dump section containing number of PF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_pf_cids(p_hwfn, + NULL, + false, + &valid_conn_pf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_pf_cids); + + /* Dump section containing number of VF CIDs per connection type + * If need to dump then first check that there is enough memory in + * dumped buffer for this section. + */ + if (continue_dump) { + section_size = + qed_ilt_dump_dump_num_vf_cids(p_hwfn, + NULL, + false, + &valid_conn_vf_cids); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn, + dump_buf + offset, + continue_dump, + &valid_conn_vf_cids); + + /* Dump section containing physical memory descriptors for each + * ILT page. + */ + num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size; + + /* If need to dump then first check that there is enough memory + * in dumped buffer for the section header. 
+ */ + if (continue_dump) { + section_size = qed_dump_section_hdr(NULL, + false, + "ilt_page_desc", + 1) + + qed_dump_num_param(NULL, + false, + "size", + num_pages * PAGE_MEM_DESC_SIZE_DWORDS); + if (offset + section_size > buf_size_in_dwords) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + + offset += qed_dump_section_hdr(dump_buf + offset, + continue_dump, "ilt_page_desc", 1); + offset += qed_dump_num_param(dump_buf + offset, + continue_dump, + "size", + num_pages * PAGE_MEM_DESC_SIZE_DWORDS); + + /* Copy memory descriptors to dump buffer + * If need to dump then dump till the dump buffer size + */ + if (continue_dump) { + for (page_id = 0; page_id < num_pages; + page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) { + if (continue_dump && + (offset + PAGE_MEM_DESC_SIZE_DWORDS <= + buf_size_in_dwords)) { + memcpy(dump_buf + offset, + &ilt_pages[page_id], + DWORDS_TO_BYTES + (PAGE_MEM_DESC_SIZE_DWORDS)); + } else { + if (continue_dump) { + continue_dump = false; + actual_dump_size_in_dwords = offset; + } + } + } + } else { + offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS; + } + + valid_conn_pf_pages = DIV_ROUND_UP(valid_conn_pf_cids, + num_cids_per_page); + valid_conn_vf_pages = DIV_ROUND_UP(valid_conn_vf_cids, + num_cids_per_page); + + /* Dump ILT pages IDs */ + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, true, buf_size_in_dwords, + &actual_dump_size_in_dwords); + + /* Dump ILT pages memory */ + qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump, + valid_conn_pf_pages, valid_conn_vf_pages, + ilt_pages, false, buf_size_in_dwords, + &actual_dump_size_in_dwords); + + real_dumped_size = + (continue_dump == dump) ? offset : actual_dump_size_in_dwords; + qed_dump_num_param(dump_buf + full_dump_off, dump, + "full-dump-size", offset + last_section_size); + qed_dump_num_param(dump_buf + actul_dump_off, + dump, + "actual-dump-size", + real_dumped_size + last_section_size); + + /* Dump last section */ + real_dumped_size += qed_dump_last_section(dump_buf, + real_dumped_size, dump); + + return real_dumped_size; +} + +/***************************** Public Functions *******************************/ + +enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr) +{ + struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr; + u8 buf_id; + + /* Convert binary data to debug arrays */ + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) + qed_set_dbg_bin_buf(p_hwfn, + buf_id, + (u32 *)(bin_ptr + buf_hdrs[buf_id].offset), + buf_hdrs[buf_id].length); + + return DBG_STATUS_OK; +} + +static enum dbg_status qed_dbg_set_app_ver(u32 ver) +{ + if (ver < TOOLS_VERSION) + return DBG_STATUS_UNSUPPORTED_APP_VERSION; + + s_app_ver = ver; + + return DBG_STATUS_OK; +} + +bool qed_read_fw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct fw_info *fw_info) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u8 storm_id; + + for (storm_id = 0; storm_id < MAX_DBG_STORMS; storm_id++) { + struct storm_defs *storm = &s_storm_defs[storm_id]; + + /* Skip Storm if it's in reset */ + if (dev_data->block_in_reset[storm->sem_block_id]) + continue; + + /* Read FW info for the current Storm */ + qed_read_storm_fw_info(p_hwfn, p_ptt, storm_id, fw_info); + + return true; + } + + return false; +} + +enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, + enum dbg_grc_params grc_param, u32 val) +{ + struct dbg_tools_data *dev_data = 
&p_hwfn->dbg_info; + enum dbg_status status; + int i; + + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "dbg_grc_config: paramId = %d, val = %d\n", grc_param, val); + + status = qed_dbg_dev_init(p_hwfn); + if (status != DBG_STATUS_OK) + return status; + + /* Initializes the GRC parameters (if not initialized). Needed in order + * to set the default parameter values for the first time. + */ + qed_dbg_grc_init_params(p_hwfn); + + if (grc_param >= MAX_DBG_GRC_PARAMS) + return DBG_STATUS_INVALID_ARGS; + if (val < s_grc_param_defs[grc_param].min || + val > s_grc_param_defs[grc_param].max) + return DBG_STATUS_INVALID_ARGS; + + if (s_grc_param_defs[grc_param].is_preset) { + /* Preset param */ + + /* Disabling a preset is not allowed. Call + * dbg_grc_set_params_default instead. + */ + if (!val) + return DBG_STATUS_INVALID_ARGS; + + /* Update all params with the preset values */ + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) { + struct grc_param_defs *defs = &s_grc_param_defs[i]; + u32 preset_val; + /* Skip persistent params */ + if (defs->is_persistent) + continue; + + /* Find preset value */ + if (grc_param == DBG_GRC_PARAM_EXCLUDE_ALL) + preset_val = + defs->exclude_all_preset_val; + else if (grc_param == DBG_GRC_PARAM_CRASH) + preset_val = + defs->crash_preset_val[dev_data->chip_id]; + else + return DBG_STATUS_INVALID_ARGS; + + qed_grc_set_param(p_hwfn, i, preset_val); + } + } else { + /* Regular param - set its value */ + qed_grc_set_param(p_hwfn, grc_param, val); + } + + return DBG_STATUS_OK; +} + +/* Assign default GRC param values */ +void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + u32 i; + + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + if (!s_grc_param_defs[i].is_persistent) + dev_data->grc.param_val[i] = + s_grc_param_defs[i].default_val[dev_data->chip_id]; +} + +enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 0; + + if (status != DBG_STATUS_OK) + return status; + + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_REG].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_DUMP_MEM].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) + return DBG_STATUS_DBG_ARRAY_NOT_SET; + + return qed_grc_dump(p_hwfn, p_ptt, NULL, false, buf_size); +} + +enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords; + enum dbg_status status; + + *num_dumped_dwords = 0; + + status = qed_dbg_grc_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + /* Doesn't do anything, needed for compile time asserts */ + qed_static_asserts(); + + /* GRC Dump */ + status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; +} + +enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + struct idle_chk_data *idle_chk = &dev_data->idle_chk; + enum dbg_status status; + + *buf_size = 0; + + status = qed_dbg_dev_init(p_hwfn); + 
if (status != DBG_STATUS_OK) + return status; + + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_REGS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_IMMS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_RULES].ptr) + return DBG_STATUS_DBG_ARRAY_NOT_SET; + + if (!idle_chk->buf_size_set) { + idle_chk->buf_size = qed_idle_chk_dump(p_hwfn, + p_ptt, NULL, false); + idle_chk->buf_size_set = true; + } + + *buf_size = idle_chk->buf_size; + + return DBG_STATUS_OK; +} + +enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords; + enum dbg_status status; + + *num_dumped_dwords = 0; + + status = qed_dbg_idle_chk_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + /* Update reset state */ + qed_grc_unreset_blocks(p_hwfn, p_ptt, true); + qed_update_blocks_reset_state(p_hwfn, p_ptt); + + /* Idle Check Dump */ + *num_dumped_dwords = qed_idle_chk_dump(p_hwfn, p_ptt, dump_buf, true); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return DBG_STATUS_OK; +} + +enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 0; + + if (status != DBG_STATUS_OK) + return status; + + return qed_mcp_trace_dump(p_hwfn, p_ptt, NULL, false, buf_size); +} + +enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords; + enum dbg_status status; + + status = + qed_dbg_mcp_trace_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK && status != + DBG_STATUS_NVRAM_GET_IMAGE_FAILED) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + /* Update reset state */ + qed_update_blocks_reset_state(p_hwfn, p_ptt); + + /* Perform dump */ + status = qed_mcp_trace_dump(p_hwfn, + p_ptt, dump_buf, true, num_dumped_dwords); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; +} + +enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 0; + + if (status != DBG_STATUS_OK) + return status; + + return qed_reg_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); +} + +enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords; + enum dbg_status status; + + *num_dumped_dwords = 0; + + status = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + /* Update reset state */ + qed_update_blocks_reset_state(p_hwfn, p_ptt); + + status = qed_reg_fifo_dump(p_hwfn, + p_ptt, dump_buf, true, num_dumped_dwords); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; +} + 
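Editor's note: the debug feature entry points above all follow the same two-step contract: query the required size with qed_dbg_<feature>_get_dump_buf_size(), allocate at least that many dwords, then call qed_dbg_<feature>_dump(), which rejects an undersized buffer with DBG_STATUS_DUMP_BUF_TOO_SMALL and reverts the GRC parameters afterwards. The sketch below is an illustrative caller only, shown for the reg FIFO feature; example_collect_reg_fifo is a hypothetical helper (not part of this driver) and it assumes kernel context with a valid p_hwfn and an already-acquired p_ptt.

#include <linux/vmalloc.h>

/* Hypothetical caller, for illustration only: drives the size-then-dump
 * flow of the reg FIFO debug feature. Not part of the qed driver itself.
 */
static enum dbg_status example_collect_reg_fifo(struct qed_hwfn *p_hwfn,
						struct qed_ptt *p_ptt)
{
	u32 size_in_dwords = 0, dumped_dwords = 0;
	enum dbg_status rc;
	u32 *buf;

	/* Step 1: ask the debug core how many dwords the dump needs */
	rc = qed_dbg_reg_fifo_get_dump_buf_size(p_hwfn, p_ptt,
						&size_in_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	/* Step 2: allocate a buffer of at least that many dwords */
	buf = vzalloc(size_in_dwords * sizeof(u32));
	if (!buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	/* Step 3: perform the dump; a smaller buffer would be rejected
	 * with DBG_STATUS_DUMP_BUF_TOO_SMALL.
	 */
	rc = qed_dbg_reg_fifo_dump(p_hwfn, p_ptt, buf, size_in_dwords,
				   &dumped_dwords);

	/* ... hand buf (dumped_dwords dwords) to a parser/consumer ... */

	vfree(buf);

	return rc;
}

The same pattern applies to the GRC, idle check, MCP trace, IGU FIFO, protection override and FW asserts features that follow.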
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 0; + + if (status != DBG_STATUS_OK) + return status; + + return qed_igu_fifo_dump(p_hwfn, p_ptt, NULL, false, buf_size); +} + +enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords; + enum dbg_status status; + + *num_dumped_dwords = 0; + + status = qed_dbg_igu_fifo_get_dump_buf_size(p_hwfn, + p_ptt, + &needed_buf_size_in_dwords); + if (status != DBG_STATUS_OK) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + /* Update reset state */ + qed_update_blocks_reset_state(p_hwfn, p_ptt); + + status = qed_igu_fifo_dump(p_hwfn, + p_ptt, dump_buf, true, num_dumped_dwords); + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; +} + +enum dbg_status +qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 0; + + if (status != DBG_STATUS_OK) + return status; + + return qed_protection_override_dump(p_hwfn, + p_ptt, NULL, false, buf_size); +} + +enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; + enum dbg_status status; + + *num_dumped_dwords = 0; + + status = + qed_dbg_protection_override_get_dump_buf_size(p_hwfn, + p_ptt, + p_size); + if (status != DBG_STATUS_OK) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + /* Update reset state */ + qed_update_blocks_reset_state(p_hwfn, p_ptt); + + status = qed_protection_override_dump(p_hwfn, + p_ptt, + dump_buf, + true, num_dumped_dwords); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return status; +} + +enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 0; + + if (status != DBG_STATUS_OK) + return status; + + /* Update reset state */ + qed_update_blocks_reset_state(p_hwfn, p_ptt); + + *buf_size = qed_fw_asserts_dump(p_hwfn, p_ptt, NULL, false); + + return DBG_STATUS_OK; +} + +enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + u32 needed_buf_size_in_dwords, *p_size = &needed_buf_size_in_dwords; + enum dbg_status status; + + *num_dumped_dwords = 0; + + status = + qed_dbg_fw_asserts_get_dump_buf_size(p_hwfn, + p_ptt, + p_size); + if (status != DBG_STATUS_OK) + return status; + + if (buf_size_in_dwords < needed_buf_size_in_dwords) + return DBG_STATUS_DUMP_BUF_TOO_SMALL; + + *num_dumped_dwords = qed_fw_asserts_dump(p_hwfn, p_ptt, dump_buf, true); + + /* Revert GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return DBG_STATUS_OK; +} + +static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *buf_size) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + + *buf_size = 
0; + + if (status != DBG_STATUS_OK) + return status; + + *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false); + + return DBG_STATUS_OK; +} + +static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *dump_buf, + u32 buf_size_in_dwords, + u32 *num_dumped_dwords) +{ + *num_dumped_dwords = qed_ilt_dump(p_hwfn, + p_ptt, + dump_buf, buf_size_in_dwords, true); + + /* Reveret GRC params to their default */ + qed_dbg_grc_set_params_default(p_hwfn); + + return DBG_STATUS_OK; +} + +enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum block_id block_id, + enum dbg_attn_type attn_type, + bool clear_status, + struct dbg_attn_block_result *results) +{ + enum dbg_status status = qed_dbg_dev_init(p_hwfn); + u8 reg_idx, num_attn_regs, num_result_regs = 0; + const struct dbg_attn_reg *attn_reg_arr; + + if (status != DBG_STATUS_OK) + return status; + + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_MODE_TREE].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_BLOCKS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_REGS].ptr) + return DBG_STATUS_DBG_ARRAY_NOT_SET; + + attn_reg_arr = qed_get_block_attn_regs(p_hwfn, + block_id, + attn_type, &num_attn_regs); + + for (reg_idx = 0; reg_idx < num_attn_regs; reg_idx++) { + const struct dbg_attn_reg *reg_data = &attn_reg_arr[reg_idx]; + struct dbg_attn_reg_result *reg_result; + u32 sts_addr, sts_val; + u16 modes_buf_offset; + bool eval_mode; + + /* Check mode */ + eval_mode = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_EVAL_MODE) > 0; + modes_buf_offset = GET_FIELD(reg_data->mode.data, + DBG_MODE_HDR_MODES_BUF_OFFSET); + if (eval_mode && !qed_is_mode_match(p_hwfn, &modes_buf_offset)) + continue; + + /* Mode match - read attention status register */ + sts_addr = DWORDS_TO_BYTES(clear_status ? 
+ reg_data->sts_clr_address : + GET_FIELD(reg_data->data, + DBG_ATTN_REG_STS_ADDRESS)); + sts_val = qed_rd(p_hwfn, p_ptt, sts_addr); + if (!sts_val) + continue; + + /* Non-zero attention status - add to results */ + reg_result = &results->reg_results[num_result_regs]; + SET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_STS_ADDRESS, sts_addr); + SET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_NUM_REG_ATTN, + GET_FIELD(reg_data->data, DBG_ATTN_REG_NUM_REG_ATTN)); + reg_result->block_attn_offset = reg_data->block_attn_offset; + reg_result->sts_val = sts_val; + reg_result->mask_val = qed_rd(p_hwfn, + p_ptt, + DWORDS_TO_BYTES + (reg_data->mask_address)); + num_result_regs++; + } + + results->block_id = (u8)block_id; + results->names_offset = + qed_get_block_attn_data(p_hwfn, block_id, attn_type)->names_offset; + SET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE, attn_type); + SET_FIELD(results->data, + DBG_ATTN_BLOCK_RESULT_NUM_REGS, num_result_regs); + + return DBG_STATUS_OK; +} + +/******************************* Data Types **********************************/ + +/* REG fifo element */ +struct reg_fifo_element { + u64 data; +#define REG_FIFO_ELEMENT_ADDRESS_SHIFT 0 +#define REG_FIFO_ELEMENT_ADDRESS_MASK 0x7fffff +#define REG_FIFO_ELEMENT_ACCESS_SHIFT 23 +#define REG_FIFO_ELEMENT_ACCESS_MASK 0x1 +#define REG_FIFO_ELEMENT_PF_SHIFT 24 +#define REG_FIFO_ELEMENT_PF_MASK 0xf +#define REG_FIFO_ELEMENT_VF_SHIFT 28 +#define REG_FIFO_ELEMENT_VF_MASK 0xff +#define REG_FIFO_ELEMENT_PORT_SHIFT 36 +#define REG_FIFO_ELEMENT_PORT_MASK 0x3 +#define REG_FIFO_ELEMENT_PRIVILEGE_SHIFT 38 +#define REG_FIFO_ELEMENT_PRIVILEGE_MASK 0x3 +#define REG_FIFO_ELEMENT_PROTECTION_SHIFT 40 +#define REG_FIFO_ELEMENT_PROTECTION_MASK 0x7 +#define REG_FIFO_ELEMENT_MASTER_SHIFT 43 +#define REG_FIFO_ELEMENT_MASTER_MASK 0xf +#define REG_FIFO_ELEMENT_ERROR_SHIFT 47 +#define REG_FIFO_ELEMENT_ERROR_MASK 0x1f +}; + +/* REG fifo error element */ +struct reg_fifo_err { + u32 err_code; + const char *err_msg; +}; + +/* IGU fifo element */ +struct igu_fifo_element { + u32 dword0; +#define IGU_FIFO_ELEMENT_DWORD0_FID_SHIFT 0 +#define IGU_FIFO_ELEMENT_DWORD0_FID_MASK 0xff +#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_SHIFT 8 +#define IGU_FIFO_ELEMENT_DWORD0_IS_PF_MASK 0x1 +#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_SHIFT 9 +#define IGU_FIFO_ELEMENT_DWORD0_SOURCE_MASK 0xf +#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_SHIFT 13 +#define IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE_MASK 0xf +#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_SHIFT 17 +#define IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR_MASK 0x7fff + u32 dword1; + u32 dword2; +#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_SHIFT 0 +#define IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD_MASK 0x1 +#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_SHIFT 1 +#define IGU_FIFO_ELEMENT_DWORD12_WR_DATA_MASK 0xffffffff + u32 reserved; +}; + +struct igu_fifo_wr_data { + u32 data; +#define IGU_FIFO_WR_DATA_PROD_CONS_SHIFT 0 +#define IGU_FIFO_WR_DATA_PROD_CONS_MASK 0xffffff +#define IGU_FIFO_WR_DATA_UPDATE_FLAG_SHIFT 24 +#define IGU_FIFO_WR_DATA_UPDATE_FLAG_MASK 0x1 +#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_SHIFT 25 +#define IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB_MASK 0x3 +#define IGU_FIFO_WR_DATA_SEGMENT_SHIFT 27 +#define IGU_FIFO_WR_DATA_SEGMENT_MASK 0x1 +#define IGU_FIFO_WR_DATA_TIMER_MASK_SHIFT 28 +#define IGU_FIFO_WR_DATA_TIMER_MASK_MASK 0x1 +#define IGU_FIFO_WR_DATA_CMD_TYPE_SHIFT 31 +#define IGU_FIFO_WR_DATA_CMD_TYPE_MASK 0x1 +}; + +struct igu_fifo_cleanup_wr_data { + u32 data; +#define IGU_FIFO_CLEANUP_WR_DATA_RESERVED_SHIFT 0 +#define 
IGU_FIFO_CLEANUP_WR_DATA_RESERVED_MASK 0x7ffffff +#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_SHIFT 27 +#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL_MASK 0x1 +#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_SHIFT 28 +#define IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE_MASK 0x7 +#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_SHIFT 31 +#define IGU_FIFO_CLEANUP_WR_DATA_CMD_TYPE_MASK 0x1 +}; + +/* Protection override element */ +struct protection_override_element { + u64 data; +#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_SHIFT 0 +#define PROTECTION_OVERRIDE_ELEMENT_ADDRESS_MASK 0x7fffff +#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_SHIFT 23 +#define PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE_MASK 0xffffff +#define PROTECTION_OVERRIDE_ELEMENT_READ_SHIFT 47 +#define PROTECTION_OVERRIDE_ELEMENT_READ_MASK 0x1 +#define PROTECTION_OVERRIDE_ELEMENT_WRITE_SHIFT 48 +#define PROTECTION_OVERRIDE_ELEMENT_WRITE_MASK 0x1 +#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_SHIFT 49 +#define PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION_MASK 0x7 +#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_SHIFT 52 +#define PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION_MASK 0x7 +}; + +enum igu_fifo_sources { + IGU_SRC_PXP0, + IGU_SRC_PXP1, + IGU_SRC_PXP2, + IGU_SRC_PXP3, + IGU_SRC_PXP4, + IGU_SRC_PXP5, + IGU_SRC_PXP6, + IGU_SRC_PXP7, + IGU_SRC_CAU, + IGU_SRC_ATTN, + IGU_SRC_GRC +}; + +enum igu_fifo_addr_types { + IGU_ADDR_TYPE_MSIX_MEM, + IGU_ADDR_TYPE_WRITE_PBA, + IGU_ADDR_TYPE_WRITE_INT_ACK, + IGU_ADDR_TYPE_WRITE_ATTN_BITS, + IGU_ADDR_TYPE_READ_INT, + IGU_ADDR_TYPE_WRITE_PROD_UPDATE, + IGU_ADDR_TYPE_RESERVED +}; + +struct igu_fifo_addr_data { + u16 start_addr; + u16 end_addr; + char *desc; + char *vf_desc; + enum igu_fifo_addr_types type; +}; + +/******************************** Constants **********************************/ + +#define MAX_MSG_LEN 1024 + +#define MCP_TRACE_MAX_MODULE_LEN 8 +#define MCP_TRACE_FORMAT_MAX_PARAMS 3 +#define MCP_TRACE_FORMAT_PARAM_WIDTH \ + (MCP_TRACE_FORMAT_P2_SIZE_OFFSET - MCP_TRACE_FORMAT_P1_SIZE_OFFSET) + +#define REG_FIFO_ELEMENT_ADDR_FACTOR 4 +#define REG_FIFO_ELEMENT_IS_PF_VF_VAL 127 + +#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4 + +/***************************** Constant Arrays *******************************/ + +/* Status string array */ +static const char * const s_status_str[] = { + /* DBG_STATUS_OK */ + "Operation completed successfully", + + /* DBG_STATUS_APP_VERSION_NOT_SET */ + "Debug application version wasn't set", + + /* DBG_STATUS_UNSUPPORTED_APP_VERSION */ + "Unsupported debug application version", + + /* DBG_STATUS_DBG_BLOCK_NOT_RESET */ + "The debug block wasn't reset since the last recording", + + /* DBG_STATUS_INVALID_ARGS */ + "Invalid arguments", + + /* DBG_STATUS_OUTPUT_ALREADY_SET */ + "The debug output was already set", + + /* DBG_STATUS_INVALID_PCI_BUF_SIZE */ + "Invalid PCI buffer size", + + /* DBG_STATUS_PCI_BUF_ALLOC_FAILED */ + "PCI buffer allocation failed", + + /* DBG_STATUS_PCI_BUF_NOT_ALLOCATED */ + "A PCI buffer wasn't allocated", + + /* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */ + "The filter/trigger constraint dword offsets are not enabled for recording", + /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */ + "No matching framing mode", + + /* DBG_STATUS_VFC_READ_ERROR */ + "Error reading from VFC", + + /* DBG_STATUS_STORM_ALREADY_ENABLED */ + "The Storm was already enabled", + + /* DBG_STATUS_STORM_NOT_ENABLED */ + "The specified Storm wasn't enabled", + + /* DBG_STATUS_BLOCK_ALREADY_ENABLED */ + "The block was already enabled", + + /* 
DBG_STATUS_BLOCK_NOT_ENABLED */ + "The specified block wasn't enabled", + + /* DBG_STATUS_NO_INPUT_ENABLED */ + "No input was enabled for recording", + + /* DBG_STATUS_NO_FILTER_TRIGGER_256B */ + "Filters and triggers are not allowed in E4 256-bit mode", + + /* DBG_STATUS_FILTER_ALREADY_ENABLED */ + "The filter was already enabled", + + /* DBG_STATUS_TRIGGER_ALREADY_ENABLED */ + "The trigger was already enabled", + + /* DBG_STATUS_TRIGGER_NOT_ENABLED */ + "The trigger wasn't enabled", + + /* DBG_STATUS_CANT_ADD_CONSTRAINT */ + "A constraint can be added only after a filter was enabled or a trigger state was added", + + /* DBG_STATUS_TOO_MANY_TRIGGER_STATES */ + "Cannot add more than 3 trigger states", + + /* DBG_STATUS_TOO_MANY_CONSTRAINTS */ + "Cannot add more than 4 constraints per filter or trigger state", + + /* DBG_STATUS_RECORDING_NOT_STARTED */ + "The recording wasn't started", + + /* DBG_STATUS_DATA_DIDNT_TRIGGER */ + "A trigger was configured, but it didn't trigger", + + /* DBG_STATUS_NO_DATA_RECORDED */ + "No data was recorded", + + /* DBG_STATUS_DUMP_BUF_TOO_SMALL */ + "Dump buffer is too small", + + /* DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED */ + "Dumped data is not aligned to chunks", + + /* DBG_STATUS_UNKNOWN_CHIP */ + "Unknown chip", + + /* DBG_STATUS_VIRT_MEM_ALLOC_FAILED */ + "Failed allocating virtual memory", + + /* DBG_STATUS_BLOCK_IN_RESET */ + "The input block is in reset", + + /* DBG_STATUS_INVALID_TRACE_SIGNATURE */ + "Invalid MCP trace signature found in NVRAM", + + /* DBG_STATUS_INVALID_NVRAM_BUNDLE */ + "Invalid bundle ID found in NVRAM", + + /* DBG_STATUS_NVRAM_GET_IMAGE_FAILED */ + "Failed getting NVRAM image", + + /* DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE */ + "NVRAM image is not dword-aligned", + + /* DBG_STATUS_NVRAM_READ_FAILED */ + "Failed reading from NVRAM", + + /* DBG_STATUS_IDLE_CHK_PARSE_FAILED */ + "Idle check parsing failed", + + /* DBG_STATUS_MCP_TRACE_BAD_DATA */ + "MCP Trace data is corrupt", + + /* DBG_STATUS_MCP_TRACE_NO_META */ + "Dump doesn't contain meta data - it must be provided in image file", + + /* DBG_STATUS_MCP_COULD_NOT_HALT */ + "Failed to halt MCP", + + /* DBG_STATUS_MCP_COULD_NOT_RESUME */ + "Failed to resume MCP after halt", + + /* DBG_STATUS_RESERVED0 */ + "", + + /* DBG_STATUS_SEMI_FIFO_NOT_EMPTY */ + "Failed to empty SEMI sync FIFO", + + /* DBG_STATUS_IGU_FIFO_BAD_DATA */ + "IGU FIFO data is corrupt", + + /* DBG_STATUS_MCP_COULD_NOT_MASK_PRTY */ + "MCP failed to mask parities", + + /* DBG_STATUS_FW_ASSERTS_PARSE_FAILED */ + "FW Asserts parsing failed", + + /* DBG_STATUS_REG_FIFO_BAD_DATA */ + "GRC FIFO data is corrupt", + + /* DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA */ + "Protection Override data is corrupt", + + /* DBG_STATUS_DBG_ARRAY_NOT_SET */ + "Debug arrays were not set (when using binary files, dbg_set_bin_ptr must be called)", + + /* DBG_STATUS_RESERVED1 */ + "", + + /* DBG_STATUS_NON_MATCHING_LINES */ + "Non-matching debug lines - in E4, all lines must be of the same type (either 128b or 256b)", + + /* DBG_STATUS_INSUFFICIENT_HW_IDS */ + "Insufficient HW IDs. 
Try to record less Storms/blocks", + + /* DBG_STATUS_DBG_BUS_IN_USE */ + "The debug bus is in use", + + /* DBG_STATUS_INVALID_STORM_DBG_MODE */ + "The storm debug mode is not supported in the current chip", + + /* DBG_STATUS_OTHER_ENGINE_BB_ONLY */ + "Other engine is supported only in BB", + + /* DBG_STATUS_FILTER_SINGLE_HW_ID */ + "The configured filter mode requires a single Storm/block input", + + /* DBG_STATUS_TRIGGER_SINGLE_HW_ID */ + "The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input", + + /* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */ + "When triggering on Storm data, the Storm to trigger on must be specified", + + /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */ + "Failed to request MDUMP2 Offsize", + + /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */ + "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data", + + /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */ + "Invalid Signature found at start of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */ + "Invalid Log Size of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */ + "Invalid Log Header of MDUMP2", + + /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */ + "Invalid Log Data of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */ + "Could not extract number of ports from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */ + "Could not extract MFW (link) status from regval buf of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */ + "Could not display linkdump of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */ + "Could not read PHY CFG of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */ + "Could not read PLL Mode of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */ + "Could not read TSCF/TSCE Lane Regs of MDUMP2", + + /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */ + "Could not allocate MDUMP2 reg-val internal buffer" +}; + +/* Idle check severity names array */ +static const char * const s_idle_chk_severity_str[] = { + "Error", + "Error if no traffic", + "Warning" +}; + +/* MCP Trace level names array */ +static const char * const s_mcp_trace_level_str[] = { + "ERROR", + "TRACE", + "DEBUG" +}; + +/* Access type names array */ +static const char * const s_access_strs[] = { + "read", + "write" +}; + +/* Privilege type names array */ +static const char * const s_privilege_strs[] = { + "VF", + "PDA", + "HV", + "UA" +}; + +/* Protection type names array */ +static const char * const s_protection_strs[] = { + "(default)", + "(default)", + "(default)", + "(default)", + "override VF", + "override PDA", + "override HV", + "override UA" +}; + +/* Master type names array */ +static const char * const s_master_strs[] = { + "???", + "pxp", + "mcp", + "msdm", + "psdm", + "ysdm", + "usdm", + "tsdm", + "xsdm", + "dbu", + "dmae", + "jdap", + "???", + "???", + "???", + "???" 
+}; + +/* REG FIFO error messages array */ +static struct reg_fifo_err s_reg_fifo_errors[] = { + {1, "grc timeout"}, + {2, "address doesn't belong to any block"}, + {4, "reserved address in block or write to read-only address"}, + {8, "privilege/protection mismatch"}, + {16, "path isolation error"}, + {17, "RSL error"} +}; + +/* IGU FIFO sources array */ +static const char * const s_igu_fifo_source_strs[] = { + "TSTORM", + "MSTORM", + "USTORM", + "XSTORM", + "YSTORM", + "PSTORM", + "PCIE", + "NIG_QM_PBF", + "CAU", + "ATTN", + "GRC", +}; + +/* IGU FIFO error messages */ +static const char * const s_igu_fifo_error_strs[] = { + "no error", + "length error", + "function disabled", + "VF sent command to attention address", + "host sent prod update command", + "read of during interrupt register while in MIMD mode", + "access to PXP BAR reserved address", + "producer update command to attention index", + "unknown error", + "SB index not valid", + "SB relative index and FID not found", + "FID not match", + "command with error flag asserted (PCI error or CAU discard)", + "VF sent cleanup and RF cleanup is disabled", + "cleanup command on type bigger than 4" +}; + +/* IGU FIFO address data */ +static const struct igu_fifo_addr_data s_igu_fifo_addr_data[] = { + {0x0, 0x101, "MSI-X Memory", NULL, + IGU_ADDR_TYPE_MSIX_MEM}, + {0x102, 0x1ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, + {0x200, 0x200, "Write PBA[0:63]", NULL, + IGU_ADDR_TYPE_WRITE_PBA}, + {0x201, 0x201, "Write PBA[64:127]", "reserved", + IGU_ADDR_TYPE_WRITE_PBA}, + {0x202, 0x202, "Write PBA[128]", "reserved", + IGU_ADDR_TYPE_WRITE_PBA}, + {0x203, 0x3ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, + {0x400, 0x5ef, "Write interrupt acknowledgment", NULL, + IGU_ADDR_TYPE_WRITE_INT_ACK}, + {0x5f0, 0x5f0, "Attention bits update", NULL, + IGU_ADDR_TYPE_WRITE_ATTN_BITS}, + {0x5f1, 0x5f1, "Attention bits set", NULL, + IGU_ADDR_TYPE_WRITE_ATTN_BITS}, + {0x5f2, 0x5f2, "Attention bits clear", NULL, + IGU_ADDR_TYPE_WRITE_ATTN_BITS}, + {0x5f3, 0x5f3, "Read interrupt 0:63 with mask", NULL, + IGU_ADDR_TYPE_READ_INT}, + {0x5f4, 0x5f4, "Read interrupt 0:31 with mask", NULL, + IGU_ADDR_TYPE_READ_INT}, + {0x5f5, 0x5f5, "Read interrupt 32:63 with mask", NULL, + IGU_ADDR_TYPE_READ_INT}, + {0x5f6, 0x5f6, "Read interrupt 0:63 without mask", NULL, + IGU_ADDR_TYPE_READ_INT}, + {0x5f7, 0x5ff, "reserved", NULL, + IGU_ADDR_TYPE_RESERVED}, + {0x600, 0x7ff, "Producer update", NULL, + IGU_ADDR_TYPE_WRITE_PROD_UPDATE} +}; + +/******************************** Variables **********************************/ + +/* Temporary buffer, used for print size calculations */ +static char s_temp_buf[MAX_MSG_LEN]; + +/**************************** Private Functions ******************************/ + +static void qed_user_static_asserts(void) +{ +} + +static u32 qed_cyclic_add(u32 a, u32 b, u32 size) +{ + return (a + b) % size; +} + +static u32 qed_cyclic_sub(u32 a, u32 b, u32 size) +{ + return (size + a - b) % size; +} + +/* Reads the specified number of bytes from the specified cyclic buffer (up to 4 + * bytes) and returns them as a dword value. the specified buffer offset is + * updated. + */ +static u32 qed_read_from_cyclic_buf(void *buf, + u32 *offset, + u32 buf_size, u8 num_bytes_to_read) +{ + u8 i, *val_ptr, *bytes_buf = (u8 *)buf; + u32 val = 0; + + val_ptr = (u8 *)&val; + + /* Assume running on a LITTLE ENDIAN and the buffer is network order + * (BIG ENDIAN), as high order bytes are placed in lower memory address. 
+ */ + for (i = 0; i < num_bytes_to_read; i++) { + val_ptr[i] = bytes_buf[*offset]; + *offset = qed_cyclic_add(*offset, 1, buf_size); + } + + return val; +} + +/* Reads and returns the next byte from the specified buffer. + * The specified buffer offset is updated. + */ +static u8 qed_read_byte_from_buf(void *buf, u32 *offset) +{ + return ((u8 *)buf)[(*offset)++]; +} + +/* Reads and returns the next dword from the specified buffer. + * The specified buffer offset is updated. + */ +static u32 qed_read_dword_from_buf(void *buf, u32 *offset) +{ + u32 dword_val = *(u32 *)&((u8 *)buf)[*offset]; + + *offset += 4; + + return dword_val; +} + +/* Reads the next string from the specified buffer, and copies it to the + * specified pointer. The specified buffer offset is updated. + */ +static void qed_read_str_from_buf(void *buf, u32 *offset, u32 size, char *dest) +{ + const char *source_str = &((const char *)buf)[*offset]; + + strncpy(dest, source_str, size); + dest[size - 1] = '\0'; + *offset += size; +} + +/* Returns a pointer to the specified offset (in bytes) of the specified buffer. + * If the specified buffer in NULL, a temporary buffer pointer is returned. + */ +static char *qed_get_buf_ptr(void *buf, u32 offset) +{ + return buf ? (char *)buf + offset : s_temp_buf; +} + +/* Reads a param from the specified buffer. Returns the number of dwords read. + * If the returned str_param is NULL, the param is numeric and its value is + * returned in num_param. + * Otheriwise, the param is a string and its pointer is returned in str_param. + */ +static u32 qed_read_param(u32 *dump_buf, + const char **param_name, + const char **param_str_val, u32 *param_num_val) +{ + char *char_buf = (char *)dump_buf; + size_t offset = 0; + + /* Extract param name */ + *param_name = char_buf; + offset += strlen(*param_name) + 1; + + /* Check param type */ + if (*(char_buf + offset++)) { + /* String param */ + *param_str_val = char_buf + offset; + *param_num_val = 0; + offset += strlen(*param_str_val) + 1; + if (offset & 0x3) + offset += (4 - (offset & 0x3)); + } else { + /* Numeric param */ + *param_str_val = NULL; + if (offset & 0x3) + offset += (4 - (offset & 0x3)); + *param_num_val = *(u32 *)(char_buf + offset); + offset += 4; + } + + return (u32)offset / 4; +} + +/* Reads a section header from the specified buffer. + * Returns the number of dwords read. + */ +static u32 qed_read_section_hdr(u32 *dump_buf, + const char **section_name, + u32 *num_section_params) +{ + const char *param_str_val; + + return qed_read_param(dump_buf, + section_name, ¶m_str_val, num_section_params); +} + +/* Reads section params from the specified buffer and prints them to the results + * buffer. Returns the number of dwords read. 
+ */ +static u32 qed_print_section_params(u32 *dump_buf, + u32 num_section_params, + char *results_buf, u32 *num_chars_printed) +{ + u32 i, dump_offset = 0, results_offset = 0; + + for (i = 0; i < num_section_params; i++) { + const char *param_name, *param_str_val; + u32 param_num_val = 0; + + dump_offset += qed_read_param(dump_buf + dump_offset, + ¶m_name, + ¶m_str_val, ¶m_num_val); + + if (param_str_val) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "%s: %s\n", param_name, param_str_val); + else if (strcmp(param_name, "fw-timestamp")) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "%s: %d\n", param_name, param_num_val); + } + + results_offset += sprintf(qed_get_buf_ptr(results_buf, results_offset), + "\n"); + + *num_chars_printed = results_offset; + + return dump_offset; +} + +/* Returns the block name that matches the specified block ID, + * or NULL if not found. + */ +static const char *qed_dbg_get_block_name(struct qed_hwfn *p_hwfn, + enum block_id block_id) +{ + const struct dbg_block_user *block = + (const struct dbg_block_user *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_BLOCKS_USER_DATA].ptr + block_id; + + return (const char *)block->name; +} + +static struct dbg_tools_user_data *qed_dbg_get_user_data(struct qed_hwfn + *p_hwfn) +{ + return (struct dbg_tools_user_data *)p_hwfn->dbg_user_info; +} + +/* Parses the idle check rules and returns the number of characters printed. + * In case of parsing error, returns 0. + */ +static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 *dump_buf_end, + u32 num_rules, + bool print_fw_idle_chk, + char *results_buf, + u32 *num_errors, u32 *num_warnings) +{ + /* Offset in results_buf in bytes */ + u32 results_offset = 0; + + u32 rule_idx; + u16 i, j; + + *num_errors = 0; + *num_warnings = 0; + + /* Go over dumped results */ + for (rule_idx = 0; rule_idx < num_rules && dump_buf < dump_buf_end; + rule_idx++) { + const struct dbg_idle_chk_rule_parsing_data *rule_parsing_data; + struct dbg_idle_chk_result_hdr *hdr; + const char *parsing_str, *lsi_msg; + u32 parsing_str_offset; + bool has_fw_msg; + u8 curr_reg_id; + + hdr = (struct dbg_idle_chk_result_hdr *)dump_buf; + rule_parsing_data = + (const struct dbg_idle_chk_rule_parsing_data *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr + + hdr->rule_id; + parsing_str_offset = + GET_FIELD(rule_parsing_data->data, + DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET); + has_fw_msg = + GET_FIELD(rule_parsing_data->data, + DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG) > 0; + parsing_str = (const char *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr + + parsing_str_offset; + lsi_msg = parsing_str; + curr_reg_id = 0; + + if (hdr->severity >= MAX_DBG_IDLE_CHK_SEVERITY_TYPES) + return 0; + + /* Skip rule header */ + dump_buf += BYTES_TO_DWORDS(sizeof(*hdr)); + + /* Update errors/warnings count */ + if (hdr->severity == IDLE_CHK_SEVERITY_ERROR || + hdr->severity == IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC) + (*num_errors)++; + else + (*num_warnings)++; + + /* Print rule severity */ + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), "%s: ", + s_idle_chk_severity_str[hdr->severity]); + + /* Print rule message */ + if (has_fw_msg) + parsing_str += strlen(parsing_str) + 1; + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), "%s.", + has_fw_msg && + print_fw_idle_chk ? 
parsing_str : lsi_msg); + parsing_str += strlen(parsing_str) + 1; + + /* Print register values */ + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), " Registers:"); + for (i = 0; + i < hdr->num_dumped_cond_regs + hdr->num_dumped_info_regs; + i++) { + struct dbg_idle_chk_result_reg_hdr *reg_hdr; + bool is_mem; + u8 reg_id; + + reg_hdr = + (struct dbg_idle_chk_result_reg_hdr *)dump_buf; + is_mem = GET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM); + reg_id = GET_FIELD(reg_hdr->data, + DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID); + + /* Skip reg header */ + dump_buf += BYTES_TO_DWORDS(sizeof(*reg_hdr)); + + /* Skip register names until the required reg_id is + * reached. + */ + for (; reg_id > curr_reg_id; curr_reg_id++) + parsing_str += strlen(parsing_str) + 1; + + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), " %s", + parsing_str); + if (i < hdr->num_dumped_cond_regs && is_mem) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "[%d]", hdr->mem_entry_id + + reg_hdr->start_entry); + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), "="); + for (j = 0; j < reg_hdr->size; j++, dump_buf++) { + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "0x%x", *dump_buf); + if (j < reg_hdr->size - 1) + results_offset += + sprintf(qed_get_buf_ptr + (results_buf, + results_offset), ","); + } + } + + results_offset += + sprintf(qed_get_buf_ptr(results_buf, results_offset), "\n"); + } + + /* Check if end of dump buffer was exceeded */ + if (dump_buf > dump_buf_end) + return 0; + + return results_offset; +} + +/* Parses an idle check dump buffer. + * If result_buf is not NULL, the idle check results are printed to it. + * In any case, the required results buffer size is assigned to + * parsed_results_bytes. + * The parsing status is returned. 
+ */ +static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf, + u32 *parsed_results_bytes, + u32 *num_errors, + u32 *num_warnings) +{ + u32 num_section_params = 0, num_rules, num_rules_not_dumped; + const char *section_name, *param_name, *param_str_val; + u32 *dump_buf_end = dump_buf + num_dumped_dwords; + + /* Offset in results_buf in bytes */ + u32 results_offset = 0; + + *parsed_results_bytes = 0; + *num_errors = 0; + *num_warnings = 0; + + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_IDLE_CHK_PARSING_DATA].ptr) + return DBG_STATUS_DBG_ARRAY_NOT_SET; + + /* Read global_params section */ + dump_buf += qed_read_section_hdr(dump_buf, + &section_name, &num_section_params); + if (strcmp(section_name, "global_params")) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + + /* Print global params */ + dump_buf += qed_print_section_params(dump_buf, + num_section_params, + results_buf, &results_offset); + + /* Read idle_chk section + * There may be 1 or 2 idle_chk section parameters: + * - 1st is "num_rules" + * - 2nd is "num_rules_not_dumped" (optional) + */ + + dump_buf += qed_read_section_hdr(dump_buf, + &section_name, &num_section_params); + if (strcmp(section_name, "idle_chk") || + (num_section_params != 2 && num_section_params != 1)) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + dump_buf += qed_read_param(dump_buf, + &param_name, &param_str_val, &num_rules); + if (strcmp(param_name, "num_rules")) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + if (num_section_params > 1) { + dump_buf += qed_read_param(dump_buf, + &param_name, + &param_str_val, + &num_rules_not_dumped); + if (strcmp(param_name, "num_rules_not_dumped")) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + } else { + num_rules_not_dumped = 0; + } + + if (num_rules) { + u32 rules_print_size; + + /* Print FW output */ + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "FW_IDLE_CHECK:\n"); + rules_print_size = + qed_parse_idle_chk_dump_rules(p_hwfn, + dump_buf, + dump_buf_end, + num_rules, + true, + results_buf ? + results_buf + + results_offset : + NULL, + num_errors, + num_warnings); + results_offset += rules_print_size; + if (!rules_print_size) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + + /* Print LSI output */ + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nLSI_IDLE_CHECK:\n"); + rules_print_size = + qed_parse_idle_chk_dump_rules(p_hwfn, + dump_buf, + dump_buf_end, + num_rules, + false, + results_buf ? + results_buf + + results_offset : + NULL, + num_errors, + num_warnings); + results_offset += rules_print_size; + if (!rules_print_size) + return DBG_STATUS_IDLE_CHK_PARSE_FAILED; + } + + /* Print errors/warnings count */ + if (*num_errors) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nIdle Check failed!!! 
(with %d errors and %d warnings)\n", + *num_errors, *num_warnings); + else if (*num_warnings) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nIdle Check completed successfully (with %d warnings)\n", + *num_warnings); + else + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nIdle Check completed successfully\n"); + + if (num_rules_not_dumped) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n", + num_rules_not_dumped); + + /* Add 1 for string NULL termination */ + *parsed_results_bytes = results_offset + 1; + + return DBG_STATUS_OK; +} + +/* Allocates and fills MCP Trace meta data based on the specified meta data + * dump buffer. + * Returns debug status code. + */ +static enum dbg_status +qed_mcp_trace_alloc_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) +{ + struct dbg_tools_user_data *dev_user_data; + u32 offset = 0, signature, i; + struct mcp_trace_meta *meta; + u8 *meta_buf_bytes; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + meta_buf_bytes = (u8 *)meta_buf; + + /* Free the previous meta before loading a new one. */ + if (meta->is_allocated) + qed_mcp_trace_free_meta_data(p_hwfn); + + memset(meta, 0, sizeof(*meta)); + + /* Read first signature */ + signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); + if (signature != NVM_MAGIC_VALUE) + return DBG_STATUS_INVALID_TRACE_SIGNATURE; + + /* Read no. of modules and allocate memory for their pointers */ + meta->modules_num = qed_read_byte_from_buf(meta_buf_bytes, &offset); + meta->modules = kcalloc(meta->modules_num, sizeof(char *), + GFP_KERNEL); + if (!meta->modules) + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + + /* Allocate and read all module strings */ + for (i = 0; i < meta->modules_num; i++) { + u8 module_len = qed_read_byte_from_buf(meta_buf_bytes, &offset); + + *(meta->modules + i) = kzalloc(module_len, GFP_KERNEL); + if (!(*(meta->modules + i))) { + /* Update number of modules to be released */ + meta->modules_num = i ? i - 1 : 0; + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + } + + qed_read_str_from_buf(meta_buf_bytes, &offset, module_len, + *(meta->modules + i)); + if (module_len > MCP_TRACE_MAX_MODULE_LEN) + (*(meta->modules + i))[MCP_TRACE_MAX_MODULE_LEN] = '\0'; + } + + /* Read second signature */ + signature = qed_read_dword_from_buf(meta_buf_bytes, &offset); + if (signature != NVM_MAGIC_VALUE) + return DBG_STATUS_INVALID_TRACE_SIGNATURE; + + /* Read number of formats and allocate memory for all formats */ + meta->formats_num = qed_read_dword_from_buf(meta_buf_bytes, &offset); + meta->formats = kcalloc(meta->formats_num, + sizeof(struct mcp_trace_format), + GFP_KERNEL); + if (!meta->formats) + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + + /* Allocate and read all strings */ + for (i = 0; i < meta->formats_num; i++) { + struct mcp_trace_format *format_ptr = &meta->formats[i]; + u8 format_len; + + format_ptr->data = qed_read_dword_from_buf(meta_buf_bytes, + &offset); + format_len = GET_MFW_FIELD(format_ptr->data, + MCP_TRACE_FORMAT_LEN); + format_ptr->format_str = kzalloc(format_len, GFP_KERNEL); + if (!format_ptr->format_str) { + /* Update number of modules to be released */ + meta->formats_num = i ? 
i - 1 : 0; + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + } + + qed_read_str_from_buf(meta_buf_bytes, + &offset, + format_len, format_ptr->format_str); + } + + meta->is_allocated = true; + return DBG_STATUS_OK; +} + +/* Parses an MCP trace buffer. If result_buf is not NULL, the MCP Trace results + * are printed to it. The parsing status is returned. + * Arguments: + * trace_buf - MCP trace cyclic buffer + * trace_buf_size - MCP trace cyclic buffer size in bytes + * data_offset - offset in bytes of the data to parse in the MCP trace cyclic + * buffer. + * data_size - size in bytes of data to parse. + * parsed_buf - destination buffer for parsed data. + * parsed_results_bytes - size of parsed data in bytes. + */ +static enum dbg_status qed_parse_mcp_trace_buf(struct qed_hwfn *p_hwfn, + u8 *trace_buf, + u32 trace_buf_size, + u32 data_offset, + u32 data_size, + char *parsed_buf, + u32 *parsed_results_bytes) +{ + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; + u32 param_mask, param_shift; + enum dbg_status status; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + *parsed_results_bytes = 0; + + if (!meta->is_allocated) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + status = DBG_STATUS_OK; + + while (data_size) { + struct mcp_trace_format *format_ptr; + u8 format_level, format_module; + u32 params[3] = { 0, 0, 0 }; + u32 header, format_idx, i; + + if (data_size < MFW_TRACE_ENTRY_SIZE) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + header = qed_read_from_cyclic_buf(trace_buf, + &data_offset, + trace_buf_size, + MFW_TRACE_ENTRY_SIZE); + data_size -= MFW_TRACE_ENTRY_SIZE; + format_idx = header & MFW_TRACE_EVENTID_MASK; + + /* Skip message if its index doesn't exist in the meta data */ + if (format_idx >= meta->formats_num) { + u8 format_size = (u8)GET_MFW_FIELD(header, + MFW_TRACE_PRM_SIZE); + + if (data_size < format_size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + data_offset = qed_cyclic_add(data_offset, + format_size, + trace_buf_size); + data_size -= format_size; + continue; + } + + format_ptr = &meta->formats[format_idx]; + + for (i = 0, + param_mask = MCP_TRACE_FORMAT_P1_SIZE_MASK, param_shift = + MCP_TRACE_FORMAT_P1_SIZE_OFFSET; + i < MCP_TRACE_FORMAT_MAX_PARAMS; + i++, param_mask <<= MCP_TRACE_FORMAT_PARAM_WIDTH, + param_shift += MCP_TRACE_FORMAT_PARAM_WIDTH) { + /* Extract param size (0..3) */ + u8 param_size = (u8)((format_ptr->data & param_mask) >> + param_shift); + + /* If the param size is zero, there are no other + * parameters. + */ + if (!param_size) + break; + + /* Size is encoded using 2 bits, where 3 is used to + * encode 4. 
+ */ + if (param_size == 3) + param_size = 4; + + if (data_size < param_size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + params[i] = qed_read_from_cyclic_buf(trace_buf, + &data_offset, + trace_buf_size, + param_size); + data_size -= param_size; + } + + format_level = (u8)GET_MFW_FIELD(format_ptr->data, + MCP_TRACE_FORMAT_LEVEL); + format_module = (u8)GET_MFW_FIELD(format_ptr->data, + MCP_TRACE_FORMAT_MODULE); + if (format_level >= ARRAY_SIZE(s_mcp_trace_level_str)) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + /* Print current message to results buffer */ + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, + *parsed_results_bytes), + "%s %-8s: ", + s_mcp_trace_level_str[format_level], + meta->modules[format_module]); + *parsed_results_bytes += + sprintf(qed_get_buf_ptr(parsed_buf, *parsed_results_bytes), + format_ptr->format_str, + params[0], params[1], params[2]); + } + + /* Add string NULL terminator */ + (*parsed_results_bytes)++; + + return status; +} + +/* Parses an MCP Trace dump buffer. + * If result_buf is not NULL, the MCP Trace results are printed to it. + * In any case, the required results buffer size is assigned to + * parsed_results_bytes. + * The parsing status is returned. + */ +static enum dbg_status qed_parse_mcp_trace_dump(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf, + u32 *parsed_results_bytes, + bool free_meta_data) +{ + const char *section_name, *param_name, *param_str_val; + u32 data_size, trace_data_dwords, trace_meta_dwords; + u32 offset, results_offset, results_buf_bytes; + u32 param_num_val, num_section_params; + struct mcp_trace *trace; + enum dbg_status status; + const u32 *meta_buf; + u8 *trace_buf; + + *parsed_results_bytes = 0; + + /* Read global_params section */ + dump_buf += qed_read_section_hdr(dump_buf, + &section_name, &num_section_params); + if (strcmp(section_name, "global_params")) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + /* Print global params */ + dump_buf += qed_print_section_params(dump_buf, + num_section_params, + results_buf, &results_offset); + + /* Read trace_data section */ + dump_buf += qed_read_section_hdr(dump_buf, + &section_name, &num_section_params); + if (strcmp(section_name, "mcp_trace_data") || num_section_params != 1) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + dump_buf += qed_read_param(dump_buf, + &param_name, &param_str_val, &param_num_val); + if (strcmp(param_name, "size")) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + trace_data_dwords = param_num_val; + + /* Prepare trace info */ + trace = (struct mcp_trace *)dump_buf; + if (trace->signature != MFW_TRACE_SIGNATURE || !trace->size) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + + trace_buf = (u8 *)dump_buf + sizeof(*trace); + offset = trace->trace_oldest; + data_size = qed_cyclic_sub(trace->trace_prod, offset, trace->size); + dump_buf += trace_data_dwords; + + /* Read meta_data section */ + dump_buf += qed_read_section_hdr(dump_buf, + &section_name, &num_section_params); + if (strcmp(section_name, "mcp_trace_meta")) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + dump_buf += qed_read_param(dump_buf, + &param_name, &param_str_val, &param_num_val); + if (strcmp(param_name, "size")) + return DBG_STATUS_MCP_TRACE_BAD_DATA; + trace_meta_dwords = param_num_val; + + /* Choose meta data buffer */ + if (!trace_meta_dwords) { + /* Dump doesn't include meta data */ + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + if (!dev_user_data->mcp_trace_user_meta_buf) + return DBG_STATUS_MCP_TRACE_NO_META; + + meta_buf = dev_user_data->mcp_trace_user_meta_buf; + } else { + /* Dump 
includes meta data */ + meta_buf = dump_buf; + } + + /* Allocate meta data memory */ + status = qed_mcp_trace_alloc_meta_data(p_hwfn, meta_buf); + if (status != DBG_STATUS_OK) + return status; + + status = qed_parse_mcp_trace_buf(p_hwfn, + trace_buf, + trace->size, + offset, + data_size, + results_buf ? + results_buf + results_offset : + NULL, + &results_buf_bytes); + if (status != DBG_STATUS_OK) + return status; + + if (free_meta_data) + qed_mcp_trace_free_meta_data(p_hwfn); + + *parsed_results_bytes = results_offset + results_buf_bytes; + + return DBG_STATUS_OK; +} + +/* Parses a Reg FIFO dump buffer. + * If result_buf is not NULL, the Reg FIFO results are printed to it. + * In any case, the required results buffer size is assigned to + * parsed_results_bytes. + * The parsing status is returned. + */ +static enum dbg_status qed_parse_reg_fifo_dump(u32 *dump_buf, + char *results_buf, + u32 *parsed_results_bytes) +{ + const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; + struct reg_fifo_element *elements; + u8 i, j, err_code, vf_val; + u32 results_offset = 0; + char vf_str[4]; + + /* Read global_params section */ + dump_buf += qed_read_section_hdr(dump_buf, + &section_name, &num_section_params); + if (strcmp(section_name, "global_params")) + return DBG_STATUS_REG_FIFO_BAD_DATA; + + /* Print global params */ + dump_buf += qed_print_section_params(dump_buf, + num_section_params, + results_buf, &results_offset); + + /* Read reg_fifo_data section */ + dump_buf += qed_read_section_hdr(dump_buf, + &section_name, &num_section_params); + if (strcmp(section_name, "reg_fifo_data")) + return DBG_STATUS_REG_FIFO_BAD_DATA; + dump_buf += qed_read_param(dump_buf, + &param_name, &param_str_val, &param_num_val); + if (strcmp(param_name, "size")) + return DBG_STATUS_REG_FIFO_BAD_DATA; + if (param_num_val % REG_FIFO_ELEMENT_DWORDS) + return DBG_STATUS_REG_FIFO_BAD_DATA; + num_elements = param_num_val / REG_FIFO_ELEMENT_DWORDS; + elements = (struct reg_fifo_element *)dump_buf; + + /* Decode elements */ + for (i = 0; i < num_elements; i++) { + const char *err_msg = NULL; + + /* Discover if element belongs to a VF or a PF */ + vf_val = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_VF); + if (vf_val == REG_FIFO_ELEMENT_IS_PF_VF_VAL) + sprintf(vf_str, "%s", "N/A"); + else + sprintf(vf_str, "%d", vf_val); + + /* Find error message */ + err_code = GET_FIELD(elements[i].data, REG_FIFO_ELEMENT_ERROR); + for (j = 0; j < ARRAY_SIZE(s_reg_fifo_errors) && !err_msg; j++) + if (err_code == s_reg_fifo_errors[j].err_code) + err_msg = s_reg_fifo_errors[j].err_msg; + + /* Add parsed element to parsed buffer */ + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "raw: 0x%016llx, address: 0x%07x, access: %-5s, pf: %2d, vf: %s, port: %d, privilege: %-3s, protection: %-12s, master: %-4s, error: %s\n", + elements[i].data, + (u32)GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_ADDRESS) * + REG_FIFO_ELEMENT_ADDR_FACTOR, + s_access_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_ACCESS)], + (u32)GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_PF), + vf_str, + (u32)GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_PORT), + s_privilege_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_PRIVILEGE)], + s_protection_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_PROTECTION)], + s_master_strs[GET_FIELD(elements[i].data, + REG_FIFO_ELEMENT_MASTER)], + err_msg ? 
err_msg : "unknown error code"); + } + + results_offset += sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "fifo contained %d elements", num_elements); + + /* Add 1 for string NULL termination */ + *parsed_results_bytes = results_offset + 1; + + return DBG_STATUS_OK; +} + +static enum dbg_status qed_parse_igu_fifo_element(struct igu_fifo_element + *element, char + *results_buf, + u32 *results_offset) +{ + const struct igu_fifo_addr_data *found_addr = NULL; + u8 source, err_type, i, is_cleanup; + char parsed_addr_data[32]; + char parsed_wr_data[256]; + u32 wr_data, prod_cons; + bool is_wr_cmd, is_pf; + u16 cmd_addr; + u64 dword12; + + /* Dword12 (dword index 1 and 2) contains bits 32..95 of the + * FIFO element. + */ + dword12 = ((u64)element->dword2 << 32) | element->dword1; + is_wr_cmd = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_IS_WR_CMD); + is_pf = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_IS_PF); + cmd_addr = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_CMD_ADDR); + source = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_SOURCE); + err_type = GET_FIELD(element->dword0, IGU_FIFO_ELEMENT_DWORD0_ERR_TYPE); + + if (source >= ARRAY_SIZE(s_igu_fifo_source_strs)) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + if (err_type >= ARRAY_SIZE(s_igu_fifo_error_strs)) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + + /* Find address data */ + for (i = 0; i < ARRAY_SIZE(s_igu_fifo_addr_data) && !found_addr; i++) { + const struct igu_fifo_addr_data *curr_addr = + &s_igu_fifo_addr_data[i]; + + if (cmd_addr >= curr_addr->start_addr && cmd_addr <= + curr_addr->end_addr) + found_addr = curr_addr; + } + + if (!found_addr) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + + /* Prepare parsed address data */ + switch (found_addr->type) { + case IGU_ADDR_TYPE_MSIX_MEM: + sprintf(parsed_addr_data, " vector_num = 0x%x", cmd_addr / 2); + break; + case IGU_ADDR_TYPE_WRITE_INT_ACK: + case IGU_ADDR_TYPE_WRITE_PROD_UPDATE: + sprintf(parsed_addr_data, + " SB = 0x%x", cmd_addr - found_addr->start_addr); + break; + default: + parsed_addr_data[0] = '\0'; + } + + if (!is_wr_cmd) { + parsed_wr_data[0] = '\0'; + goto out; + } + + /* Prepare parsed write data */ + wr_data = GET_FIELD(dword12, IGU_FIFO_ELEMENT_DWORD12_WR_DATA); + prod_cons = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_PROD_CONS); + is_cleanup = GET_FIELD(wr_data, IGU_FIFO_WR_DATA_CMD_TYPE); + + if (source == IGU_SRC_ATTN) { + sprintf(parsed_wr_data, "prod: 0x%x, ", prod_cons); + } else { + if (is_cleanup) { + u8 cleanup_val, cleanup_type; + + cleanup_val = + GET_FIELD(wr_data, + IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_VAL); + cleanup_type = + GET_FIELD(wr_data, + IGU_FIFO_CLEANUP_WR_DATA_CLEANUP_TYPE); + + sprintf(parsed_wr_data, + "cmd_type: cleanup, cleanup_val: %s, cleanup_type : %d, ", + cleanup_val ? "set" : "clear", + cleanup_type); + } else { + u8 update_flag, en_dis_int_for_sb, segment; + u8 timer_mask; + + update_flag = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_UPDATE_FLAG); + en_dis_int_for_sb = + GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_EN_DIS_INT_FOR_SB); + segment = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_SEGMENT); + timer_mask = GET_FIELD(wr_data, + IGU_FIFO_WR_DATA_TIMER_MASK); + + sprintf(parsed_wr_data, + "cmd_type: prod/cons update, prod/cons: 0x%x, update_flag: %s, en_dis_int_for_sb : %s, segment : %s, timer_mask = %d, ", + prod_cons, + update_flag ? "update" : "nop", + en_dis_int_for_sb ? + (en_dis_int_for_sb == 1 ? "disable" : "nop") : + "enable", + segment ? 
"attn" : "regular", + timer_mask); + } + } +out: + /* Add parsed element to parsed buffer */ + *results_offset += sprintf(qed_get_buf_ptr(results_buf, + *results_offset), + "raw: 0x%01x%08x%08x, %s: %d, source : %s, type : %s, cmd_addr : 0x%x(%s%s), %serror: %s\n", + element->dword2, element->dword1, + element->dword0, + is_pf ? "pf" : "vf", + GET_FIELD(element->dword0, + IGU_FIFO_ELEMENT_DWORD0_FID), + s_igu_fifo_source_strs[source], + is_wr_cmd ? "wr" : "rd", + cmd_addr, + (!is_pf && found_addr->vf_desc) + ? found_addr->vf_desc + : found_addr->desc, + parsed_addr_data, + parsed_wr_data, + s_igu_fifo_error_strs[err_type]); + + return DBG_STATUS_OK; +} + +/* Parses an IGU FIFO dump buffer. + * If result_buf is not NULL, the IGU FIFO results are printed to it. + * In any case, the required results buffer size is assigned to + * parsed_results_bytes. + * The parsing status is returned. + */ +static enum dbg_status qed_parse_igu_fifo_dump(u32 *dump_buf, + char *results_buf, + u32 *parsed_results_bytes) +{ + const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; + struct igu_fifo_element *elements; + enum dbg_status status; + u32 results_offset = 0; + u8 i; + + /* Read global_params section */ + dump_buf += qed_read_section_hdr(dump_buf, + §ion_name, &num_section_params); + if (strcmp(section_name, "global_params")) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + + /* Print global params */ + dump_buf += qed_print_section_params(dump_buf, + num_section_params, + results_buf, &results_offset); + + /* Read igu_fifo_data section */ + dump_buf += qed_read_section_hdr(dump_buf, + §ion_name, &num_section_params); + if (strcmp(section_name, "igu_fifo_data")) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + dump_buf += qed_read_param(dump_buf, + ¶m_name, ¶m_str_val, ¶m_num_val); + if (strcmp(param_name, "size")) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + if (param_num_val % IGU_FIFO_ELEMENT_DWORDS) + return DBG_STATUS_IGU_FIFO_BAD_DATA; + num_elements = param_num_val / IGU_FIFO_ELEMENT_DWORDS; + elements = (struct igu_fifo_element *)dump_buf; + + /* Decode elements */ + for (i = 0; i < num_elements; i++) { + status = qed_parse_igu_fifo_element(&elements[i], + results_buf, + &results_offset); + if (status != DBG_STATUS_OK) + return status; + } + + results_offset += sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "fifo contained %d elements", num_elements); + + /* Add 1 for string NULL termination */ + *parsed_results_bytes = results_offset + 1; + + return DBG_STATUS_OK; +} + +static enum dbg_status +qed_parse_protection_override_dump(u32 *dump_buf, + char *results_buf, + u32 *parsed_results_bytes) +{ + const char *section_name, *param_name, *param_str_val; + u32 param_num_val, num_section_params, num_elements; + struct protection_override_element *elements; + u32 results_offset = 0; + u8 i; + + /* Read global_params section */ + dump_buf += qed_read_section_hdr(dump_buf, + §ion_name, &num_section_params); + if (strcmp(section_name, "global_params")) + return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; + + /* Print global params */ + dump_buf += qed_print_section_params(dump_buf, + num_section_params, + results_buf, &results_offset); + + /* Read protection_override_data section */ + dump_buf += qed_read_section_hdr(dump_buf, + §ion_name, &num_section_params); + if (strcmp(section_name, "protection_override_data")) + return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; + dump_buf += qed_read_param(dump_buf, + ¶m_name, ¶m_str_val, ¶m_num_val); + if 
(strcmp(param_name, "size")) + return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; + if (param_num_val % PROTECTION_OVERRIDE_ELEMENT_DWORDS) + return DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA; + num_elements = param_num_val / PROTECTION_OVERRIDE_ELEMENT_DWORDS; + elements = (struct protection_override_element *)dump_buf; + + /* Decode elements */ + for (i = 0; i < num_elements; i++) { + u32 address = GET_FIELD(elements[i].data, + PROTECTION_OVERRIDE_ELEMENT_ADDRESS) * + PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR; + + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "window %2d, address: 0x%07x, size: %7d regs, read: %d, write: %d, read protection: %-12s, write protection: %-12s\n", + i, address, + (u32)GET_FIELD(elements[i].data, + PROTECTION_OVERRIDE_ELEMENT_WINDOW_SIZE), + (u32)GET_FIELD(elements[i].data, + PROTECTION_OVERRIDE_ELEMENT_READ), + (u32)GET_FIELD(elements[i].data, + PROTECTION_OVERRIDE_ELEMENT_WRITE), + s_protection_strs[GET_FIELD(elements[i].data, + PROTECTION_OVERRIDE_ELEMENT_READ_PROTECTION)], + s_protection_strs[GET_FIELD(elements[i].data, + PROTECTION_OVERRIDE_ELEMENT_WRITE_PROTECTION)]); + } + + results_offset += sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "protection override contained %d elements", + num_elements); + + /* Add 1 for string NULL termination */ + *parsed_results_bytes = results_offset + 1; + + return DBG_STATUS_OK; +} + +/* Parses a FW Asserts dump buffer. + * If result_buf is not NULL, the FW Asserts results are printed to it. + * In any case, the required results buffer size is assigned to + * parsed_results_bytes. + * The parsing status is returned. + */ +static enum dbg_status qed_parse_fw_asserts_dump(u32 *dump_buf, + char *results_buf, + u32 *parsed_results_bytes) +{ + u32 num_section_params, param_num_val, i, results_offset = 0; + const char *param_name, *param_str_val, *section_name; + bool last_section_found = false; + + *parsed_results_bytes = 0; + + /* Read global_params section */ + dump_buf += qed_read_section_hdr(dump_buf, + §ion_name, &num_section_params); + if (strcmp(section_name, "global_params")) + return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + + /* Print global params */ + dump_buf += qed_print_section_params(dump_buf, + num_section_params, + results_buf, &results_offset); + + while (!last_section_found) { + dump_buf += qed_read_section_hdr(dump_buf, + §ion_name, + &num_section_params); + if (!strcmp(section_name, "fw_asserts")) { + /* Extract params */ + const char *storm_letter = NULL; + u32 storm_dump_size = 0; + + for (i = 0; i < num_section_params; i++) { + dump_buf += qed_read_param(dump_buf, + ¶m_name, + ¶m_str_val, + ¶m_num_val); + if (!strcmp(param_name, "storm")) + storm_letter = param_str_val; + else if (!strcmp(param_name, "size")) + storm_dump_size = param_num_val; + else + return + DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + } + + if (!storm_letter || !storm_dump_size) + return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + + /* Print data */ + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "\n%sSTORM_ASSERT: size=%d\n", + storm_letter, storm_dump_size); + for (i = 0; i < storm_dump_size; i++, dump_buf++) + results_offset += + sprintf(qed_get_buf_ptr(results_buf, + results_offset), + "%08x\n", *dump_buf); + } else if (!strcmp(section_name, "last")) { + last_section_found = true; + } else { + return DBG_STATUS_FW_ASSERTS_PARSE_FAILED; + } + } + + /* Add 1 for string NULL termination */ + *parsed_results_bytes = results_offset + 1; + + return DBG_STATUS_OK; +} + 
+/***************************** Public Functions *******************************/ + +enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, + const u8 * const bin_ptr) +{ + struct bin_buffer_hdr *buf_hdrs = (struct bin_buffer_hdr *)bin_ptr; + u8 buf_id; + + /* Convert binary data to debug arrays */ + for (buf_id = 0; buf_id < MAX_BIN_DBG_BUFFER_TYPE; buf_id++) + qed_set_dbg_bin_buf(p_hwfn, + (enum bin_dbg_buffer_type)buf_id, + (u32 *)(bin_ptr + buf_hdrs[buf_id].offset), + buf_hdrs[buf_id].length); + + return DBG_STATUS_OK; +} + +enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, + void **user_data_ptr) +{ + *user_data_ptr = kzalloc(sizeof(struct dbg_tools_user_data), + GFP_KERNEL); + if (!(*user_data_ptr)) + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + + return DBG_STATUS_OK; +} + +const char *qed_dbg_get_status_str(enum dbg_status status) +{ + return (status < + MAX_DBG_STATUS) ? s_status_str[status] : "Invalid debug status"; +} + +enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + u32 num_errors, num_warnings; + + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + NULL, + results_buf_size, + &num_errors, &num_warnings); +} + +enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf, + u32 *num_errors, + u32 *num_warnings) +{ + u32 parsed_buf_size; + + return qed_parse_idle_chk_dump(p_hwfn, + dump_buf, + num_dumped_dwords, + results_buf, + &parsed_buf_size, + num_errors, num_warnings); +} + +void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, + const u32 *meta_buf) +{ + struct dbg_tools_user_data *dev_user_data = + qed_dbg_get_user_data(p_hwfn); + + dev_user_data->mcp_trace_user_meta_buf = meta_buf; +} + +enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_mcp_trace_dump(p_hwfn, + dump_buf, NULL, results_buf_size, true); +} + +enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + /* Doesn't do anything, needed for compile time asserts */ + qed_user_static_asserts(); + + return qed_parse_mcp_trace_dump(p_hwfn, + dump_buf, + results_buf, &parsed_buf_size, true); +} + +enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_mcp_trace_dump(p_hwfn, dump_buf, results_buf, + &parsed_buf_size, false); +} + +enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, + u8 *dump_buf, + u32 num_dumped_bytes, + char *results_buf) +{ + u32 parsed_results_bytes; + + return qed_parse_mcp_trace_buf(p_hwfn, + dump_buf, + num_dumped_bytes, + 0, + num_dumped_bytes, + results_buf, &parsed_results_bytes); +} + +/* Frees the specified MCP Trace meta data */ +void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn) +{ + struct dbg_tools_user_data *dev_user_data; + struct mcp_trace_meta *meta; + u32 i; + + dev_user_data = qed_dbg_get_user_data(p_hwfn); + meta = &dev_user_data->mcp_trace_meta; + if (!meta->is_allocated) + return; + + /* Release modules */ + if (meta->modules) { + for (i = 0; i < meta->modules_num; i++) + kfree(meta->modules[i]); + kfree(meta->modules); + } + + /* Release formats */ + if (meta->formats) { + for (i = 0; i < meta->formats_num; i++) + 
kfree(meta->formats[i].format_str); + kfree(meta->formats); + } + + meta->is_allocated = false; +} + +enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_reg_fifo_dump(dump_buf, NULL, results_buf_size); +} + +enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_reg_fifo_dump(dump_buf, results_buf, &parsed_buf_size); +} + +enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_igu_fifo_dump(dump_buf, NULL, results_buf_size); +} + +enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_igu_fifo_dump(dump_buf, results_buf, &parsed_buf_size); +} + +enum dbg_status +qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_protection_override_dump(dump_buf, + NULL, results_buf_size); +} + +enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_protection_override_dump(dump_buf, + results_buf, + &parsed_buf_size); +} + +enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size) +{ + return qed_parse_fw_asserts_dump(dump_buf, NULL, results_buf_size); +} + +enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 parsed_buf_size; + + return qed_parse_fw_asserts_dump(dump_buf, + results_buf, &parsed_buf_size); +} + +enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, + struct dbg_attn_block_result *results) +{ + const u32 *block_attn_name_offsets; + const char *attn_name_base; + const char *block_name; + enum dbg_attn_type attn_type; + u8 num_regs, i, j; + + num_regs = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_NUM_REGS); + attn_type = GET_FIELD(results->data, DBG_ATTN_BLOCK_RESULT_ATTN_TYPE); + block_name = qed_dbg_get_block_name(p_hwfn, results->block_id); + if (!block_name) + return DBG_STATUS_INVALID_ARGS; + + if (!p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr || + !p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr) + return DBG_STATUS_DBG_ARRAY_NOT_SET; + + block_attn_name_offsets = + (u32 *)p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_NAME_OFFSETS].ptr + + results->names_offset; + + attn_name_base = p_hwfn->dbg_arrays[BIN_BUF_DBG_PARSING_STRINGS].ptr; + + /* Go over registers with a non-zero attention status */ + for (i = 0; i < num_regs; i++) { + struct dbg_attn_bit_mapping *bit_mapping; + struct dbg_attn_reg_result *reg_result; + u8 num_reg_attn, bit_idx = 0; + + reg_result = &results->reg_results[i]; + num_reg_attn = GET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_NUM_REG_ATTN); + bit_mapping = (struct dbg_attn_bit_mapping *) + p_hwfn->dbg_arrays[BIN_BUF_DBG_ATTN_INDEXES].ptr + + reg_result->block_attn_offset; + + /* Go over attention status bits */ + for (j = 0; j < num_reg_attn; j++) { + u16 attn_idx_val = GET_FIELD(bit_mapping[j].data, + 
DBG_ATTN_BIT_MAPPING_VAL); + const char *attn_name, *attn_type_str, *masked_str; + u32 attn_name_offset; + u32 sts_addr; + + /* Check if bit mask should be advanced (due to unused + * bits). + */ + if (GET_FIELD(bit_mapping[j].data, + DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT)) { + bit_idx += (u8)attn_idx_val; + continue; + } + + /* Check current bit index */ + if (reg_result->sts_val & BIT(bit_idx)) { + /* An attention bit with value=1 was found + * Find attention name + */ + attn_name_offset = + block_attn_name_offsets[attn_idx_val]; + attn_name = attn_name_base + attn_name_offset; + attn_type_str = + (attn_type == + ATTN_TYPE_INTERRUPT ? "Interrupt" : + "Parity"); + masked_str = reg_result->mask_val & + BIT(bit_idx) ? + " [masked]" : ""; + sts_addr = + GET_FIELD(reg_result->data, + DBG_ATTN_REG_RESULT_STS_ADDRESS); + DP_NOTICE(p_hwfn, + "%s (%s) : %s [address 0x%08x, bit %d]%s\n", + block_name, attn_type_str, attn_name, + sts_addr * 4, bit_idx, masked_str); + } + + bit_idx++; + } + } + + return DBG_STATUS_OK; +} + +/* Wrapper for unifying the idle_chk and mcp_trace api */ +static enum dbg_status +qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + char *results_buf) +{ + u32 num_errors, num_warnnings; + + return qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords, + results_buf, &num_errors, + &num_warnnings); +} + +static DEFINE_MUTEX(qed_dbg_lock); + +#define MAX_PHY_RESULT_BUFFER 9000 + +/******************************** Feature Meta data section ******************/ + +#define GRC_NUM_STR_FUNCS 2 +#define IDLE_CHK_NUM_STR_FUNCS 1 +#define MCP_TRACE_NUM_STR_FUNCS 1 +#define REG_FIFO_NUM_STR_FUNCS 1 +#define IGU_FIFO_NUM_STR_FUNCS 1 +#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1 +#define FW_ASSERTS_NUM_STR_FUNCS 1 +#define ILT_NUM_STR_FUNCS 1 +#define PHY_NUM_STR_FUNCS 20 + +/* Feature meta data lookup table */ +static struct { + char *name; + u32 num_funcs; + enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *size); + enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *dump_buf, + u32 buf_size, u32 *dumped_dwords); + enum dbg_status (*print_results)(struct qed_hwfn *p_hwfn, + u32 *dump_buf, u32 num_dumped_dwords, + char *results_buf); + enum dbg_status (*results_buf_size)(struct qed_hwfn *p_hwfn, + u32 *dump_buf, + u32 num_dumped_dwords, + u32 *results_buf_size); + const struct qed_func_lookup *hsi_func_lookup; +} qed_features_lookup[] = { + { + "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size, + qed_dbg_grc_dump, NULL, NULL, NULL}, { + "idle_chk", IDLE_CHK_NUM_STR_FUNCS, + qed_dbg_idle_chk_get_dump_buf_size, + qed_dbg_idle_chk_dump, + qed_print_idle_chk_results_wrapper, + qed_get_idle_chk_results_buf_size, + NULL}, { + "mcp_trace", MCP_TRACE_NUM_STR_FUNCS, + qed_dbg_mcp_trace_get_dump_buf_size, + qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results, + qed_get_mcp_trace_results_buf_size, + NULL}, { + "reg_fifo", REG_FIFO_NUM_STR_FUNCS, + qed_dbg_reg_fifo_get_dump_buf_size, + qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results, + qed_get_reg_fifo_results_buf_size, + NULL}, { + "igu_fifo", IGU_FIFO_NUM_STR_FUNCS, + qed_dbg_igu_fifo_get_dump_buf_size, + qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results, + qed_get_igu_fifo_results_buf_size, + NULL}, { + "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS, + qed_dbg_protection_override_get_dump_buf_size, + qed_dbg_protection_override_dump, + qed_print_protection_override_results, + 
qed_get_protection_override_results_buf_size, + NULL}, { + "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS, + qed_dbg_fw_asserts_get_dump_buf_size, + qed_dbg_fw_asserts_dump, + qed_print_fw_asserts_results, + qed_get_fw_asserts_results_buf_size, + NULL}, { + "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size, + qed_dbg_ilt_dump, NULL, NULL, NULL},}; + +static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size) +{ + u32 i, precision = 80; + + if (!p_text_buf) + return; + + pr_notice("\n%.*s", precision, p_text_buf); + for (i = precision; i < text_size; i += precision) + pr_cont("%.*s", precision, p_text_buf + i); + pr_cont("\n"); +} + +#define QED_RESULTS_BUF_MIN_SIZE 16 +/* Generic function for decoding debug feature info */ +static enum dbg_status format_feature(struct qed_hwfn *p_hwfn, + enum qed_dbg_features feature_idx) +{ + struct qed_dbg_feature *feature = + &p_hwfn->cdev->dbg_features[feature_idx]; + u32 txt_size_bytes, null_char_pos, i; + u32 *dbuf, dwords; + enum dbg_status rc; + char *text_buf; + + /* Check if feature supports formatting capability */ + if (!qed_features_lookup[feature_idx].results_buf_size) + return DBG_STATUS_OK; + + dbuf = (u32 *)feature->dump_buf; + dwords = feature->dumped_dwords; + + /* Obtain size of formatted output */ + rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn, + dbuf, + dwords, + &txt_size_bytes); + if (rc != DBG_STATUS_OK) + return rc; + + /* Make sure that the allocated size is a multiple of dword + * (4 bytes). + */ + null_char_pos = txt_size_bytes - 1; + txt_size_bytes = (txt_size_bytes + 3) & ~0x3; + + if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) { + DP_NOTICE(p_hwfn->cdev, + "formatted size of feature was too small %d. Aborting\n", + txt_size_bytes); + return DBG_STATUS_INVALID_ARGS; + } + + /* allocate temp text buf */ + text_buf = vzalloc(txt_size_bytes); + if (!text_buf) { + DP_NOTICE(p_hwfn->cdev, + "failed to allocate text buffer. Aborting\n"); + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + } + + /* Decode feature opcodes to string on temp buf */ + rc = qed_features_lookup[feature_idx].print_results(p_hwfn, + dbuf, + dwords, + text_buf); + if (rc != DBG_STATUS_OK) { + vfree(text_buf); + return rc; + } + + /* Replace the original null character with a '\n' character. + * The bytes that were added as a result of the dword alignment are also + * padded with '\n' characters. + */ + for (i = null_char_pos; i < txt_size_bytes; i++) + text_buf[i] = '\n'; + + /* Dump printable feature to log */ + if (p_hwfn->cdev->print_dbg_data) + qed_dbg_print_feature(text_buf, txt_size_bytes); + + /* Dump binary data as is to the output file */ + if (p_hwfn->cdev->dbg_bin_dump) { + vfree(text_buf); + return rc; + } + + /* Free the old dump_buf and point the dump_buf to the newly allocated + * and formatted text buffer. + */ + vfree(feature->dump_buf); + feature->dump_buf = text_buf; + feature->buf_size = txt_size_bytes; + feature->dumped_dwords = txt_size_bytes / 4; + + return rc; +} + +#define MAX_DBG_FEATURE_SIZE_DWORDS 0x3FFFFFFF + +/* Generic function for performing the dump of a debug feature. 
*/ +static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_dbg_features feature_idx) +{ + struct qed_dbg_feature *feature = + &p_hwfn->cdev->dbg_features[feature_idx]; + u32 buf_size_dwords, *dbuf, *dwords; + enum dbg_status rc; + + DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n", + qed_features_lookup[feature_idx].name); + + /* Dump_buf was already allocated need to free (this can happen if dump + * was called but file was never read). + * We can't use the buffer as is since size may have changed. + */ + if (feature->dump_buf) { + vfree(feature->dump_buf); + feature->dump_buf = NULL; + } + + /* Get buffer size from hsi, allocate accordingly, and perform the + * dump. + */ + rc = qed_features_lookup[feature_idx].get_size(p_hwfn, p_ptt, + &buf_size_dwords); + if (rc != DBG_STATUS_OK && rc != DBG_STATUS_NVRAM_GET_IMAGE_FAILED) + return rc; + + if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) { + feature->buf_size = 0; + DP_NOTICE(p_hwfn->cdev, + "Debug feature [\"%s\"] size (0x%x dwords) exceeds maximum size (0x%x dwords)\n", + qed_features_lookup[feature_idx].name, + buf_size_dwords, MAX_DBG_FEATURE_SIZE_DWORDS); + + return DBG_STATUS_OK; + } + + feature->buf_size = buf_size_dwords * sizeof(u32); + feature->dump_buf = vmalloc(feature->buf_size); + if (!feature->dump_buf) + return DBG_STATUS_VIRT_MEM_ALLOC_FAILED; + + dbuf = (u32 *)feature->dump_buf; + dwords = &feature->dumped_dwords; + rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt, + dbuf, + feature->buf_size / + sizeof(u32), + dwords); + + /* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error. + * In this case the buffer holds valid binary data, but we won't able + * to parse it (since parsing relies on data in NVRAM which is only + * accessible when MFW is responsive). skip the formatting but return + * success so that binary data is provided. 
+ */ + if (rc == DBG_STATUS_NVRAM_GET_IMAGE_FAILED) + return DBG_STATUS_OK; + + if (rc != DBG_STATUS_OK) + return rc; + + /* Format output */ + rc = format_feature(p_hwfn, feature_idx); + return rc; +} + +int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_GRC, num_dumped_bytes); +} + +int qed_dbg_grc_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_GRC); +} + +int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IDLE_CHK, + num_dumped_bytes); +} + +int qed_dbg_idle_chk_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_IDLE_CHK); +} + +int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_REG_FIFO, + num_dumped_bytes); +} + +int qed_dbg_reg_fifo_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_REG_FIFO); +} + +int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_IGU_FIFO, + num_dumped_bytes); +} + +int qed_dbg_igu_fifo_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_IGU_FIFO); +} + +static int qed_dbg_nvm_image_length(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, u32 *length) +{ + struct qed_nvm_image_att image_att; + int rc; + + *length = 0; + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); + if (rc) + return rc; + + *length = image_att.length; + + return rc; +} + +static int qed_dbg_nvm_image(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes, + enum qed_nvm_images image_id) +{ + struct qed_hwfn *p_hwfn = + &cdev->hwfns[cdev->engine_for_debug]; + u32 len_rounded; + int rc; + + *num_dumped_bytes = 0; + rc = qed_dbg_nvm_image_length(p_hwfn, image_id, &len_rounded); + if (rc) + return rc; + + DP_NOTICE(p_hwfn->cdev, + "Collecting a debug feature [\"nvram image %d\"]\n", + image_id); + + len_rounded = roundup(len_rounded, sizeof(u32)); + rc = qed_mcp_get_nvm_image(p_hwfn, image_id, buffer, len_rounded); + if (rc) + return rc; + + /* QED_NVM_IMAGE_NVM_META image is not swapped like other images */ + if (image_id != QED_NVM_IMAGE_NVM_META) + cpu_to_be32_array((__force __be32 *)buffer, + (const u32 *)buffer, + len_rounded / sizeof(u32)); + + *num_dumped_bytes = len_rounded; + + return rc; +} + +int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_PROTECTION_OVERRIDE, + num_dumped_bytes); +} + +int qed_dbg_protection_override_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_PROTECTION_OVERRIDE); +} + +int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_FW_ASSERTS, + num_dumped_bytes); +} + +int qed_dbg_fw_asserts_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_FW_ASSERTS); +} + +int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_ILT, num_dumped_bytes); +} + +int qed_dbg_ilt_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_ILT); +} + +int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes) +{ + return qed_dbg_feature(cdev, buffer, DBG_FEATURE_MCP_TRACE, + 
num_dumped_bytes); +} + +int qed_dbg_mcp_trace_size(struct qed_dev *cdev) +{ + return qed_dbg_feature_size(cdev, DBG_FEATURE_MCP_TRACE); +} + +/* Defines the amount of bytes allocated for recording the length of debugfs + * feature buffer. + */ +#define REGDUMP_HEADER_SIZE sizeof(u32) +#define REGDUMP_HEADER_SIZE_SHIFT 0 +#define REGDUMP_HEADER_SIZE_MASK 0xffffff +#define REGDUMP_HEADER_FEATURE_SHIFT 24 +#define REGDUMP_HEADER_FEATURE_MASK 0x1f +#define REGDUMP_HEADER_BIN_DUMP_SHIFT 29 +#define REGDUMP_HEADER_BIN_DUMP_MASK 0x1 +#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30 +#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1 +#define REGDUMP_HEADER_ENGINE_SHIFT 31 +#define REGDUMP_HEADER_ENGINE_MASK 0x1 +#define REGDUMP_MAX_SIZE 0x1000000 +#define ILT_DUMP_MAX_SIZE (1024 * 1024 * 15) + +enum debug_print_features { + OLD_MODE = 0, + IDLE_CHK = 1, + GRC_DUMP = 2, + MCP_TRACE = 3, + REG_FIFO = 4, + PROTECTION_OVERRIDE = 5, + IGU_FIFO = 6, + PHY = 7, + FW_ASSERTS = 8, + NVM_CFG1 = 9, + DEFAULT_CFG = 10, + NVM_META = 11, + MDUMP = 12, + ILT_DUMP = 13, +}; + +static u32 qed_calc_regdump_header(struct qed_dev *cdev, + enum debug_print_features feature, + int engine, u32 feature_size, + u8 omit_engine, u8 dbg_bin_dump) +{ + u32 res = 0; + + SET_FIELD(res, REGDUMP_HEADER_SIZE, feature_size); + if (res != feature_size) + DP_NOTICE(cdev, + "Feature %d is too large (size 0x%x) and will corrupt the dump\n", + feature, feature_size); + + SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature); + SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump); + SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine); + SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine); + + return res; +} + +int qed_dbg_all_data(struct qed_dev *cdev, void *buffer) +{ + u8 cur_engine, omit_engine = 0, org_engine; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; + struct dbg_tools_data *dev_data = &p_hwfn->dbg_info; + int grc_params[MAX_DBG_GRC_PARAMS], rc, i; + u32 offset = 0, feature_size; + + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + grc_params[i] = dev_data->grc.param_val[i]; + + if (!QED_IS_CMT(cdev)) + omit_engine = 1; + + cdev->dbg_bin_dump = 1; + mutex_lock(&qed_dbg_lock); + + org_engine = qed_get_debug_engine(cdev); + for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { + /* Collect idle_chks and grcDump for each hw function */ + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "obtaining idle_chk and grcdump for current engine\n"); + qed_set_debug_engine(cdev, cur_engine); + + /* First idle_chk */ + rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc); + } + + /* Second idle_chk */ + rc = qed_dbg_idle_chk(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, IDLE_CHK, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_idle_chk failed. 
rc = %d\n", rc); + } + + /* reg_fifo dump */ + rc = qed_dbg_reg_fifo(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, REG_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc); + } + + /* igu_fifo dump */ + rc = qed_dbg_igu_fifo(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, IGU_FIFO, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc); + } + + /* protection_override dump */ + rc = qed_dbg_protection_override(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, + &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, + PROTECTION_OVERRIDE, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, + "qed_dbg_protection_override failed. rc = %d\n", + rc); + } + + /* fw_asserts dump */ + rc = qed_dbg_fw_asserts(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, FW_ASSERTS, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n", + rc); + } + + feature_size = qed_dbg_ilt_size(cdev); + if (!cdev->disable_ilt_dump && feature_size < + ILT_DUMP_MAX_SIZE) { + rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, ILT_DUMP, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n", + rc); + } + } + + /* Grc dump - must be last because when mcp stuck it will + * clutter idle_chk, reg_fifo, ... + */ + for (i = 0; i < MAX_DBG_GRC_PARAMS; i++) + dev_data->grc.param_val[i] = grc_params[i]; + + rc = qed_dbg_grc(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, GRC_DUMP, + cur_engine, + feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc); + } + } + + qed_set_debug_engine(cdev, org_engine); + + /* mcp_trace */ + rc = qed_dbg_mcp_trace(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else { + DP_ERR(cdev, "qed_dbg_mcp_trace failed. 
rc = %d\n", rc); + } + + /* nvm cfg1 */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_NVM_CFG1); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", + rc); + } + + /* nvm default */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_DEFAULT_CFG); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, DEFAULT_CFG, + cur_engine, feature_size, + omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_DEFAULT_CFG, + "QED_NVM_IMAGE_DEFAULT_CFG", rc); + } + + /* nvm meta */ + rc = qed_dbg_nvm_image(cdev, + (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_NVM_META); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, NVM_META, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", + rc); + } + + /* nvm mdump */ + rc = qed_dbg_nvm_image(cdev, (u8 *)buffer + offset + + REGDUMP_HEADER_SIZE, &feature_size, + QED_NVM_IMAGE_MDUMP); + if (!rc) { + *(u32 *)((u8 *)buffer + offset) = + qed_calc_regdump_header(cdev, MDUMP, cur_engine, + feature_size, omit_engine, + cdev->dbg_bin_dump); + offset += (feature_size + REGDUMP_HEADER_SIZE); + } else if (rc != -ENOENT) { + DP_ERR(cdev, + "qed_dbg_nvm_image failed for image %d (%s), rc = %d\n", + QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc); + } + + mutex_unlock(&qed_dbg_lock); + cdev->dbg_bin_dump = 0; + + return 0; +} + +int qed_dbg_all_data_size(struct qed_dev *cdev) +{ + u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; + u8 cur_engine, org_engine; + + cdev->disable_ilt_dump = false; + org_engine = qed_get_debug_engine(cdev); + for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) { + /* Engine specific */ + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "calculating idle_chk and grcdump register length for current engine\n"); + qed_set_debug_engine(cdev, cur_engine); + regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) + + REGDUMP_HEADER_SIZE + + qed_dbg_protection_override_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev); + ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev); + if (ilt_len < ILT_DUMP_MAX_SIZE) { + total_ilt_len += ilt_len; + regs_len += ilt_len; + } + } + + qed_set_debug_engine(cdev, org_engine); + + /* Engine common */ + regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) + + REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev); + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + 
image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_DEFAULT_CFG, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_META, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_MDUMP, &image_len); + if (image_len) + regs_len += REGDUMP_HEADER_SIZE + image_len; + + if (regs_len > REGDUMP_MAX_SIZE) { + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "Dump exceeds max size 0x%x, disable ILT dump\n", + REGDUMP_MAX_SIZE); + cdev->disable_ilt_dump = true; + regs_len -= total_ilt_len; + } + + return regs_len; +} + +int qed_dbg_feature(struct qed_dev *cdev, void *buffer, + enum qed_dbg_features feature, u32 *num_dumped_bytes) +{ + struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; + enum dbg_status dbg_rc; + struct qed_ptt *p_ptt; + int rc = 0; + + /* Acquire ptt */ + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EINVAL; + + /* Get dump */ + dbg_rc = qed_dbg_dump(p_hwfn, p_ptt, feature); + if (dbg_rc != DBG_STATUS_OK) { + DP_VERBOSE(cdev, QED_MSG_DEBUG, "%s\n", + qed_dbg_get_status_str(dbg_rc)); + *num_dumped_bytes = 0; + rc = -EINVAL; + goto out; + } + + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "copying debugfs feature to external buffer\n"); + memcpy(buffer, qed_feature->dump_buf, qed_feature->buf_size); + *num_dumped_bytes = cdev->dbg_features[feature].dumped_dwords * + 4; + +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature) +{ + struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature]; + struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug]; + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 buf_size_dwords; + enum dbg_status rc; + + if (!p_ptt) + return -EINVAL; + + rc = qed_features_lookup[feature].get_size(p_hwfn, p_ptt, + &buf_size_dwords); + if (rc != DBG_STATUS_OK) + buf_size_dwords = 0; + + /* Feature will not be dumped if it exceeds maximum size */ + if (buf_size_dwords > MAX_DBG_FEATURE_SIZE_DWORDS) + buf_size_dwords = 0; + + qed_ptt_release(p_hwfn, p_ptt); + qed_feature->buf_size = buf_size_dwords * sizeof(u32); + return qed_feature->buf_size; +} + +int qed_dbg_phy_size(struct qed_dev *cdev) +{ + /* return max size of phy info and + * phy mac_stat multiplied by the number of ports + */ + return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev)); +} + +u8 qed_get_debug_engine(struct qed_dev *cdev) +{ + return cdev->engine_for_debug; +} + +void qed_set_debug_engine(struct qed_dev *cdev, int engine_number) +{ + DP_VERBOSE(cdev, QED_MSG_DEBUG, "set debug engine to %d\n", + engine_number); + cdev->engine_for_debug = engine_number; +} + +void qed_dbg_pf_init(struct qed_dev *cdev) +{ + const u8 *dbg_values = NULL; + int i; + + /* Sync ver with debugbus qed code */ + qed_dbg_set_app_ver(TOOLS_VERSION); + + /* Debug values are after init values. + * The offset is the first dword of the file. 
+ */ + dbg_values = cdev->firmware->data + *(u32 *)cdev->firmware->data; + + for_each_hwfn(cdev, i) { + qed_dbg_set_bin_ptr(&cdev->hwfns[i], dbg_values); + qed_dbg_user_set_bin_ptr(&cdev->hwfns[i], dbg_values); + } + + /* Set the hwfn to be 0 as default */ + cdev->engine_for_debug = 0; +} + +void qed_dbg_pf_exit(struct qed_dev *cdev) +{ + struct qed_dbg_feature *feature = NULL; + enum qed_dbg_features feature_idx; + + /* debug features' buffers may be allocated if debug feature was used + * but dump wasn't called + */ + for (feature_idx = 0; feature_idx < DBG_FEATURE_NUM; feature_idx++) { + feature = &cdev->dbg_features[feature_idx]; + if (feature->dump_buf) { + vfree(feature->dump_buf); + feature->dump_buf = NULL; + } + } +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h new file mode 100644 index 0000000000..b0d4b937cf --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015 QLogic Corporation + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_DEBUG_H +#define _QED_DEBUG_H + +enum qed_dbg_features { + DBG_FEATURE_GRC, + DBG_FEATURE_IDLE_CHK, + DBG_FEATURE_MCP_TRACE, + DBG_FEATURE_REG_FIFO, + DBG_FEATURE_IGU_FIFO, + DBG_FEATURE_PROTECTION_OVERRIDE, + DBG_FEATURE_FW_ASSERTS, + DBG_FEATURE_ILT, + DBG_FEATURE_NUM +}; + +/* Forward Declaration */ +struct qed_dev; +struct qed_hwfn; + +int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); +int qed_dbg_grc_size(struct qed_dev *cdev); +int qed_dbg_idle_chk(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes); +int qed_dbg_idle_chk_size(struct qed_dev *cdev); +int qed_dbg_reg_fifo(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes); +int qed_dbg_reg_fifo_size(struct qed_dev *cdev); +int qed_dbg_igu_fifo(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes); +int qed_dbg_igu_fifo_size(struct qed_dev *cdev); +int qed_dbg_protection_override(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes); +int qed_dbg_protection_override_size(struct qed_dev *cdev); +int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes); +int qed_dbg_fw_asserts_size(struct qed_dev *cdev); +int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes); +int qed_dbg_ilt_size(struct qed_dev *cdev); +int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer, + u32 *num_dumped_bytes); +int qed_dbg_mcp_trace_size(struct qed_dev *cdev); +int qed_dbg_phy_size(struct qed_dev *cdev); +int qed_dbg_all_data(struct qed_dev *cdev, void *buffer); +int qed_dbg_all_data_size(struct qed_dev *cdev); +u8 qed_get_debug_engine(struct qed_dev *cdev); +void qed_set_debug_engine(struct qed_dev *cdev, int engine_number); +int qed_dbg_feature(struct qed_dev *cdev, void *buffer, + enum qed_dbg_features feature, u32 *num_dumped_bytes); +int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature); + +void qed_dbg_pf_init(struct qed_dev *cdev); +void qed_dbg_pf_exit(struct qed_dev *cdev); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c new file mode 100644 index 0000000000..86a93cac26 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -0,0 +1,5520 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 
Marvell International Ltd. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/vmalloc.h> +#include <linux/etherdevice.h> +#include <linux/qed/qed_chain.h> +#include <linux/qed/qed_if.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_dev_api.h" +#include "qed_fcoe.h" +#include "qed_hsi.h" +#include "qed_iro_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_iscsi.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_ooo.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" +#include "qed_rdma.h" +#include "qed_nvmetcp.h" + +static DEFINE_SPINLOCK(qm_lock); + +/******************** Doorbell Recovery *******************/ +/* The doorbell recovery mechanism consists of a list of entries which represent + * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each + * entity needs to register with the mechanism and provide the parameters + * describing it's doorbell, including a location where last used doorbell data + * can be found. The doorbell execute function will traverse the list and + * doorbell all of the registered entries. + */ +struct qed_db_recovery_entry { + struct list_head list_entry; + void __iomem *db_addr; + void *db_data; + enum qed_db_rec_width db_width; + enum qed_db_rec_space db_space; + u8 hwfn_idx; +}; + +/* Display a single doorbell recovery entry */ +static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry, + char *action) +{ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", + action, + db_entry, + db_entry->db_addr, + db_entry->db_data, + db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", + db_entry->db_space == DB_REC_USER ? "user" : "kernel", + db_entry->hwfn_idx); +} + +/* Doorbell address sanity (address within doorbell bar range) */ +static bool qed_db_rec_sanity(struct qed_dev *cdev, + void __iomem *db_addr, + enum qed_db_rec_width db_width, + void *db_data) +{ + u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64; + + /* Make sure doorbell address is within the doorbell bar */ + if (db_addr < cdev->doorbells || + (u8 __iomem *)db_addr + width > + (u8 __iomem *)cdev->doorbells + cdev->db_size) { + WARN(true, + "Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n", + db_addr, + cdev->doorbells, + (u8 __iomem *)cdev->doorbells + cdev->db_size); + return false; + } + + /* ake sure doorbell data pointer is not null */ + if (!db_data) { + WARN(true, "Illegal doorbell data pointer: %p", db_data); + return false; + } + + return true; +} + +/* Find hwfn according to the doorbell address */ +static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev, + void __iomem *db_addr) +{ + struct qed_hwfn *p_hwfn; + + /* In CMT doorbell bar is split down the middle between engine 0 and enigne 1 */ + if (cdev->num_hwfns > 1) + p_hwfn = db_addr < cdev->hwfns[1].doorbells ? 
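+ /* addresses below the doorbell base of hwfn 1 belong to hwfn 0 */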
+ &cdev->hwfns[0] : &cdev->hwfns[1]; + else + p_hwfn = QED_LEADING_HWFN(cdev); + + return p_hwfn; +} + +/* Add a new entry to the doorbell recovery mechanism */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space) +{ + struct qed_db_recovery_entry *db_entry; + struct qed_hwfn *p_hwfn; + + /* Shortcircuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Sanitize doorbell address */ + if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data)) + return -EINVAL; + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Create entry */ + db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL); + if (!db_entry) { + DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n"); + return -ENOMEM; + } + + /* Populate entry */ + db_entry->db_addr = db_addr; + db_entry->db_data = db_data; + db_entry->db_width = db_width; + db_entry->db_space = db_space; + db_entry->hwfn_idx = p_hwfn->my_id; + + /* Display */ + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list); + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + return 0; +} + +/* Remove an entry from the doorbell recovery mechanism */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data) +{ + struct qed_db_recovery_entry *db_entry = NULL; + struct qed_hwfn *p_hwfn; + int rc = -EINVAL; + + /* Shortcircuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + /* search according to db_data addr since db_addr is not unique (roce) */ + if (db_entry->db_data == db_data) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting"); + list_del(&db_entry->list_entry); + rc = 0; + break; + } + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + if (rc == -EINVAL) + + DP_NOTICE(p_hwfn, + "Failed to find element in list. Key (db_data addr) was %p. db_addr was %p\n", + db_data, db_addr); + else + kfree(db_entry); + + return rc; +} + +/* Initialize the doorbell recovery mechanism */ +static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n"); + + /* Make sure db_size was set in cdev */ + if (!p_hwfn->cdev->db_size) { + DP_ERR(p_hwfn->cdev, "db_size not set\n"); + return -EINVAL; + } + + INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list); + spin_lock_init(&p_hwfn->db_recovery_info.lock); + p_hwfn->db_recovery_info.db_recovery_counter = 0; + + return 0; +} + +/* Destroy the doorbell recovery mechanism */ +static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n"); + if (!list_empty(&p_hwfn->db_recovery_info.list)) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). 
Prepare to purge doorbell recovery list...\n"); + while (!list_empty(&p_hwfn->db_recovery_info.list)) { + db_entry = + list_first_entry(&p_hwfn->db_recovery_info.list, + struct qed_db_recovery_entry, + list_entry); + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); + list_del(&db_entry->list_entry); + kfree(db_entry); + } + } + p_hwfn->db_recovery_info.db_recovery_counter = 0; +} + +/* Print the content of the doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_NOTICE(p_hwfn, + "Displaying doorbell recovery database. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/* Ring the doorbell of a single doorbell recovery entry */ +static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry) +{ + /* Print according to width */ + if (db_entry->db_width == DB_REC_WIDTH_32B) { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %x\n", + db_entry->db_addr, + *(u32 *)db_entry->db_data); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "ringing doorbell address %p data %llx\n", + db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + } + + /* Sanity */ + if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, + db_entry->db_width, db_entry->db_data)) + return; + + /* Flush the write combined buffer. Since there are multiple doorbelling + * entities using the same address, if we don't flush, a transaction + * could be lost. + */ + wmb(); + + /* Ring the doorbell */ + if (db_entry->db_width == DB_REC_WIDTH_32B) + DIRECT_REG_WR(db_entry->db_addr, + *(u32 *)(db_entry->db_data)); + else + DIRECT_REG_WR64(db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + + /* Flush the write combined buffer. Next doorbell may come from a + * different entity to the same address... + */ + wmb(); +} + +/* Traverse the doorbell recovery entry list and ring all the doorbells */ +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_NOTICE(p_hwfn, "Executing doorbell recovery. 
Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Track amount of times recovery was executed */ + p_hwfn->db_recovery_info.db_recovery_counter++; + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) + qed_db_recovery_ring(p_hwfn, db_entry); + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/******************** Doorbell Recovery end ****************/ + +/********************************** NIG LLH ***********************************/ + +enum qed_llh_filter_type { + QED_LLH_FILTER_TYPE_MAC, + QED_LLH_FILTER_TYPE_PROTOCOL, +}; + +struct qed_llh_mac_filter { + u8 addr[ETH_ALEN]; +}; + +struct qed_llh_protocol_filter { + enum qed_llh_prot_filter_type_t type; + u16 source_port_or_eth_type; + u16 dest_port; +}; + +union qed_llh_filter { + struct qed_llh_mac_filter mac; + struct qed_llh_protocol_filter protocol; +}; + +struct qed_llh_filter_info { + bool b_enabled; + u32 ref_cnt; + enum qed_llh_filter_type type; + union qed_llh_filter filter; +}; + +struct qed_llh_info { + /* Number of LLH filters banks */ + u8 num_ppfid; + +#define MAX_NUM_PPFID 8 + u8 ppfid_array[MAX_NUM_PPFID]; + + /* Array of filters arrays: + * "num_ppfid" elements of filters banks, where each is an array of + * "NIG_REG_LLH_FUNC_FILTER_EN_SIZE" filters. + */ + struct qed_llh_filter_info **pp_filters; +}; + +static void qed_llh_free(struct qed_dev *cdev) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + u32 i; + + if (p_llh_info) { + if (p_llh_info->pp_filters) + for (i = 0; i < p_llh_info->num_ppfid; i++) + kfree(p_llh_info->pp_filters[i]); + + kfree(p_llh_info->pp_filters); + } + + kfree(p_llh_info); + cdev->p_llh_info = NULL; +} + +static int qed_llh_alloc(struct qed_dev *cdev) +{ + struct qed_llh_info *p_llh_info; + u32 size, i; + + p_llh_info = kzalloc(sizeof(*p_llh_info), GFP_KERNEL); + if (!p_llh_info) + return -ENOMEM; + cdev->p_llh_info = p_llh_info; + + for (i = 0; i < MAX_NUM_PPFID; i++) { + if (!(cdev->ppfid_bitmap & (0x1 << i))) + continue; + + p_llh_info->ppfid_array[p_llh_info->num_ppfid] = i; + DP_VERBOSE(cdev, QED_MSG_SP, "ppfid_array[%d] = %u\n", + p_llh_info->num_ppfid, i); + p_llh_info->num_ppfid++; + } + + size = p_llh_info->num_ppfid * sizeof(*p_llh_info->pp_filters); + p_llh_info->pp_filters = kzalloc(size, GFP_KERNEL); + if (!p_llh_info->pp_filters) + return -ENOMEM; + + size = NIG_REG_LLH_FUNC_FILTER_EN_SIZE * + sizeof(**p_llh_info->pp_filters); + for (i = 0; i < p_llh_info->num_ppfid; i++) { + p_llh_info->pp_filters[i] = kzalloc(size, GFP_KERNEL); + if (!p_llh_info->pp_filters[i]) + return -ENOMEM; + } + + return 0; +} + +static int qed_llh_shadow_sanity(struct qed_dev *cdev, + u8 ppfid, u8 filter_idx, const char *action) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + + if (ppfid >= p_llh_info->num_ppfid) { + DP_NOTICE(cdev, + "LLH shadow [%s]: using ppfid %d while only %d ppfids are available\n", + action, ppfid, p_llh_info->num_ppfid); + return -EINVAL; + } + + if (filter_idx >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { + DP_NOTICE(cdev, + "LLH shadow [%s]: using filter_idx %d while only %d filters are available\n", + action, filter_idx, NIG_REG_LLH_FUNC_FILTER_EN_SIZE); + return -EINVAL; + } + + return 0; +} + +#define QED_LLH_INVALID_FILTER_IDX 0xff + +static int +qed_llh_shadow_search_filter(struct qed_dev *cdev, + u8 ppfid, + union qed_llh_filter *p_filter, u8 *p_filter_idx) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + struct 
qed_llh_filter_info *p_filters; + int rc; + u8 i; + + rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "search"); + if (rc) + return rc; + + *p_filter_idx = QED_LLH_INVALID_FILTER_IDX; + + p_filters = p_llh_info->pp_filters[ppfid]; + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (!memcmp(p_filter, &p_filters[i].filter, + sizeof(*p_filter))) { + *p_filter_idx = i; + break; + } + } + + return 0; +} + +static int +qed_llh_shadow_get_free_idx(struct qed_dev *cdev, u8 ppfid, u8 *p_filter_idx) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + struct qed_llh_filter_info *p_filters; + int rc; + u8 i; + + rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "get_free_idx"); + if (rc) + return rc; + + *p_filter_idx = QED_LLH_INVALID_FILTER_IDX; + + p_filters = p_llh_info->pp_filters[ppfid]; + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (!p_filters[i].b_enabled) { + *p_filter_idx = i; + break; + } + } + + return 0; +} + +static int +__qed_llh_shadow_add_filter(struct qed_dev *cdev, + u8 ppfid, + u8 filter_idx, + enum qed_llh_filter_type type, + union qed_llh_filter *p_filter, u32 *p_ref_cnt) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + struct qed_llh_filter_info *p_filters; + int rc; + + rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "add"); + if (rc) + return rc; + + p_filters = p_llh_info->pp_filters[ppfid]; + if (!p_filters[filter_idx].ref_cnt) { + p_filters[filter_idx].b_enabled = true; + p_filters[filter_idx].type = type; + memcpy(&p_filters[filter_idx].filter, p_filter, + sizeof(p_filters[filter_idx].filter)); + } + + *p_ref_cnt = ++p_filters[filter_idx].ref_cnt; + + return 0; +} + +static int +qed_llh_shadow_add_filter(struct qed_dev *cdev, + u8 ppfid, + enum qed_llh_filter_type type, + union qed_llh_filter *p_filter, + u8 *p_filter_idx, u32 *p_ref_cnt) +{ + int rc; + + /* Check if the same filter already exist */ + rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx); + if (rc) + return rc; + + /* Find a new entry in case of a new filter */ + if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) { + rc = qed_llh_shadow_get_free_idx(cdev, ppfid, p_filter_idx); + if (rc) + return rc; + } + + /* No free entry was found */ + if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) { + DP_NOTICE(cdev, + "Failed to find an empty LLH filter to utilize [ppfid %d]\n", + ppfid); + return -EINVAL; + } + + return __qed_llh_shadow_add_filter(cdev, ppfid, *p_filter_idx, type, + p_filter, p_ref_cnt); +} + +static int +__qed_llh_shadow_remove_filter(struct qed_dev *cdev, + u8 ppfid, u8 filter_idx, u32 *p_ref_cnt) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + struct qed_llh_filter_info *p_filters; + int rc; + + rc = qed_llh_shadow_sanity(cdev, ppfid, filter_idx, "remove"); + if (rc) + return rc; + + p_filters = p_llh_info->pp_filters[ppfid]; + if (!p_filters[filter_idx].ref_cnt) { + DP_NOTICE(cdev, + "LLH shadow: trying to remove a filter with ref_cnt=0\n"); + return -EINVAL; + } + + *p_ref_cnt = --p_filters[filter_idx].ref_cnt; + if (!p_filters[filter_idx].ref_cnt) + memset(&p_filters[filter_idx], + 0, sizeof(p_filters[filter_idx])); + + return 0; +} + +static int +qed_llh_shadow_remove_filter(struct qed_dev *cdev, + u8 ppfid, + union qed_llh_filter *p_filter, + u8 *p_filter_idx, u32 *p_ref_cnt) +{ + int rc; + + rc = qed_llh_shadow_search_filter(cdev, ppfid, p_filter, p_filter_idx); + if (rc) + return rc; + + /* No matching filter was found */ + if (*p_filter_idx == QED_LLH_INVALID_FILTER_IDX) { + DP_NOTICE(cdev, "Failed to find a filter in the 
LLH shadow\n"); + return -EINVAL; + } + + return __qed_llh_shadow_remove_filter(cdev, ppfid, *p_filter_idx, + p_ref_cnt); +} + +static int qed_llh_abs_ppfid(struct qed_dev *cdev, u8 ppfid, u8 *p_abs_ppfid) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + + if (ppfid >= p_llh_info->num_ppfid) { + DP_NOTICE(cdev, + "ppfid %d is not valid, available indices are 0..%d\n", + ppfid, p_llh_info->num_ppfid - 1); + *p_abs_ppfid = 0; + return -EINVAL; + } + + *p_abs_ppfid = p_llh_info->ppfid_array[ppfid]; + + return 0; +} + +static int +qed_llh_set_engine_affin(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + enum qed_eng eng; + u8 ppfid; + int rc; + + rc = qed_mcp_get_engine_config(p_hwfn, p_ptt); + if (rc != 0 && rc != -EOPNOTSUPP) { + DP_NOTICE(p_hwfn, + "Failed to get the engine affinity configuration\n"); + return rc; + } + + /* RoCE PF is bound to a single engine */ + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) { + eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0; + rc = qed_llh_set_roce_affinity(cdev, eng); + if (rc) { + DP_NOTICE(cdev, + "Failed to set the RoCE engine affinity\n"); + return rc; + } + + DP_VERBOSE(cdev, + QED_MSG_SP, + "LLH: Set the engine affinity of RoCE packets as %d\n", + eng); + } + + /* Storage PF is bound to a single engine while L2 PF uses both */ + if (QED_IS_FCOE_PERSONALITY(p_hwfn) || QED_IS_ISCSI_PERSONALITY(p_hwfn) || + QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + eng = cdev->fir_affin ? QED_ENG1 : QED_ENG0; + else /* L2_PERSONALITY */ + eng = QED_BOTH_ENG; + + for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) { + rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng); + if (rc) { + DP_NOTICE(cdev, + "Failed to set the engine affinity of ppfid %d\n", + ppfid); + return rc; + } + } + + DP_VERBOSE(cdev, QED_MSG_SP, + "LLH: Set the engine affinity of non-RoCE packets as %d\n", + eng); + + return 0; +} + +static int qed_llh_hw_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u8 ppfid, abs_ppfid; + int rc; + + for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) { + u32 addr; + + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + return rc; + + addr = NIG_REG_LLH_PPFID2PFID_TBL_0 + abs_ppfid * 0x4; + qed_wr(p_hwfn, p_ptt, addr, p_hwfn->rel_pf_id); + } + + if (test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) && + !QED_IS_FCOE_PERSONALITY(p_hwfn)) { + rc = qed_llh_add_mac_filter(cdev, 0, + p_hwfn->hw_info.hw_mac_addr); + if (rc) + DP_NOTICE(cdev, + "Failed to add an LLH filter with the primary MAC\n"); + } + + if (QED_IS_CMT(cdev)) { + rc = qed_llh_set_engine_affin(p_hwfn, p_ptt); + if (rc) + return rc; + } + + return 0; +} + +u8 qed_llh_get_num_ppfid(struct qed_dev *cdev) +{ + return cdev->p_llh_info->num_ppfid; +} + +#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_MASK 0x3 +#define NIG_REG_PPF_TO_ENGINE_SEL_ROCE_SHIFT 0 +#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_MASK 0x3 +#define NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE_SHIFT 2 + +int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 addr, val, eng_sel; + u8 abs_ppfid; + int rc = 0; + + if (!p_ptt) + return -EAGAIN; + + if (!QED_IS_CMT(cdev)) + goto out; + + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto out; + + switch (eng) { + case QED_ENG0: + eng_sel = 0; + break; + case QED_ENG1: + eng_sel = 1; + break; + case QED_BOTH_ENG: + eng_sel = 2; + break; + default: + 
DP_NOTICE(cdev, "Invalid affinity value for ppfid [%d]\n", eng); + rc = -EINVAL; + goto out; + } + + addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; + val = qed_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_NON_ROCE, eng_sel); + qed_wr(p_hwfn, p_ptt, addr, val); + + /* The iWARP affinity is set as the affinity of ppfid 0 */ + if (!ppfid && QED_IS_IWARP_PERSONALITY(p_hwfn)) + cdev->iwarp_affin = (eng == QED_ENG1) ? 1 : 0; +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 addr, val, eng_sel; + u8 ppfid, abs_ppfid; + int rc = 0; + + if (!p_ptt) + return -EAGAIN; + + if (!QED_IS_CMT(cdev)) + goto out; + + switch (eng) { + case QED_ENG0: + eng_sel = 0; + break; + case QED_ENG1: + eng_sel = 1; + break; + case QED_BOTH_ENG: + eng_sel = 2; + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL, + 0xf); /* QP bit 15 */ + break; + default: + DP_NOTICE(cdev, "Invalid affinity value for RoCE [%d]\n", eng); + rc = -EINVAL; + goto out; + } + + for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) { + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto out; + + addr = NIG_REG_PPF_TO_ENGINE_SEL + abs_ppfid * 0x4; + val = qed_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, NIG_REG_PPF_TO_ENGINE_SEL_ROCE, eng_sel); + qed_wr(p_hwfn, p_ptt, addr, val); + } +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +struct qed_llh_filter_details { + u64 value; + u32 mode; + u32 protocol_type; + u32 hdr_sel; + u32 enable; +}; + +static int +qed_llh_access_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 abs_ppfid, + u8 filter_idx, + struct qed_llh_filter_details *p_details) +{ + struct qed_dmae_params params = {0}; + u32 addr; + u8 pfid; + int rc; + + /* The NIG/LLH registers that are accessed in this function have only 16 + * rows which are exposed to a PF. I.e. only the 16 filters of its + * default ppfid. Accessing filters of other ppfids requires pretending + * to another PFs. + * The calculation of PPFID->PFID in AH is based on the relative index + * of a PF on its port. + * For BB the pfid is actually the abs_ppfid. 
+ */ + if (QED_IS_BB(p_hwfn->cdev)) + pfid = abs_ppfid; + else + pfid = abs_ppfid * p_hwfn->cdev->num_ports_in_engine + + MFW_PORT(p_hwfn); + + /* Filter enable - should be done first when removing a filter */ + if (!p_details->enable) { + qed_fid_pretend(p_hwfn, p_ptt, + pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); + + addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4; + qed_wr(p_hwfn, p_ptt, addr, p_details->enable); + + qed_fid_pretend(p_hwfn, p_ptt, + p_hwfn->rel_pf_id << + PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); + } + + /* Filter value */ + addr = NIG_REG_LLH_FUNC_FILTER_VALUE + 2 * filter_idx * 0x4; + + SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_PF_VALID, 0x1); + params.dst_pfid = pfid; + rc = qed_dmae_host2grc(p_hwfn, + p_ptt, + (u64)(uintptr_t)&p_details->value, + addr, 2 /* size_in_dwords */, + ¶ms); + if (rc) + return rc; + + qed_fid_pretend(p_hwfn, p_ptt, + pfid << PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); + + /* Filter mode */ + addr = NIG_REG_LLH_FUNC_FILTER_MODE + filter_idx * 0x4; + qed_wr(p_hwfn, p_ptt, addr, p_details->mode); + + /* Filter protocol type */ + addr = NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + filter_idx * 0x4; + qed_wr(p_hwfn, p_ptt, addr, p_details->protocol_type); + + /* Filter header select */ + addr = NIG_REG_LLH_FUNC_FILTER_HDR_SEL + filter_idx * 0x4; + qed_wr(p_hwfn, p_ptt, addr, p_details->hdr_sel); + + /* Filter enable - should be done last when adding a filter */ + if (p_details->enable) { + addr = NIG_REG_LLH_FUNC_FILTER_EN + filter_idx * 0x4; + qed_wr(p_hwfn, p_ptt, addr, p_details->enable); + } + + qed_fid_pretend(p_hwfn, p_ptt, + p_hwfn->rel_pf_id << + PXP_PRETEND_CONCRETE_FID_PFID_SHIFT); + + return 0; +} + +static int +qed_llh_add_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 abs_ppfid, + u8 filter_idx, u8 filter_prot_type, u32 high, u32 low) +{ + struct qed_llh_filter_details filter_details; + + filter_details.enable = 1; + filter_details.value = ((u64)high << 32) | low; + filter_details.hdr_sel = 0; + filter_details.protocol_type = filter_prot_type; + /* Mode: 0: MAC-address classification 1: protocol classification */ + filter_details.mode = filter_prot_type ? 
1 : 0; + + return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, + &filter_details); +} + +static int +qed_llh_remove_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 abs_ppfid, u8 filter_idx) +{ + struct qed_llh_filter_details filter_details = {0}; + + return qed_llh_access_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, + &filter_details); +} + +int qed_llh_add_mac_filter(struct qed_dev *cdev, + u8 ppfid, const u8 mac_addr[ETH_ALEN]) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + union qed_llh_filter filter = {}; + u8 filter_idx, abs_ppfid = 0; + u32 high, low, ref_cnt; + int rc = 0; + + if (!p_ptt) + return -EAGAIN; + + if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) + goto out; + + memcpy(filter.mac.addr, mac_addr, ETH_ALEN); + rc = qed_llh_shadow_add_filter(cdev, ppfid, + QED_LLH_FILTER_TYPE_MAC, + &filter, &filter_idx, &ref_cnt); + if (rc) + goto err; + + /* Configure the LLH only in case of a new the filter */ + if (ref_cnt == 1) { + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto err; + + high = mac_addr[1] | (mac_addr[0] << 8); + low = mac_addr[5] | (mac_addr[4] << 8) | (mac_addr[3] << 16) | + (mac_addr[2] << 24); + rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, filter_idx, + 0, high, low); + if (rc) + goto err; + } + + DP_VERBOSE(cdev, + QED_MSG_SP, + "LLH: Added MAC filter [%pM] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt); + + goto out; + +err: DP_NOTICE(cdev, + "LLH: Failed to add MAC filter [%pM] to ppfid %hhd\n", + mac_addr, ppfid); +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +static int +qed_llh_protocol_filter_stringify(struct qed_dev *cdev, + enum qed_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, + u16 dest_port, u8 *str, size_t str_len) +{ + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + snprintf(str, str_len, "Ethertype 0x%04x", + source_port_or_eth_type); + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + snprintf(str, str_len, "TCP src port 0x%04x", + source_port_or_eth_type); + break; + case QED_LLH_FILTER_UDP_SRC_PORT: + snprintf(str, str_len, "UDP src port 0x%04x", + source_port_or_eth_type); + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + snprintf(str, str_len, "TCP dst port 0x%04x", dest_port); + break; + case QED_LLH_FILTER_UDP_DEST_PORT: + snprintf(str, str_len, "UDP dst port 0x%04x", dest_port); + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + snprintf(str, str_len, "TCP src/dst ports 0x%04x/0x%04x", + source_port_or_eth_type, dest_port); + break; + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + snprintf(str, str_len, "UDP src/dst ports 0x%04x/0x%04x", + source_port_or_eth_type, dest_port); + break; + default: + DP_NOTICE(cdev, + "Non valid LLH protocol filter type %d\n", type); + return -EINVAL; + } + + return 0; +} + +static int +qed_llh_protocol_filter_to_hilo(struct qed_dev *cdev, + enum qed_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, + u16 dest_port, u32 *p_high, u32 *p_low) +{ + *p_high = 0; + *p_low = 0; + + switch (type) { + case QED_LLH_FILTER_ETHERTYPE: + *p_high = source_port_or_eth_type; + break; + case QED_LLH_FILTER_TCP_SRC_PORT: + case QED_LLH_FILTER_UDP_SRC_PORT: + *p_low = source_port_or_eth_type << 16; + break; + case QED_LLH_FILTER_TCP_DEST_PORT: + case QED_LLH_FILTER_UDP_DEST_PORT: + *p_low = dest_port; + break; + case QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT: + case QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT: + *p_low = 
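/* source port in the upper 16 bits, dest port in the lower 16 */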
(source_port_or_eth_type << 16) | dest_port; + break; + default: + DP_NOTICE(cdev, + "Non valid LLH protocol filter type %d\n", type); + return -EINVAL; + } + + return 0; +} + +int +qed_llh_add_protocol_filter(struct qed_dev *cdev, + u8 ppfid, + enum qed_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u8 filter_idx, abs_ppfid, str[32], type_bitmap; + union qed_llh_filter filter = {}; + u32 high, low, ref_cnt; + int rc = 0; + + if (!p_ptt) + return -EAGAIN; + + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits)) + goto out; + + rc = qed_llh_protocol_filter_stringify(cdev, type, + source_port_or_eth_type, + dest_port, str, sizeof(str)); + if (rc) + goto err; + + filter.protocol.type = type; + filter.protocol.source_port_or_eth_type = source_port_or_eth_type; + filter.protocol.dest_port = dest_port; + rc = qed_llh_shadow_add_filter(cdev, + ppfid, + QED_LLH_FILTER_TYPE_PROTOCOL, + &filter, &filter_idx, &ref_cnt); + if (rc) + goto err; + + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto err; + + /* Configure the LLH only in case of a new the filter */ + if (ref_cnt == 1) { + rc = qed_llh_protocol_filter_to_hilo(cdev, type, + source_port_or_eth_type, + dest_port, &high, &low); + if (rc) + goto err; + + type_bitmap = 0x1 << type; + rc = qed_llh_add_filter(p_hwfn, p_ptt, abs_ppfid, + filter_idx, type_bitmap, high, low); + if (rc) + goto err; + } + + DP_VERBOSE(cdev, + QED_MSG_SP, + "LLH: Added protocol filter [%s] to ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + str, ppfid, abs_ppfid, filter_idx, ref_cnt); + + goto out; + +err: DP_NOTICE(p_hwfn, + "LLH: Failed to add protocol filter [%s] to ppfid %hhd\n", + str, ppfid); +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +void qed_llh_remove_mac_filter(struct qed_dev *cdev, + u8 ppfid, u8 mac_addr[ETH_ALEN]) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + union qed_llh_filter filter = {}; + u8 filter_idx, abs_ppfid; + int rc = 0; + u32 ref_cnt; + + if (!p_ptt) + return; + + if (!test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) + goto out; + + if (QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + return; + + ether_addr_copy(filter.mac.addr, mac_addr); + rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx, + &ref_cnt); + if (rc) + goto err; + + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto err; + + /* Remove from the LLH in case the filter is not in use */ + if (!ref_cnt) { + rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, + filter_idx); + if (rc) + goto err; + } + + DP_VERBOSE(cdev, + QED_MSG_SP, + "LLH: Removed MAC filter [%pM] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + mac_addr, ppfid, abs_ppfid, filter_idx, ref_cnt); + + goto out; + +err: DP_NOTICE(cdev, + "LLH: Failed to remove MAC filter [%pM] from ppfid %hhd\n", + mac_addr, ppfid); +out: + qed_ptt_release(p_hwfn, p_ptt); +} + +void qed_llh_remove_protocol_filter(struct qed_dev *cdev, + u8 ppfid, + enum qed_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u8 filter_idx, abs_ppfid, str[32]; + union qed_llh_filter filter = {}; + int rc = 0; + u32 ref_cnt; + + if (!p_ptt) + return; + + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits)) + goto out; + + rc = qed_llh_protocol_filter_stringify(cdev, 
type, + source_port_or_eth_type, + dest_port, str, sizeof(str)); + if (rc) + goto err; + + filter.protocol.type = type; + filter.protocol.source_port_or_eth_type = source_port_or_eth_type; + filter.protocol.dest_port = dest_port; + rc = qed_llh_shadow_remove_filter(cdev, ppfid, &filter, &filter_idx, + &ref_cnt); + if (rc) + goto err; + + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto err; + + /* Remove from the LLH in case the filter is not in use */ + if (!ref_cnt) { + rc = qed_llh_remove_filter(p_hwfn, p_ptt, abs_ppfid, + filter_idx); + if (rc) + goto err; + } + + DP_VERBOSE(cdev, + QED_MSG_SP, + "LLH: Removed protocol filter [%s] from ppfid %hhd [abs %hhd] at idx %hhd [ref_cnt %d]\n", + str, ppfid, abs_ppfid, filter_idx, ref_cnt); + + goto out; + +err: DP_NOTICE(cdev, + "LLH: Failed to remove protocol filter [%s] from ppfid %hhd\n", + str, ppfid); +out: + qed_ptt_release(p_hwfn, p_ptt); +} + +/******************************* NIG LLH - End ********************************/ + +#define QED_MIN_DPIS (4) +#define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) + +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum BAR_ID bar_id) +{ + u32 bar_reg = (bar_id == BAR_ID_0 ? + PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_hw_bar_size(p_hwfn, bar_id); + + val = qed_rd(p_hwfn, p_ptt, bar_reg); + if (val) + return 1 << (val + 15); + + /* Old MFW initialized above registered only conditionally */ + if (p_hwfn->cdev->num_hwfns > 1) { + DP_INFO(p_hwfn, + "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); + return BAR_ID_0 ? 256 * 1024 : 512 * 1024; + } else { + DP_INFO(p_hwfn, + "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); + return 512 * 1024; + } +} + +void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) +{ + u32 i; + + cdev->dp_level = dp_level; + cdev->dp_module = dp_module; + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->dp_level = dp_level; + p_hwfn->dp_module = dp_module; + } +} + +void qed_init_struct(struct qed_dev *cdev) +{ + u8 i; + + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->cdev = cdev; + p_hwfn->my_id = i; + p_hwfn->b_active = false; + + mutex_init(&p_hwfn->dmae_info.mutex); + } + + /* hwfn 0 is always active */ + cdev->hwfns[0].b_active = true; + + /* set the default cache alignment to 128 */ + cdev->cache_shift = 7; +} + +static void qed_qm_info_free(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + kfree(qm_info->qm_pq_params); + qm_info->qm_pq_params = NULL; + kfree(qm_info->qm_vport_params); + qm_info->qm_vport_params = NULL; + kfree(qm_info->qm_port_params); + qm_info->qm_port_params = NULL; + kfree(qm_info->wfq_data); + qm_info->wfq_data = NULL; +} + +static void qed_dbg_user_data_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->dbg_user_info); + p_hwfn->dbg_user_info = NULL; +} + +void qed_resc_free(struct qed_dev *cdev) +{ + struct qed_rdma_info *rdma_info; + struct qed_hwfn *p_hwfn; + int i; + + if (IS_VF(cdev)) { + for_each_hwfn(cdev, i) + qed_l2_free(&cdev->hwfns[i]); + return; + } + + kfree(cdev->fw_data); + cdev->fw_data = NULL; + + kfree(cdev->reset_stats); + cdev->reset_stats = NULL; + + qed_llh_free(cdev); + + for_each_hwfn(cdev, i) { + p_hwfn = cdev->hwfns + i; + rdma_info = p_hwfn->p_rdma_info; + + qed_cxt_mngr_free(p_hwfn); + 
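/* free the remaining per-hwfn resources (QM, SPQ, EQ, interrupts, protocol state) */ +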
qed_qm_info_free(p_hwfn); + qed_spq_free(p_hwfn); + qed_eq_free(p_hwfn); + qed_consq_free(p_hwfn); + qed_int_free(p_hwfn); +#ifdef CONFIG_QED_LL2 + qed_ll2_free(p_hwfn); +#endif + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_free(p_hwfn); + + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + qed_iscsi_free(p_hwfn); + qed_ooo_free(p_hwfn); + } + + if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { + qed_nvmetcp_free(p_hwfn); + qed_ooo_free(p_hwfn); + } + + if (QED_IS_RDMA_PERSONALITY(p_hwfn) && rdma_info) { + qed_spq_unregister_async_cb(p_hwfn, rdma_info->proto); + qed_rdma_info_free(p_hwfn); + } + + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); + qed_iov_free(p_hwfn); + qed_l2_free(p_hwfn); + qed_dmae_info_free(p_hwfn); + qed_dcbx_info_free(p_hwfn); + qed_dbg_user_data_free(p_hwfn); + qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem); + + /* Destroy doorbell recovery mechanism */ + qed_db_recovery_teardown(p_hwfn); + } +} + +/******************** QM initialization *******************/ +#define ACTIVE_TCS_BMAP 0x9f +#define ACTIVE_TCS_BMAP_4PORT_K2 0xf + +/* determines the physical queue flags for a given PF. */ +static u32 qed_get_pq_flags(struct qed_hwfn *p_hwfn) +{ + u32 flags; + + /* common flags */ + flags = PQ_FLAGS_LB; + + /* feature flags */ + if (IS_QED_SRIOV(p_hwfn->cdev)) + flags |= PQ_FLAGS_VFS; + + /* protocol flags */ + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + flags |= PQ_FLAGS_MCOS; + break; + case QED_PCI_FCOE: + flags |= PQ_FLAGS_OFLD; + break; + case QED_PCI_ISCSI: + case QED_PCI_NVMETCP: + flags |= PQ_FLAGS_ACK | PQ_FLAGS_OOO | PQ_FLAGS_OFLD; + break; + case QED_PCI_ETH_ROCE: + flags |= PQ_FLAGS_MCOS | PQ_FLAGS_OFLD | PQ_FLAGS_LLT; + if (IS_QED_MULTI_TC_ROCE(p_hwfn)) + flags |= PQ_FLAGS_MTC; + break; + case QED_PCI_ETH_IWARP: + flags |= PQ_FLAGS_MCOS | PQ_FLAGS_ACK | PQ_FLAGS_OOO | + PQ_FLAGS_OFLD; + break; + default: + DP_ERR(p_hwfn, + "unknown personality %d\n", p_hwfn->hw_info.personality); + return 0; + } + + return flags; +} + +/* Getters for resource amounts necessary for qm initialization */ +static u8 qed_init_qm_get_num_tcs(struct qed_hwfn *p_hwfn) +{ + return p_hwfn->hw_info.num_hw_tc; +} + +static u16 qed_init_qm_get_num_vfs(struct qed_hwfn *p_hwfn) +{ + return IS_QED_SRIOV(p_hwfn->cdev) ? 
+ p_hwfn->cdev->p_iov_info->total_vfs : 0; +} + +static u8 qed_init_qm_get_num_mtc_tcs(struct qed_hwfn *p_hwfn) +{ + u32 pq_flags = qed_get_pq_flags(p_hwfn); + + if (!(PQ_FLAGS_MTC & pq_flags)) + return 1; + + return qed_init_qm_get_num_tcs(p_hwfn); +} + +#define NUM_DEFAULT_RLS 1 + +static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn) +{ + u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); + + /* num RLs can't exceed resource amount of rls or vports */ + num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL), + RESC_NUM(p_hwfn, QED_VPORT)); + + /* Make sure after we reserve there's something left */ + if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS) + return 0; + + /* subtract rls necessary for VFs and one default one for the PF */ + num_pf_rls -= num_vfs + NUM_DEFAULT_RLS; + + return num_pf_rls; +} + +static u16 qed_init_qm_get_num_vports(struct qed_hwfn *p_hwfn) +{ + u32 pq_flags = qed_get_pq_flags(p_hwfn); + + /* all pqs share the same vport, except for vfs and pf_rl pqs */ + return (!!(PQ_FLAGS_RLS & pq_flags)) * + qed_init_qm_get_num_pf_rls(p_hwfn) + + (!!(PQ_FLAGS_VFS & pq_flags)) * + qed_init_qm_get_num_vfs(p_hwfn) + 1; +} + +/* calc amount of PQs according to the requested flags */ +static u16 qed_init_qm_get_num_pqs(struct qed_hwfn *p_hwfn) +{ + u32 pq_flags = qed_get_pq_flags(p_hwfn); + + return (!!(PQ_FLAGS_RLS & pq_flags)) * + qed_init_qm_get_num_pf_rls(p_hwfn) + + (!!(PQ_FLAGS_MCOS & pq_flags)) * + qed_init_qm_get_num_tcs(p_hwfn) + + (!!(PQ_FLAGS_LB & pq_flags)) + (!!(PQ_FLAGS_OOO & pq_flags)) + + (!!(PQ_FLAGS_ACK & pq_flags)) + + (!!(PQ_FLAGS_OFLD & pq_flags)) * + qed_init_qm_get_num_mtc_tcs(p_hwfn) + + (!!(PQ_FLAGS_LLT & pq_flags)) * + qed_init_qm_get_num_mtc_tcs(p_hwfn) + + (!!(PQ_FLAGS_VFS & pq_flags)) * qed_init_qm_get_num_vfs(p_hwfn); +} + +/* initialize the top level QM params */ +static void qed_init_qm_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + bool four_port; + + /* pq and vport bases for this PF */ + qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ); + qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT); + + /* rate limiting and weighted fair queueing are always enabled */ + qm_info->vport_rl_en = true; + qm_info->vport_wfq_en = true; + + /* TC config is different for AH 4 port */ + four_port = p_hwfn->cdev->num_ports_in_engine == MAX_NUM_PORTS_K2; + + /* in AH 4 port we have fewer TCs per port */ + qm_info->max_phys_tcs_per_port = four_port ? NUM_PHYS_TCS_4PORT_K2 : + NUM_OF_PHYS_TCS; + + /* unless MFW indicated otherwise, ooo_tc == 3 for + * AH 4-port and 4 otherwise. + */ + if (!qm_info->ooo_tc) + qm_info->ooo_tc = four_port ? DCBX_TCP_OOO_K2_4PORT_TC : + DCBX_TCP_OOO_TC; +} + +/* initialize qm vport params */ +static void qed_init_qm_vport_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u8 i; + + /* all vports participate in weighted fair queueing */ + for (i = 0; i < qed_init_qm_get_num_vports(p_hwfn); i++) + qm_info->qm_vport_params[i].wfq = 1; +} + +/* initialize qm port params */ +static void qed_init_qm_port_params(struct qed_hwfn *p_hwfn) +{ + /* Initialize qm port parameters */ + u8 i, active_phys_tcs, num_ports = p_hwfn->cdev->num_ports_in_engine; + struct qed_dev *cdev = p_hwfn->cdev; + + /* indicate how ooo and high pri traffic is dealt with */ + active_phys_tcs = num_ports == MAX_NUM_PORTS_K2 ? 
+ ACTIVE_TCS_BMAP_4PORT_K2 : + ACTIVE_TCS_BMAP; + + for (i = 0; i < num_ports; i++) { + struct init_qm_port_params *p_qm_port = + &p_hwfn->qm_info.qm_port_params[i]; + u16 pbf_max_cmd_lines; + + p_qm_port->active = 1; + p_qm_port->active_phys_tcs = active_phys_tcs; + pbf_max_cmd_lines = (u16)NUM_OF_PBF_CMD_LINES(cdev); + p_qm_port->num_pbf_cmd_lines = pbf_max_cmd_lines / num_ports; + p_qm_port->num_btb_blocks = NUM_OF_BTB_BLOCKS(cdev) / num_ports; + } +} + +/* Reset the params which must be reset for qm init. QM init may be called as + * a result of flows other than driver load (e.g. dcbx renegotiation). Other + * params may be affected by the init but would simply recalculate to the same + * values. The allocations made for QM init, ports, vports, pqs and vfqs are not + * affected as these amounts stay the same. + */ +static void qed_init_qm_reset_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + qm_info->num_pqs = 0; + qm_info->num_vports = 0; + qm_info->num_pf_rls = 0; + qm_info->num_vf_pqs = 0; + qm_info->first_vf_pq = 0; + qm_info->first_mcos_pq = 0; + qm_info->first_rl_pq = 0; +} + +static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + qm_info->num_vports++; + + if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) + DP_ERR(p_hwfn, + "vport overflow! qm_info->num_vports %d, qm_init_get_num_vports() %d\n", + qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); +} + +/* initialize a single pq and manage qm_info resources accounting. + * The pq_init_flags param determines whether the PQ is rate limited + * (for VF or PF) and whether a new vport is allocated to the pq or not + * (i.e. vport will be shared). + */ + +/* flags for pq init */ +#define PQ_INIT_SHARE_VPORT BIT(0) +#define PQ_INIT_PF_RL BIT(1) +#define PQ_INIT_VF_RL BIT(2) + +/* defines for pq init */ +#define PQ_INIT_DEFAULT_WRR_GROUP 1 +#define PQ_INIT_DEFAULT_TC 0 + +void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc) +{ + p_info->offload_tc = tc; + p_info->offload_tc_set = true; +} + +static bool qed_is_offload_tc_set(struct qed_hwfn *p_hwfn) +{ + return p_hwfn->hw_info.offload_tc_set; +} + +static u32 qed_get_offload_tc(struct qed_hwfn *p_hwfn) +{ + if (qed_is_offload_tc_set(p_hwfn)) + return p_hwfn->hw_info.offload_tc; + + return PQ_INIT_DEFAULT_TC; +} + +static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, + struct qed_qm_info *qm_info, + u8 tc, u32 pq_init_flags) +{ + u16 pq_idx = qm_info->num_pqs, max_pq = qed_init_qm_get_num_pqs(p_hwfn); + + if (pq_idx > max_pq) + DP_ERR(p_hwfn, + "pq overflow! pq %d, max pq %d\n", pq_idx, max_pq); + + /* init pq params */ + qm_info->qm_pq_params[pq_idx].port_id = p_hwfn->port_id; + qm_info->qm_pq_params[pq_idx].vport_id = qm_info->start_vport + + qm_info->num_vports; + qm_info->qm_pq_params[pq_idx].tc_id = tc; + qm_info->qm_pq_params[pq_idx].wrr_group = PQ_INIT_DEFAULT_WRR_GROUP; + qm_info->qm_pq_params[pq_idx].rl_valid = + (pq_init_flags & PQ_INIT_PF_RL || pq_init_flags & PQ_INIT_VF_RL); + + /* qm params accounting */ + qm_info->num_pqs++; + if (!(pq_init_flags & PQ_INIT_SHARE_VPORT)) + qm_info->num_vports++; + + if (pq_init_flags & PQ_INIT_PF_RL) + qm_info->num_pf_rls++; + + if (qm_info->num_vports > qed_init_qm_get_num_vports(p_hwfn)) + DP_ERR(p_hwfn, + "vport overflow! 
qm_info->num_vports %d, qm_init_get_num_vports() %d\n", + qm_info->num_vports, qed_init_qm_get_num_vports(p_hwfn)); + + if (qm_info->num_pf_rls > qed_init_qm_get_num_pf_rls(p_hwfn)) + DP_ERR(p_hwfn, + "rl overflow! qm_info->num_pf_rls %d, qm_init_get_num_pf_rls() %d\n", + qm_info->num_pf_rls, qed_init_qm_get_num_pf_rls(p_hwfn)); +} + +/* get pq index according to PQ_FLAGS */ +static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, + unsigned long pq_flags) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + /* Can't have multiple flags set here */ + if (bitmap_weight(&pq_flags, + sizeof(pq_flags) * BITS_PER_BYTE) > 1) { + DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); + goto err; + } + + if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { + DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); + goto err; + } + + switch (pq_flags) { + case PQ_FLAGS_RLS: + return &qm_info->first_rl_pq; + case PQ_FLAGS_MCOS: + return &qm_info->first_mcos_pq; + case PQ_FLAGS_LB: + return &qm_info->pure_lb_pq; + case PQ_FLAGS_OOO: + return &qm_info->ooo_pq; + case PQ_FLAGS_ACK: + return &qm_info->pure_ack_pq; + case PQ_FLAGS_OFLD: + return &qm_info->first_ofld_pq; + case PQ_FLAGS_LLT: + return &qm_info->first_llt_pq; + case PQ_FLAGS_VFS: + return &qm_info->first_vf_pq; + default: + goto err; + } + +err: + return &qm_info->start_pq; +} + +/* save pq index in qm info */ +static void qed_init_qm_set_idx(struct qed_hwfn *p_hwfn, + u32 pq_flags, u16 pq_val) +{ + u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); + + *base_pq_idx = p_hwfn->qm_info.start_pq + pq_val; +} + +/* get tx pq index, with the PQ TX base already set (ready for context init) */ +u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags) +{ + u16 *base_pq_idx = qed_init_qm_get_idx_from_flags(p_hwfn, pq_flags); + + return *base_pq_idx + CM_TX_PQ_BASE; +} + +u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) +{ + u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); + + if (max_tc == 0) { + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", + PQ_FLAGS_MCOS); + return p_hwfn->qm_info.start_pq; + } + + if (tc > max_tc) + DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); + + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); +} + +u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) +{ + u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); + + if (max_vf == 0) { + DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", + PQ_FLAGS_VFS); + return p_hwfn->qm_info.start_pq; + } + + if (vf > max_vf) + DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); + + return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf); +} + +u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) +{ + u16 first_ofld_pq, pq_offset; + + first_ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? + tc : PQ_INIT_DEFAULT_TC; + + return first_ofld_pq + pq_offset; +} + +u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc) +{ + u16 first_llt_pq, pq_offset; + + first_llt_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LLT); + pq_offset = (tc < qed_init_qm_get_num_mtc_tcs(p_hwfn)) ? 
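+ /* TCs beyond the multi-TC range fall back to the default TC */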
+ tc : PQ_INIT_DEFAULT_TC; + + return first_llt_pq + pq_offset; +} + +/* Functions for creating specific types of pqs */ +static void qed_init_qm_lb_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LB)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LB, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info, PURE_LB_TC, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_ooo_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OOO)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OOO, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info, qm_info->ooo_tc, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_pure_ack_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_ACK)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_ACK, qm_info->num_pqs); + qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), + PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_mtc_pqs(struct qed_hwfn *p_hwfn) +{ + u8 num_tcs = qed_init_qm_get_num_mtc_tcs(p_hwfn); + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u8 tc; + + /* override pq's TC if offload TC is set */ + for (tc = 0; tc < num_tcs; tc++) + qed_init_qm_pq(p_hwfn, qm_info, + qed_is_offload_tc_set(p_hwfn) ? + p_hwfn->hw_info.offload_tc : tc, + PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_offload_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_OFLD)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_OFLD, qm_info->num_pqs); + qed_init_qm_mtc_pqs(p_hwfn); +} + +static void qed_init_qm_low_latency_pq(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_LLT)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_LLT, qm_info->num_pqs); + qed_init_qm_mtc_pqs(p_hwfn); +} + +static void qed_init_qm_mcos_pqs(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u8 tc_idx; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_MCOS)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_MCOS, qm_info->num_pqs); + for (tc_idx = 0; tc_idx < qed_init_qm_get_num_tcs(p_hwfn); tc_idx++) + qed_init_qm_pq(p_hwfn, qm_info, tc_idx, PQ_INIT_SHARE_VPORT); +} + +static void qed_init_qm_vf_pqs(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + u16 vf_idx, num_vfs = qed_init_qm_get_num_vfs(p_hwfn); + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_VFS)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_VFS, qm_info->num_pqs); + qm_info->num_vf_pqs = num_vfs; + for (vf_idx = 0; vf_idx < num_vfs; vf_idx++) + qed_init_qm_pq(p_hwfn, + qm_info, PQ_INIT_DEFAULT_TC, PQ_INIT_VF_RL); +} + +static void qed_init_qm_rl_pqs(struct qed_hwfn *p_hwfn) +{ + u16 pf_rls_idx, num_pf_rls = qed_init_qm_get_num_pf_rls(p_hwfn); + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + + if (!(qed_get_pq_flags(p_hwfn) & PQ_FLAGS_RLS)) + return; + + qed_init_qm_set_idx(p_hwfn, PQ_FLAGS_RLS, qm_info->num_pqs); + for (pf_rls_idx = 0; pf_rls_idx < num_pf_rls; pf_rls_idx++) + qed_init_qm_pq(p_hwfn, qm_info, qed_get_offload_tc(p_hwfn), + PQ_INIT_PF_RL); +} + +static void qed_init_qm_pq_params(struct qed_hwfn *p_hwfn) +{ + /* rate limited pqs, must come first (FW assumption) */ + qed_init_qm_rl_pqs(p_hwfn); + + /* pqs for multi cos */ + qed_init_qm_mcos_pqs(p_hwfn); + + /* pure loopback pq 
*/ + qed_init_qm_lb_pq(p_hwfn); + + /* out of order pq */ + qed_init_qm_ooo_pq(p_hwfn); + + /* pure ack pq */ + qed_init_qm_pure_ack_pq(p_hwfn); + + /* pq for offloaded protocol */ + qed_init_qm_offload_pq(p_hwfn); + + /* low latency pq */ + qed_init_qm_low_latency_pq(p_hwfn); + + /* done sharing vports */ + qed_init_qm_advance_vport(p_hwfn); + + /* pqs for vfs */ + qed_init_qm_vf_pqs(p_hwfn); +} + +/* compare values of getters against resources amounts */ +static int qed_init_qm_sanity(struct qed_hwfn *p_hwfn) +{ + if (qed_init_qm_get_num_vports(p_hwfn) > RESC_NUM(p_hwfn, QED_VPORT)) { + DP_ERR(p_hwfn, "requested amount of vports exceeds resource\n"); + return -EINVAL; + } + + if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ)) + return 0; + + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) { + p_hwfn->hw_info.multi_tc_roce_en = false; + DP_NOTICE(p_hwfn, + "multi-tc roce was disabled to reduce requested amount of pqs\n"); + if (qed_init_qm_get_num_pqs(p_hwfn) <= RESC_NUM(p_hwfn, QED_PQ)) + return 0; + } + + DP_ERR(p_hwfn, "requested amount of pqs exceeds resource\n"); + return -EINVAL; +} + +static void qed_dp_init_qm_params(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct init_qm_vport_params *vport; + struct init_qm_port_params *port; + struct init_qm_pq_params *pq; + int i, tc; + + /* top level params */ + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "qm init top level params: start_pq %d, start_vport %d, pure_lb_pq %d, offload_pq %d, llt_pq %d, pure_ack_pq %d\n", + qm_info->start_pq, + qm_info->start_vport, + qm_info->pure_lb_pq, + qm_info->first_ofld_pq, + qm_info->first_llt_pq, + qm_info->pure_ack_pq); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "ooo_pq %d, first_vf_pq %d, num_pqs %d, num_vf_pqs %d, num_vports %d, max_phys_tcs_per_port %d\n", + qm_info->ooo_pq, + qm_info->first_vf_pq, + qm_info->num_pqs, + qm_info->num_vf_pqs, + qm_info->num_vports, qm_info->max_phys_tcs_per_port); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "pf_rl_en %d, pf_wfq_en %d, vport_rl_en %d, vport_wfq_en %d, pf_wfq %d, pf_rl %d, num_pf_rls %d, pq_flags %x\n", + qm_info->pf_rl_en, + qm_info->pf_wfq_en, + qm_info->vport_rl_en, + qm_info->vport_wfq_en, + qm_info->pf_wfq, + qm_info->pf_rl, + qm_info->num_pf_rls, qed_get_pq_flags(p_hwfn)); + + /* port table */ + for (i = 0; i < p_hwfn->cdev->num_ports_in_engine; i++) { + port = &(qm_info->qm_port_params[i]); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "port idx %d, active %d, active_phys_tcs %d, num_pbf_cmd_lines %d, num_btb_blocks %d, reserved %d\n", + i, + port->active, + port->active_phys_tcs, + port->num_pbf_cmd_lines, + port->num_btb_blocks, port->reserved); + } + + /* vport table */ + for (i = 0; i < qm_info->num_vports; i++) { + vport = &(qm_info->qm_vport_params[i]); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "vport idx %d, wfq %d, first_tx_pq_id [ ", + qm_info->start_vport + i, vport->wfq); + for (tc = 0; tc < NUM_OF_TCS; tc++) + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "%d ", vport->first_tx_pq_id[tc]); + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "]\n"); + } + + /* pq table */ + for (i = 0; i < qm_info->num_pqs; i++) { + pq = &(qm_info->qm_pq_params[i]); + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "pq idx %d, port %d, vport_id %d, tc %d, wrr_grp %d, rl_valid %d rl_id %d\n", + qm_info->start_pq + i, + pq->port_id, + pq->vport_id, + pq->tc_id, pq->wrr_group, pq->rl_valid, pq->rl_id); + } +} + +static void qed_init_qm_info(struct qed_hwfn *p_hwfn) +{ + /* reset params required for init run */ + qed_init_qm_reset_params(p_hwfn); + + /* init QM top level 
params */ + qed_init_qm_params(p_hwfn); + + /* init QM port params */ + qed_init_qm_port_params(p_hwfn); + + /* init QM vport params */ + qed_init_qm_vport_params(p_hwfn); + + /* init QM physical queue params */ + qed_init_qm_pq_params(p_hwfn); + + /* display all that init */ + qed_dp_init_qm_params(p_hwfn); +} + +/* This function reconfigures the QM pf on the fly. + * For this purpose we: + * 1. reconfigure the QM database + * 2. set new values to runtime array + * 3. send an sdm_qm_cmd through the rbc interface to stop the QM + * 4. activate init tool in QM_PF stage + * 5. send an sdm_qm_cmd through rbc interface to release the QM + */ +int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + bool b_rc; + int rc; + + /* initialize qed's qm data structure */ + qed_init_qm_info(p_hwfn); + + /* stop PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + /* prepare QM portion of runtime array */ + qed_qm_init_pf(p_hwfn, p_ptt, false); + + /* activate init tool on runtime array */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id, + p_hwfn->hw_info.hw_mode); + if (rc) + return rc; + + /* start PF's qm queues */ + spin_lock_bh(&qm_lock); + b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true, + qm_info->start_pq, qm_info->num_pqs); + spin_unlock_bh(&qm_lock); + if (!b_rc) + return -EINVAL; + + return 0; +} + +static int qed_alloc_qm_data(struct qed_hwfn *p_hwfn) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + int rc; + + rc = qed_init_qm_sanity(p_hwfn); + if (rc) + goto alloc_err; + + qm_info->qm_pq_params = kcalloc(qed_init_qm_get_num_pqs(p_hwfn), + sizeof(*qm_info->qm_pq_params), + GFP_KERNEL); + if (!qm_info->qm_pq_params) + goto alloc_err; + + qm_info->qm_vport_params = kcalloc(qed_init_qm_get_num_vports(p_hwfn), + sizeof(*qm_info->qm_vport_params), + GFP_KERNEL); + if (!qm_info->qm_vport_params) + goto alloc_err; + + qm_info->qm_port_params = kcalloc(p_hwfn->cdev->num_ports_in_engine, + sizeof(*qm_info->qm_port_params), + GFP_KERNEL); + if (!qm_info->qm_port_params) + goto alloc_err; + + qm_info->wfq_data = kcalloc(qed_init_qm_get_num_vports(p_hwfn), + sizeof(*qm_info->wfq_data), + GFP_KERNEL); + if (!qm_info->wfq_data) + goto alloc_err; + + return 0; + +alloc_err: + DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n"); + qed_qm_info_free(p_hwfn); + return -ENOMEM; +} + +int qed_resc_alloc(struct qed_dev *cdev) +{ + u32 rdma_tasks, excess_tasks; + u32 line_count; + int i, rc = 0; + + if (IS_VF(cdev)) { + for_each_hwfn(cdev, i) { + rc = qed_l2_alloc(&cdev->hwfns[i]); + if (rc) + return rc; + } + return rc; + } + + cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL); + if (!cdev->fw_data) + return -ENOMEM; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u32 n_eqes, num_cons; + + /* Initialize the doorbell recovery mechanism */ + rc = qed_db_recovery_setup(p_hwfn); + if (rc) + goto alloc_err; + + /* First allocate the context manager structure */ + rc = qed_cxt_mngr_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* Set the HW cid/tid numbers (in the contest manager) + * Must be done prior to any further computations. 
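/* Editorial sketch (not driver code): the ILT sizing further below is a
 * "try the maximum, shrink by the reported excess, retry once" pattern.
 * The helper here is a hypothetical stand-in for qed_cxt_set_pf_params()
 * plus qed_cxt_cfg_ilt_compute(); the constants are made up.
 */
#include <stdio.h>

#define MAX_TASKS	1000U
#define AVAILABLE_LINES	800U

/* pretend each task costs one ILT line; report how many lines are needed */
static int compute_lines(unsigned int tasks, unsigned int *lines)
{
	*lines = tasks;
	return (*lines <= AVAILABLE_LINES) ? 0 : -1;
}

int main(void)
{
	unsigned int tasks = MAX_TASKS, lines, excess;

	if (compute_lines(tasks, &lines)) {
		excess = lines - AVAILABLE_LINES;	/* like *_compute_excess() */
		tasks = MAX_TASKS - excess;		/* shrink the RDMA request */
		if (compute_lines(tasks, &lines))
			return 1;			/* still too big: give up  */
	}

	printf("using %u tasks over %u lines\n", tasks, lines);
	return 0;
}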
+ */ + rc = qed_cxt_set_pf_params(p_hwfn, RDMA_MAX_TIDS); + if (rc) + goto alloc_err; + + rc = qed_alloc_qm_data(p_hwfn); + if (rc) + goto alloc_err; + + /* init qm info */ + qed_init_qm_info(p_hwfn); + + /* Compute the ILT client partition */ + rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); + if (rc) { + DP_NOTICE(p_hwfn, + "too many ILT lines; re-computing with less lines\n"); + /* In case there are not enough ILT lines we reduce the + * number of RDMA tasks and re-compute. + */ + excess_tasks = + qed_cxt_cfg_ilt_compute_excess(p_hwfn, line_count); + if (!excess_tasks) + goto alloc_err; + + rdma_tasks = RDMA_MAX_TIDS - excess_tasks; + rc = qed_cxt_set_pf_params(p_hwfn, rdma_tasks); + if (rc) + goto alloc_err; + + rc = qed_cxt_cfg_ilt_compute(p_hwfn, &line_count); + if (rc) { + DP_ERR(p_hwfn, + "failed ILT compute. Requested too many lines: %u\n", + line_count); + + goto alloc_err; + } + } + + /* CID map / ILT shadow table / T2 + * The talbes sizes are determined by the computations above + */ + rc = qed_cxt_tables_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SPQ, must follow ILT because initializes SPQ context */ + rc = qed_spq_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SP status block allocation */ + p_hwfn->p_dpc_ptt = qed_get_reserved_ptt(p_hwfn, + RESERVED_PTT_DPC); + + rc = qed_int_alloc(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + goto alloc_err; + + rc = qed_iov_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* EQ */ + n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain); + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + u32 n_srq = qed_cxt_get_total_srq_count(p_hwfn); + enum protocol_type rdma_proto; + + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) + rdma_proto = PROTOCOLID_ROCE; + else + rdma_proto = PROTOCOLID_IWARP; + + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, + rdma_proto, + NULL) * 2; + /* EQ should be able to get events from all SRQ's + * at the same time + */ + n_eqes += num_cons + 2 * MAX_NUM_VFS_BB + n_srq; + } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || + p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { + num_cons = + qed_cxt_get_proto_cid_count(p_hwfn, + PROTOCOLID_TCP_ULP, + NULL); + n_eqes += 2 * num_cons; + } + + if (n_eqes > 0xFFFF) { + DP_ERR(p_hwfn, + "Cannot allocate 0x%x EQ elements. 
The maximum of a u16 chain is 0x%x\n", + n_eqes, 0xFFFF); + goto alloc_no_mem; + } + + rc = qed_eq_alloc(p_hwfn, (u16)n_eqes); + if (rc) + goto alloc_err; + + rc = qed_consq_alloc(p_hwfn); + if (rc) + goto alloc_err; + + rc = qed_l2_alloc(p_hwfn); + if (rc) + goto alloc_err; + +#ifdef CONFIG_QED_LL2 + if (p_hwfn->using_ll2) { + rc = qed_ll2_alloc(p_hwfn); + if (rc) + goto alloc_err; + } +#endif + + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + rc = qed_fcoe_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + rc = qed_iscsi_alloc(p_hwfn); + if (rc) + goto alloc_err; + rc = qed_ooo_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + + if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { + rc = qed_nvmetcp_alloc(p_hwfn); + if (rc) + goto alloc_err; + rc = qed_ooo_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + + if (QED_IS_RDMA_PERSONALITY(p_hwfn)) { + rc = qed_rdma_info_alloc(p_hwfn); + if (rc) + goto alloc_err; + } + + /* DMA info initialization */ + rc = qed_dmae_info_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* DCBX initialization */ + rc = qed_dcbx_info_alloc(p_hwfn); + if (rc) + goto alloc_err; + + rc = qed_dbg_alloc_user_data(p_hwfn, &p_hwfn->dbg_user_info); + if (rc) + goto alloc_err; + } + + rc = qed_llh_alloc(cdev); + if (rc) { + DP_NOTICE(cdev, + "Failed to allocate memory for the llh_info structure\n"); + goto alloc_err; + } + + cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL); + if (!cdev->reset_stats) + goto alloc_no_mem; + + return 0; + +alloc_no_mem: + rc = -ENOMEM; +alloc_err: + qed_resc_free(cdev); + return rc; +} + +static int qed_fw_err_handler(struct qed_hwfn *p_hwfn, + u8 opcode, + u16 echo, + union event_ring_data *data, u8 fw_return_code) +{ + if (fw_return_code != COMMON_ERR_CODE_ERROR) + goto eqe_unexpected; + + if (data->err_data.recovery_scope == ERR_SCOPE_FUNC && + le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) { + qed_sriov_vfpf_malicious(p_hwfn, &data->err_data); + return 0; + } + +eqe_unexpected: + DP_ERR(p_hwfn, + "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n", + opcode, fw_return_code, echo); + return -EINVAL; +} + +static int qed_common_eqe_event(struct qed_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data, + u8 fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + case COMMON_EVENT_VF_FLR: + return qed_sriov_eqe_event(p_hwfn, opcode, echo, data, + fw_return_code); + case COMMON_EVENT_FW_ERROR: + return qed_fw_err_handler(p_hwfn, opcode, + le16_to_cpu(echo), data, + fw_return_code); + default: + DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n", + opcode, echo); + return -EINVAL; + } +} + +void qed_resc_setup(struct qed_dev *cdev) +{ + int i; + + if (IS_VF(cdev)) { + for_each_hwfn(cdev, i) + qed_l2_setup(&cdev->hwfns[i]); + return; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + qed_cxt_mngr_setup(p_hwfn); + qed_spq_setup(p_hwfn); + qed_eq_setup(p_hwfn); + qed_consq_setup(p_hwfn); + + /* Read shadow of current MFW mailbox */ + qed_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); + memcpy(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, + p_hwfn->mcp_info->mfw_mb_length); + + qed_int_setup(p_hwfn, p_hwfn->p_main_ptt); + + qed_l2_setup(p_hwfn); + qed_iov_setup(p_hwfn); + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + qed_common_eqe_event); +#ifdef CONFIG_QED_LL2 + if (p_hwfn->using_ll2) + qed_ll2_setup(p_hwfn); +#endif + if 
(p_hwfn->hw_info.personality == QED_PCI_FCOE) + qed_fcoe_setup(p_hwfn); + + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) { + qed_iscsi_setup(p_hwfn); + qed_ooo_setup(p_hwfn); + } + + if (p_hwfn->hw_info.personality == QED_PCI_NVMETCP) { + qed_nvmetcp_setup(p_hwfn); + qed_ooo_setup(p_hwfn); + } + } +} + +#define FINAL_CLEANUP_POLL_CNT (100) +#define FINAL_CLEANUP_POLL_TIME (10) +int qed_final_cleanup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 id, bool is_vf) +{ + u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; + int rc = -EBUSY; + + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id); + if (is_vf) + id += 0x10; + + command |= X_FINAL_CLEANUP_AGG_INT << + SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; + command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; + command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; + command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; + + /* Make sure notification is not set before initiating final cleanup */ + if (REG_RD(p_hwfn, addr)) { + DP_NOTICE(p_hwfn, + "Unexpected; Found final cleanup notification before initiating final cleanup\n"); + REG_WR(p_hwfn, addr, 0); + } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Sending final cleanup for PFVF[%d] [Command %08x]\n", + id, command); + + qed_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, command); + + /* Poll until completion */ + while (!REG_RD(p_hwfn, addr) && count--) + msleep(FINAL_CLEANUP_POLL_TIME); + + if (REG_RD(p_hwfn, addr)) + rc = 0; + else + DP_NOTICE(p_hwfn, + "Failed to receive FW final cleanup notification\n"); + + /* Cleanup afterwards */ + REG_WR(p_hwfn, addr, 0); + + return rc; +} + +static int qed_calc_hw_mode(struct qed_hwfn *p_hwfn) +{ + int hw_mode = 0; + + if (QED_IS_BB_B0(p_hwfn->cdev)) { + hw_mode |= 1 << MODE_BB; + } else if (QED_IS_AH(p_hwfn->cdev)) { + hw_mode |= 1 << MODE_K2; + } else { + DP_NOTICE(p_hwfn, "Unknown chip type %#x\n", + p_hwfn->cdev->type); + return -EINVAL; + } + + switch (p_hwfn->cdev->num_ports_in_engine) { + case 1: + hw_mode |= 1 << MODE_PORTS_PER_ENG_1; + break; + case 2: + hw_mode |= 1 << MODE_PORTS_PER_ENG_2; + break; + case 4: + hw_mode |= 1 << MODE_PORTS_PER_ENG_4; + break; + default: + DP_NOTICE(p_hwfn, "num_ports_in_engine = %d not supported\n", + p_hwfn->cdev->num_ports_in_engine); + return -EINVAL; + } + + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) + hw_mode |= 1 << MODE_MF_SD; + else + hw_mode |= 1 << MODE_MF_SI; + + hw_mode |= 1 << MODE_ASIC; + + if (p_hwfn->cdev->num_hwfns > 1) + hw_mode |= 1 << MODE_100G; + + p_hwfn->hw_info.hw_mode = hw_mode; + + DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP), + "Configuring function for hw_mode: 0x%08x\n", + p_hwfn->hw_info.hw_mode); + + return 0; +} + +/* Init run time data for all PFs on an engine. 
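/* Editorial sketch (not driver code): qed_final_cleanup() above is a bounded
 * poll - issue a command, re-read an ack location up to FINAL_CLEANUP_POLL_CNT
 * times, and clear the notification afterwards. A condensed, self-contained
 * version with hypothetical names:
 */
#include <stdio.h>

static volatile unsigned int ack;	/* stands in for the USTORM ack word */

static void issue_command(void)
{
	ack = 1;	/* in the driver, firmware sets this asynchronously */
}

static int wait_for_ack(unsigned int retries)
{
	issue_command();
	while (!ack && retries--)
		;	/* the driver sleeps a few ms between polls */
	if (!ack)
		return -1;	/* timed out */
	ack = 0;		/* clear the notification afterwards */
	return 0;
}

int main(void)
{
	printf("final cleanup %s\n", wait_for_ack(100) ? "timed out" : "acked");
	return 0;
}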
*/ +static void qed_init_cau_rt_data(struct qed_dev *cdev) +{ + u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; + int i, igu_sb_id; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_igu_info *p_igu_info; + struct qed_igu_block *p_block; + struct cau_sb_entry sb_entry; + + p_igu_info = p_hwfn->hw_info.p_igu_info; + + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(cdev); igu_sb_id++) { + p_block = &p_igu_info->entry[igu_sb_id]; + + if (!p_block->is_pf) + continue; + + qed_init_cau_sb_entry(p_hwfn, &sb_entry, + p_block->function_id, 0, 0); + STORE_RT_REG_AGG(p_hwfn, offset + igu_sb_id * 2, + sb_entry); + } + } +} + +static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 val, wr_mbs, cache_line_size; + + val = qed_rd(p_hwfn, p_ptt, PSWRQ2_REG_WR_MBS0); + switch (val) { + case 0: + wr_mbs = 128; + break; + case 1: + wr_mbs = 256; + break; + case 2: + wr_mbs = 512; + break; + default: + DP_INFO(p_hwfn, + "Unexpected value of PSWRQ2_REG_WR_MBS0 [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", + val); + return; + } + + cache_line_size = min_t(u32, L1_CACHE_BYTES, wr_mbs); + switch (cache_line_size) { + case 32: + val = 0; + break; + case 64: + val = 1; + break; + case 128: + val = 2; + break; + case 256: + val = 3; + break; + default: + DP_INFO(p_hwfn, + "Unexpected value of cache line size [0x%x]. Avoid configuring PGLUE_B_REG_CACHE_LINE_SIZE.\n", + cache_line_size); + } + + if (wr_mbs < L1_CACHE_BYTES) + DP_INFO(p_hwfn, + "The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n", + L1_CACHE_BYTES, wr_mbs); + + STORE_RT_REG(p_hwfn, PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET, val); + if (val > 0) { + STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET, val); + STORE_RT_REG(p_hwfn, PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET, val); + } +} + +static int qed_hw_init_common(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int hw_mode) +{ + struct qed_qm_info *qm_info = &p_hwfn->qm_info; + struct qed_qm_common_rt_init_params *params; + struct qed_dev *cdev = p_hwfn->cdev; + u8 vf_id, max_num_vfs; + u16 num_pfs, pf_id; + u32 concrete_fid; + int rc = 0; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + DP_NOTICE(p_hwfn->cdev, + "Failed to allocate common init params\n"); + + return -ENOMEM; + } + + qed_init_cau_rt_data(cdev); + + /* Program GTT windows */ + qed_gtt_init(p_hwfn); + + if (p_hwfn->mcp_info) { + if (p_hwfn->mcp_info->func_info.bandwidth_max) + qm_info->pf_rl_en = true; + if (p_hwfn->mcp_info->func_info.bandwidth_min) + qm_info->pf_wfq_en = true; + } + + params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port; + params->pf_rl_en = qm_info->pf_rl_en; + params->pf_wfq_en = qm_info->pf_wfq_en; + params->global_rl_en = qm_info->vport_rl_en; + params->vport_wfq_en = qm_info->vport_wfq_en; + params->port_params = qm_info->qm_port_params; + + qed_qm_common_rt_init(p_hwfn, params); + + qed_cxt_hw_init_common(p_hwfn); + + qed_init_cache_line_size(p_hwfn, p_ptt); + + rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); + if (rc) + goto out; + + qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); + + if (QED_IS_BB(p_hwfn->cdev)) { + num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev); + for (pf_id = 0; pf_id < num_pfs; pf_id++) { + qed_fid_pretend(p_hwfn, p_ptt, pf_id); + qed_wr(p_hwfn, p_ptt, 
PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + } + /* pretend to original PF */ + qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + } + + max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB; + for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { + concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id); + qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); + qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0); + qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1); + qed_wr(p_hwfn, p_ptt, TCFC_REG_WEAK_ENABLE_VF, 0x0); + } + /* pretend to original PF */ + qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + +out: + kfree(params); + + return rc; +} + +static int +qed_hw_init_dpi_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 pwm_region_size, u32 n_cpus) +{ + u32 dpi_bit_shift, dpi_count, dpi_page_size; + u32 min_dpis; + u32 n_wids; + + /* Calculate DPI size */ + n_wids = max_t(u32, QED_MIN_WIDS, n_cpus); + dpi_page_size = QED_WID_SIZE * roundup_pow_of_two(n_wids); + dpi_page_size = (dpi_page_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); + dpi_bit_shift = ilog2(dpi_page_size / 4096); + dpi_count = pwm_region_size / dpi_page_size; + + min_dpis = p_hwfn->pf_params.rdma_pf_params.min_dpis; + min_dpis = max_t(u32, QED_MIN_DPIS, min_dpis); + + p_hwfn->dpi_size = dpi_page_size; + p_hwfn->dpi_count = dpi_count; + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPI_BIT_SHIFT, dpi_bit_shift); + + if (dpi_count < min_dpis) + return -EINVAL; + + return 0; +} + +enum QED_ROCE_EDPM_MODE { + QED_ROCE_EDPM_MODE_ENABLE = 0, + QED_ROCE_EDPM_MODE_FORCE_ON = 1, + QED_ROCE_EDPM_MODE_DISABLE = 2, +}; + +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) + return false; + + return true; +} + +static int +qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 pwm_regsize, norm_regsize; + u32 non_pwm_conn, min_addr_reg1; + u32 db_bar_size, n_cpus = 1; + u32 roce_edpm_mode; + u32 pf_dems_shift; + int rc = 0; + u8 cond; + + db_bar_size = qed_hw_bar_size(p_hwfn, p_ptt, BAR_ID_1); + if (p_hwfn->cdev->num_hwfns > 1) + db_bar_size /= 2; + + /* Calculate doorbell regions */ + non_pwm_conn = qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, + NULL) + + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, + NULL); + norm_regsize = roundup(QED_PF_DEMS_SIZE * non_pwm_conn, PAGE_SIZE); + min_addr_reg1 = norm_regsize / 4096; + pwm_regsize = db_bar_size - norm_regsize; + + /* Check that the normal and PWM sizes are valid */ + if (db_bar_size < norm_regsize) { + DP_ERR(p_hwfn->cdev, + "Doorbell BAR size 0x%x is too small (normal region is 0x%0x )\n", + db_bar_size, norm_regsize); + return -EINVAL; + } + + if (pwm_regsize < QED_MIN_PWM_REGION) { + DP_ERR(p_hwfn->cdev, + "PWM region size 0x%0x is too small. Should be at least 0x%0x (Doorbell BAR size is 0x%x and normal region size is 0x%0x)\n", + pwm_regsize, + QED_MIN_PWM_REGION, db_bar_size, norm_regsize); + return -EINVAL; + } + + /* Calculate number of DPIs */ + roce_edpm_mode = p_hwfn->pf_params.rdma_pf_params.roce_edpm_mode; + if ((roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE) || + ((roce_edpm_mode == QED_ROCE_EDPM_MODE_FORCE_ON))) { + /* Either EDPM is mandatory, or we are attempting to allocate a + * WID per CPU. 
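/* Editorial sketch (not driver code): the DPI sizing in qed_hw_init_dpi_size()
 * above rounds the WID count up to a power of two, scales it by the WID size,
 * page-aligns the result, and then sees how many DPIs fit in the PWM region.
 * The constants and values below are illustrative assumptions only.
 */
#include <stdio.h>

#define WID_SIZE	1024u		/* assumed doorbell window size */
#define MIN_WIDS	4u
#define PAGE_SZ		4096u

static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned int n_cpus = 6, pwm_region_size = 2 * 1024 * 1024;
	unsigned int n_wids = (n_cpus > MIN_WIDS) ? n_cpus : MIN_WIDS;
	unsigned int dpi_page_size = WID_SIZE * roundup_pow2(n_wids);

	dpi_page_size = (dpi_page_size + PAGE_SZ - 1) & ~(PAGE_SZ - 1);

	printf("dpi_page_size=%u dpi_count=%u\n",
	       dpi_page_size, pwm_region_size / dpi_page_size);
	return 0;
}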
+ */ + n_cpus = num_present_cpus(); + rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); + } + + cond = (rc && (roce_edpm_mode == QED_ROCE_EDPM_MODE_ENABLE)) || + (roce_edpm_mode == QED_ROCE_EDPM_MODE_DISABLE); + if (cond || p_hwfn->dcbx_no_edpm) { + /* Either EDPM is disabled from user configuration, or it is + * disabled via DCBx, or it is not mandatory and we failed to + * allocated a WID per CPU. + */ + n_cpus = 1; + rc = qed_hw_init_dpi_size(p_hwfn, p_ptt, pwm_regsize, n_cpus); + + if (cond) + qed_rdma_dpm_bar(p_hwfn, p_ptt); + } + + p_hwfn->wid_count = (u16)n_cpus; + + DP_INFO(p_hwfn, + "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", + norm_regsize, + pwm_regsize, + p_hwfn->dpi_size, + p_hwfn->dpi_count, + (!qed_edpm_enabled(p_hwfn)) ? + "disabled" : "enabled", PAGE_SIZE); + + if (rc) { + DP_ERR(p_hwfn, + "Failed to allocate enough DPIs. Allocated %d but the current minimum is %d.\n", + p_hwfn->dpi_count, + p_hwfn->pf_params.rdma_pf_params.min_dpis); + return -EINVAL; + } + + p_hwfn->dpi_start_offset = norm_regsize; + + /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ + pf_dems_shift = ilog2(QED_PF_DEMS_SIZE / 4); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); + + return 0; +} + +static int qed_hw_init_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int hw_mode) +{ + int rc = 0; + + /* In CMT the gate should be cleared by the 2nd hwfn */ + if (!QED_IS_CMT(p_hwfn->cdev) || !IS_LEAD_HWFN(p_hwfn)) + STORE_RT_REG(p_hwfn, NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET, 0); + + rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode); + if (rc) + return rc; + + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE, 0); + + return 0; +} + +static int qed_hw_init_pf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_tunnel_info *p_tunn, + int hw_mode, + bool b_hw_start, + enum qed_int_mode int_mode, + bool allow_npar_tx_switch) +{ + u8 rel_pf_id = p_hwfn->rel_pf_id; + int rc = 0; + + if (p_hwfn->mcp_info) { + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + if (p_info->bandwidth_min) + p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; + + /* Update rate limit once we'll actually have a link */ + p_hwfn->qm_info.pf_rl = 100000; + } + + qed_cxt_hw_init_pf(p_hwfn, p_ptt); + + qed_int_igu_init_rt(p_hwfn); + + /* Set VLAN in NIG if needed */ + if (hw_mode & BIT(MODE_MF_SD)) { + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, "Configuring LLH_FUNC_TAG\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, + p_hwfn->hw_info.ovlan); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Configuring LLH_FUNC_FILTER_HDR_SEL\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET, + 1); + } + + /* Enable classification by MAC if needed */ + if (hw_mode & BIT(MODE_MF_SI)) { + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Configuring TAGMAC_CLS_TYPE\n"); + STORE_RT_REG(p_hwfn, + NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1); + } + + /* Protocol Configuration */ + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, + ((p_hwfn->hw_info.personality == QED_PCI_ISCSI) || + (p_hwfn->hw_info.personality == QED_PCI_NVMETCP)) ? 1 : 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, + (p_hwfn->hw_info.personality == QED_PCI_FCOE) ? 
1 : 0); + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0); + + /* Sanity check before the PF init sequence that uses DMAE */ + rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase"); + if (rc) + return rc; + + /* PF Init sequence */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ + rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + qed_fw_overlay_init_ram(p_hwfn, p_ptt, p_hwfn->fw_overlay_mem); + + /* Pure runtime initializations - directly to the HW */ + qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); + + rc = qed_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); + if (rc) + return rc; + + /* Use the leading hwfn since in CMT only NIG #0 is operational */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_llh_hw_init_pf(p_hwfn, p_ptt); + if (rc) + return rc; + } + + if (b_hw_start) { + /* enable interrupts */ + qed_int_igu_enable(p_hwfn, p_ptt, int_mode); + + /* send function start command */ + rc = qed_sp_pf_start(p_hwfn, p_ptt, p_tunn, + allow_npar_tx_switch); + if (rc) { + DP_NOTICE(p_hwfn, "Function start ramrod failed\n"); + return rc; + } + if (p_hwfn->hw_info.personality == QED_PCI_FCOE) { + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1, BIT(2)); + qed_wr(p_hwfn, p_ptt, + PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST, + 0x100); + } + } + return rc; +} + +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable) +{ + u32 delay_idx = 0, val, set_val = b_enable ? 1 : 0; + + /* Configure the PF's internal FID_enable for master transactions */ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); + + /* Wait until value is set - try for 1 second every 50us */ + for (delay_idx = 0; delay_idx < 20000; delay_idx++) { + val = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + if (val == set_val) + break; + + usleep_range(50, 60); + } + + if (val != set_val) { + DP_NOTICE(p_hwfn, + "PFID_ENABLE_MASTER wasn't changed after a second\n"); + return -EAGAIN; + } + + return 0; +} + +static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_main_ptt) +{ + /* Read shadow of current MFW mailbox */ + qed_mcp_read_mb(p_hwfn, p_main_ptt); + memcpy(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, p_hwfn->mcp_info->mfw_mb_length); +} + +static void +qed_fill_load_req_params(struct qed_load_req_params *p_load_req, + struct qed_drv_load_params *p_drv_load) +{ + memset(p_load_req, 0, sizeof(*p_load_req)); + + p_load_req->drv_role = p_drv_load->is_crash_kernel ? 
+ QED_DRV_ROLE_KDUMP : QED_DRV_ROLE_OS; + p_load_req->timeout_val = p_drv_load->mfw_timeout_val; + p_load_req->avoid_eng_reset = p_drv_load->avoid_eng_reset; + p_load_req->override_force_load = p_drv_load->override_force_load; +} + +static int qed_vf_start(struct qed_hwfn *p_hwfn, + struct qed_hw_init_params *p_params) +{ + if (p_params->p_tunn) { + qed_vf_set_vf_start_tunn_update_param(p_params->p_tunn); + qed_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn); + } + + p_hwfn->b_int_enabled = true; + + return 0; +} + +static void qed_pglueb_clear_err(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, + BIT(p_hwfn->abs_pf_id)); +} + +int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) +{ + struct qed_load_req_params load_req_params; + u32 load_code, resp, param, drv_mb_param; + bool b_default_mtu = true; + struct qed_hwfn *p_hwfn; + const u32 *fw_overlays; + u32 fw_overlays_len; + u16 ether_type; + int rc = 0, i; + + if ((p_params->int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { + DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); + return -EINVAL; + } + + if (IS_PF(cdev)) { + rc = qed_init_fw_data(cdev, p_params->bin_fw_data); + if (rc) + return rc; + } + + for_each_hwfn(cdev, i) { + p_hwfn = &cdev->hwfns[i]; + + /* If management didn't provide a default, set one of our own */ + if (!p_hwfn->hw_info.mtu) { + p_hwfn->hw_info.mtu = 1500; + b_default_mtu = false; + } + + if (IS_VF(cdev)) { + qed_vf_start(p_hwfn, p_params); + continue; + } + + /* Some flows may keep variable set */ + p_hwfn->mcp_info->mcp_handling_status = 0; + + rc = qed_calc_hw_mode(p_hwfn); + if (rc) + return rc; + + if (IS_PF(cdev) && (test_bit(QED_MF_8021Q_TAGGING, + &cdev->mf_bits) || + test_bit(QED_MF_8021AD_TAGGING, + &cdev->mf_bits))) { + if (test_bit(QED_MF_8021Q_TAGGING, &cdev->mf_bits)) + ether_type = ETH_P_8021Q; + else + ether_type = ETH_P_8021AD; + STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, + ether_type); + STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, + ether_type); + } + + qed_fill_load_req_params(&load_req_params, + p_params->p_drv_load_params); + rc = qed_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, + &load_req_params); + if (rc) { + DP_NOTICE(p_hwfn, "Failed sending a LOAD_REQ command\n"); + return rc; + } + + load_code = load_req_params.load_code; + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load request was sent. Load code: 0x%x\n", + load_code); + + /* Only relevant for recovery: + * Clear the indication after LOAD_REQ is responded by the MFW. + */ + cdev->recov_in_prog = false; + + qed_mcp_set_capabilities(p_hwfn, p_hwfn->p_main_ptt); + + qed_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); + + /* Clean up chip from previous driver if such remains exist. + * This is not needed when the PF is the first one on the + * engine, since afterwards we are going to init the FW. 
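/* Editorial sketch (not driver code): the load_code switch further below
 * relies on fallthrough so that an ENGINE-level load also runs the PORT and
 * FUNCTION phases, a PORT-level load runs PORT and FUNCTION, and a
 * FUNCTION-level load runs only its own phase. A condensed illustration:
 */
#include <stdio.h>

enum load_code { LOAD_ENGINE, LOAD_PORT, LOAD_FUNCTION };

static int init_phases(enum load_code code)
{
	switch (code) {
	case LOAD_ENGINE:
		printf("engine phase\n");
		/* fall through */
	case LOAD_PORT:
		printf("port phase\n");
		/* fall through */
	case LOAD_FUNCTION:
		printf("function phase\n");
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	return init_phases(LOAD_PORT);	/* runs the port + function phases */
}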
+ */ + if (load_code != FW_MSG_CODE_DRV_LOAD_ENGINE) { + rc = qed_final_cleanup(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->rel_pf_id, false); + if (rc) { + qed_hw_err_notify(p_hwfn, p_hwfn->p_main_ptt, + QED_HW_ERR_RAMROD_FAIL, + "Final cleanup failed\n"); + goto load_err; + } + } + + /* Log and clear previous pglue_b errors if such exist */ + qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true); + + /* Enable the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt, + true); + if (rc) + goto load_err; + + /* Clear the pglue_b was_error indication. + * In E4 it must be done after the BME and the internal + * FID_enable for the PF are set, since VDMs may cause the + * indication to be set again. + */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + + fw_overlays = cdev->fw_data->fw_overlays; + fw_overlays_len = cdev->fw_data->fw_overlays_len; + p_hwfn->fw_overlay_mem = + qed_fw_overlay_mem_alloc(p_hwfn, fw_overlays, + fw_overlays_len); + if (!p_hwfn->fw_overlay_mem) { + DP_NOTICE(p_hwfn, + "Failed to allocate fw overlay memory\n"); + rc = -ENOMEM; + goto load_err; + } + + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_ENGINE: + rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc) + break; + fallthrough; + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = qed_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc) + break; + + fallthrough; + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, + p_params->p_tunn, + p_hwfn->hw_info.hw_mode, + p_params->b_hw_start, + p_params->int_mode, + p_params->allow_npar_tx_switch); + break; + default: + DP_NOTICE(p_hwfn, + "Unexpected load code [0x%08x]", load_code); + rc = -EINVAL; + break; + } + + if (rc) { + DP_NOTICE(p_hwfn, + "init phase failed for loadcode 0x%x (rc %d)\n", + load_code, rc); + goto load_err; + } + + rc = qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + return rc; + + /* send DCBX attention request command */ + DP_VERBOSE(p_hwfn, + QED_MSG_DCB, + "sending phony dcbx set command to trigger DCBx attention handling\n"); + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &resp, ¶m); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send DCBX attention request\n"); + return rc; + } + + p_hwfn->hw_init_done = true; + } + + if (IS_PF(cdev)) { + p_hwfn = QED_LEADING_HWFN(cdev); + + /* Get pre-negotiated values for stag, bandwidth etc. 
*/ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n"); + drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET; + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_GET_OEM_UPDATES, + drv_mb_param, &resp, ¶m); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to send GET_OEM_UPDATES attention request\n"); + + drv_mb_param = STORM_FW_VERSION; + rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, + drv_mb_param, &load_code, ¶m); + if (rc) + DP_INFO(p_hwfn, "Failed to update firmware version\n"); + + if (!b_default_mtu) { + rc = qed_mcp_ov_update_mtu(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.mtu); + if (rc) + DP_INFO(p_hwfn, + "Failed to update default mtu\n"); + } + + rc = qed_mcp_ov_update_driver_state(p_hwfn, + p_hwfn->p_main_ptt, + QED_OV_DRIVER_STATE_DISABLED); + if (rc) + DP_INFO(p_hwfn, "Failed to update driver state\n"); + + rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt, + QED_OV_ESWITCH_NONE); + if (rc) + DP_INFO(p_hwfn, "Failed to update eswitch mode\n"); + } + + return 0; + +load_err: + /* The MFW load lock should be released also when initialization fails. + */ + qed_mcp_load_done(p_hwfn, p_hwfn->p_main_ptt); + return rc; +} + +#define QED_HW_STOP_RETRY_LIMIT (10) +static void qed_hw_timers_stop(struct qed_dev *cdev, + struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + int i; + + /* close timers */ + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + + if (cdev->recov_in_prog) + return; + + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { + if ((!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) + break; + + /* Dependent on number of connection/tasks, possibly + * 1ms sleep is required between polls + */ + usleep_range(1000, 2000); + } + + if (i < QED_HW_STOP_RETRY_LIMIT) + return; + + DP_NOTICE(p_hwfn, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); +} + +void qed_hw_timers_stop_all(struct qed_dev *cdev) +{ + int j; + + for_each_hwfn(cdev, j) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); + } +} + +int qed_hw_stop(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc, rc2 = 0; + int j; + + for_each_hwfn(cdev, j) { + p_hwfn = &cdev->hwfns[j]; + p_ptt = p_hwfn->p_main_ptt; + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n"); + + if (IS_VF(cdev)) { + qed_vf_pf_int_cleanup(p_hwfn); + rc = qed_vf_pf_reset(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "qed_vf_pf_reset failed. rc = %d.\n", + rc); + rc2 = -EINVAL; + } + continue; + } + + /* mark the hw as uninitialized... */ + p_hwfn->hw_init_done = false; + + /* Send unload command to MCP */ + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_req(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_REQ command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } + } + + qed_slowpath_irq_sync(p_hwfn); + + /* After this point no MFW attentions are expected, e.g. prevent + * race between pf stop and dcbx pf update. + */ + rc = qed_sp_pf_stop(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to close PF against FW [rc = %d]. 
Continue to stop HW to prevent illegal host access by the device.\n", + rc); + rc2 = -EINVAL; + } + + qed_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); + + /* Disable Attention Generation */ + qed_int_igu_disable_int(p_hwfn, p_ptt); + + qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); + qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); + + qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); + + /* Need to wait 1ms to guarantee SBs are cleared */ + usleep_range(1000, 2000); + + /* Disable PF in HW blocks */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DB_ENABLE, 0); + qed_wr(p_hwfn, p_ptt, QM_REG_PF_EN, 0); + + if (IS_LEAD_HWFN(p_hwfn) && + test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits) && + !QED_IS_FCOE_PERSONALITY(p_hwfn)) + qed_llh_remove_mac_filter(cdev, 0, + p_hwfn->hw_info.hw_mac_addr); + + if (!cdev->recov_in_prog) { + rc = qed_mcp_unload_done(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed sending a UNLOAD_DONE command. rc = %d.\n", + rc); + rc2 = -EINVAL; + } + } + } + + if (IS_PF(cdev) && !cdev->recov_in_prog) { + p_hwfn = QED_LEADING_HWFN(cdev); + p_ptt = QED_LEADING_HWFN(cdev)->p_main_ptt; + + /* Clear the PF's internal FID_enable in the PXP. + * In CMT this should only be done for first hw-function, and + * only after all transactions have stopped for all active + * hw-functions. + */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); + if (rc) { + DP_NOTICE(p_hwfn, + "qed_pglueb_set_pfid_enable() failed. 
rc = %d.\n", + rc); + rc2 = -EINVAL; + } + } + + return rc2; +} + +int qed_hw_stop_fastpath(struct qed_dev *cdev) +{ + int j; + + for_each_hwfn(cdev, j) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; + struct qed_ptt *p_ptt; + + if (IS_VF(cdev)) { + qed_vf_pf_int_cleanup(p_hwfn); + continue; + } + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFDOWN, "Shutting down the fastpath\n"); + + qed_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_FCOE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); + + /* Need to wait 1ms to guarantee SBs are cleared */ + usleep_range(1000, 2000); + qed_ptt_release(p_hwfn, p_ptt); + } + + return 0; +} + +int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt *p_ptt; + + if (IS_VF(p_hwfn->cdev)) + return 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (p_hwfn->p_rdma_info && + p_hwfn->p_rdma_info->active && p_hwfn->b_rdma_enabled_in_prs) + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0x1); + + /* Re-open incoming traffic */ + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +/* Free hwfn memory and resources acquired in hw_hwfn_prepare */ +static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) +{ + qed_ptt_pool_free(p_hwfn); + kfree(p_hwfn->hw_info.p_igu_info); + p_hwfn->hw_info.p_igu_info = NULL; +} + +/* Setup bar access */ +static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) +{ + /* clear indirect access */ + if (QED_IS_AH(p_hwfn->cdev)) { + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_E8_F0_K2, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_EC_F0_K2, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_F0_F0_K2, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_F4_F0_K2, 0); + } else { + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_88_F0_BB, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_8C_F0_BB, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_90_F0_BB, 0); + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_PGL_ADDR_94_F0_BB, 0); + } + + /* Clean previous pglue_b errors if such exist */ + qed_pglueb_clear_err(p_hwfn, p_hwfn->p_main_ptt); + + /* enable internal target-read */ + qed_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); +} + +static void get_function_id(struct qed_hwfn *p_hwfn) +{ + /* ME Register */ + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); + + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); + + p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; + p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PFID); + p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PORT); + + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, + "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", + p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); +} + +static void qed_hw_set_feat(struct qed_hwfn *p_hwfn) +{ + u32 *feat_num = p_hwfn->hw_info.feat_num; + struct qed_sb_cnt_info sb_cnt; + u32 non_l2_sbs = 0; + + memset(&sb_cnt, 0, sizeof(sb_cnt)); + qed_int_get_num_sbs(p_hwfn, 
&sb_cnt); + + if (IS_ENABLED(CONFIG_QED_RDMA) && + QED_IS_RDMA_PERSONALITY(p_hwfn)) { + /* Roce CNQ each requires: 1 status block + 1 CNQ. We divide + * the status blocks equally between L2 / RoCE but with + * consideration as to how many l2 queues / cnqs we have. + */ + feat_num[QED_RDMA_CNQ] = + min_t(u32, sb_cnt.cnt / 2, + RESC_NUM(p_hwfn, QED_RDMA_CNQ_RAM)); + + non_l2_sbs = feat_num[QED_RDMA_CNQ]; + } + if (QED_IS_L2_PERSONALITY(p_hwfn)) { + /* Start by allocating VF queues, then PF's */ + feat_num[QED_VF_L2_QUE] = min_t(u32, + RESC_NUM(p_hwfn, QED_L2_QUEUE), + sb_cnt.iov_cnt); + feat_num[QED_PF_L2_QUE] = min_t(u32, + sb_cnt.cnt - non_l2_sbs, + RESC_NUM(p_hwfn, + QED_L2_QUEUE) - + FEAT_NUM(p_hwfn, + QED_VF_L2_QUE)); + } + + if (QED_IS_FCOE_PERSONALITY(p_hwfn)) + feat_num[QED_FCOE_CQ] = min_t(u32, sb_cnt.cnt, + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); + + if (QED_IS_ISCSI_PERSONALITY(p_hwfn)) + feat_num[QED_ISCSI_CQ] = min_t(u32, sb_cnt.cnt, + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); + + if (QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + feat_num[QED_NVMETCP_CQ] = min_t(u32, sb_cnt.cnt, + RESC_NUM(p_hwfn, + QED_CMDQS_CQS)); + + DP_VERBOSE(p_hwfn, + NETIF_MSG_PROBE, + "#PF_L2_QUEUES=%d VF_L2_QUEUES=%d #ROCE_CNQ=%d FCOE_CQ=%d ISCSI_CQ=%d NVMETCP_CQ=%d #SBS=%d\n", + (int)FEAT_NUM(p_hwfn, QED_PF_L2_QUE), + (int)FEAT_NUM(p_hwfn, QED_VF_L2_QUE), + (int)FEAT_NUM(p_hwfn, QED_RDMA_CNQ), + (int)FEAT_NUM(p_hwfn, QED_FCOE_CQ), + (int)FEAT_NUM(p_hwfn, QED_ISCSI_CQ), + (int)FEAT_NUM(p_hwfn, QED_NVMETCP_CQ), + (int)sb_cnt.cnt); +} + +const char *qed_hw_get_resc_name(enum qed_resources res_id) +{ + switch (res_id) { + case QED_L2_QUEUE: + return "L2_QUEUE"; + case QED_VPORT: + return "VPORT"; + case QED_RSS_ENG: + return "RSS_ENG"; + case QED_PQ: + return "PQ"; + case QED_RL: + return "RL"; + case QED_MAC: + return "MAC"; + case QED_VLAN: + return "VLAN"; + case QED_RDMA_CNQ_RAM: + return "RDMA_CNQ_RAM"; + case QED_ILT: + return "ILT"; + case QED_LL2_RAM_QUEUE: + return "LL2_RAM_QUEUE"; + case QED_LL2_CTX_QUEUE: + return "LL2_CTX_QUEUE"; + case QED_CMDQS_CQS: + return "CMDQS_CQS"; + case QED_RDMA_STATS_QUEUE: + return "RDMA_STATS_QUEUE"; + case QED_BDQ: + return "BDQ"; + case QED_SB: + return "SB"; + default: + return "UNKNOWN_RESOURCE"; + } +} + +static int +__qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 resc_max_val, u32 *p_mcp_resp) +{ + int rc; + + rc = qed_mcp_set_resc_max_val(p_hwfn, p_ptt, res_id, + resc_max_val, p_mcp_resp); + if (rc) { + DP_NOTICE(p_hwfn, + "MFW response failure for a max value setting of resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return rc; + } + + if (*p_mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) + DP_INFO(p_hwfn, + "Failed to set the max value of resource %d [%s]. 
mcp_resp = 0x%08x.\n", + res_id, qed_hw_get_resc_name(res_id), *p_mcp_resp); + + return 0; +} + +static u32 qed_hsi_def_val[][MAX_CHIP_IDS] = { + {MAX_NUM_VFS_BB, MAX_NUM_VFS_K2}, + {MAX_NUM_L2_QUEUES_BB, MAX_NUM_L2_QUEUES_K2}, + {MAX_NUM_PORTS_BB, MAX_NUM_PORTS_K2}, + {MAX_SB_PER_PATH_BB, MAX_SB_PER_PATH_K2,}, + {MAX_NUM_PFS_BB, MAX_NUM_PFS_K2}, + {MAX_NUM_VPORTS_BB, MAX_NUM_VPORTS_K2}, + {ETH_RSS_ENGINE_NUM_BB, ETH_RSS_ENGINE_NUM_K2}, + {MAX_QM_TX_QUEUES_BB, MAX_QM_TX_QUEUES_K2}, + {PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2}, + {RDMA_NUM_STATISTIC_COUNTERS_BB, RDMA_NUM_STATISTIC_COUNTERS_K2}, + {MAX_QM_GLOBAL_RLS, MAX_QM_GLOBAL_RLS}, + {PBF_MAX_CMD_LINES, PBF_MAX_CMD_LINES}, + {BTB_MAX_BLOCKS_BB, BTB_MAX_BLOCKS_K2}, +}; + +u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type) +{ + enum chip_ids chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2; + + if (type >= QED_NUM_HSI_DEFS) { + DP_ERR(cdev, "Unexpected HSI definition type [%d]\n", type); + return 0; + } + + return qed_hsi_def_val[type][chip_id]; +} + +static int +qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resc_max_val, mcp_resp; + u8 res_id; + int rc; + + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { + switch (res_id) { + case QED_LL2_RAM_QUEUE: + resc_max_val = MAX_NUM_LL2_RX_RAM_QUEUES; + break; + case QED_LL2_CTX_QUEUE: + resc_max_val = MAX_NUM_LL2_RX_CTX_QUEUES; + break; + case QED_RDMA_CNQ_RAM: + /* No need for a case for QED_CMDQS_CQS since + * CNQ/CMDQS are the same resource. + */ + resc_max_val = NUM_OF_GLOBAL_QUEUES; + break; + case QED_RDMA_STATS_QUEUE: + resc_max_val = + NUM_OF_RDMA_STATISTIC_COUNTERS(p_hwfn->cdev); + break; + case QED_BDQ: + resc_max_val = BDQ_NUM_RESOURCES; + break; + default: + continue; + } + + rc = __qed_hw_set_soft_resc_size(p_hwfn, p_ptt, res_id, + resc_max_val, &mcp_resp); + if (rc) + return rc; + + /* There's no point to continue to the next resource if the + * command is not supported by the MFW. + * We do continue if the command is supported but the resource + * is unknown to the MFW. Such a resource will be later + * configured with the default allocation values. 
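/* Editorial sketch (not driver code): the "default allocation values" used as
 * a fallback are an even split of the engine-wide pool between the functions
 * on the engine, with PQs kept on the hardware granularity of 8 (see
 * qed_hw_get_dflt_resc() below). Totals here are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int total_pqs = 448, num_funcs = 6, func_idx = 2;
	unsigned int per_func = total_pqs / num_funcs;	/* 74 */

	per_func &= ~0x7u;				/* 72: multiple of 8 */

	printf("func %u gets %u PQs starting at %u\n",
	       func_idx, per_func, per_func * func_idx);
	return 0;
}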
+ */ + if (mcp_resp == FW_MSG_CODE_UNSUPPORTED) + return -EINVAL; + } + + return 0; +} + +static +int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn, + enum qed_resources res_id, + u32 *p_resc_num, u32 *p_resc_start) +{ + u8 num_funcs = p_hwfn->num_funcs_on_engine; + struct qed_dev *cdev = p_hwfn->cdev; + + switch (res_id) { + case QED_L2_QUEUE: + *p_resc_num = NUM_OF_L2_QUEUES(cdev) / num_funcs; + break; + case QED_VPORT: + *p_resc_num = NUM_OF_VPORTS(cdev) / num_funcs; + break; + case QED_RSS_ENG: + *p_resc_num = NUM_OF_RSS_ENGINES(cdev) / num_funcs; + break; + case QED_PQ: + *p_resc_num = NUM_OF_QM_TX_QUEUES(cdev) / num_funcs; + *p_resc_num &= ~0x7; /* The granularity of the PQs is 8 */ + break; + case QED_RL: + *p_resc_num = NUM_OF_QM_GLOBAL_RLS(cdev) / num_funcs; + break; + case QED_MAC: + case QED_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + *p_resc_num = ETH_NUM_MAC_FILTERS / num_funcs; + break; + case QED_ILT: + *p_resc_num = NUM_OF_PXP_ILT_RECORDS(cdev) / num_funcs; + break; + case QED_LL2_RAM_QUEUE: + *p_resc_num = MAX_NUM_LL2_RX_RAM_QUEUES / num_funcs; + break; + case QED_LL2_CTX_QUEUE: + *p_resc_num = MAX_NUM_LL2_RX_CTX_QUEUES / num_funcs; + break; + case QED_RDMA_CNQ_RAM: + case QED_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs; + break; + case QED_RDMA_STATS_QUEUE: + *p_resc_num = NUM_OF_RDMA_STATISTIC_COUNTERS(cdev) / num_funcs; + break; + case QED_BDQ: + if (p_hwfn->hw_info.personality != QED_PCI_ISCSI && + p_hwfn->hw_info.personality != QED_PCI_FCOE && + p_hwfn->hw_info.personality != QED_PCI_NVMETCP) + *p_resc_num = 0; + else + *p_resc_num = 1; + break; + case QED_SB: + /* Since we want its value to reflect whether MFW supports + * the new scheme, have a default of 0. + */ + *p_resc_num = 0; + break; + default: + return -EINVAL; + } + + switch (res_id) { + case QED_BDQ: + if (!*p_resc_num) + *p_resc_start = 0; + else if (p_hwfn->cdev->num_ports_in_engine == 4) + *p_resc_start = p_hwfn->port_id; + else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || + p_hwfn->hw_info.personality == QED_PCI_NVMETCP) + *p_resc_start = p_hwfn->port_id; + else if (p_hwfn->hw_info.personality == QED_PCI_FCOE) + *p_resc_start = p_hwfn->port_id + 2; + break; + default: + *p_resc_start = *p_resc_num * p_hwfn->enabled_func_idx; + break; + } + + return 0; +} + +static int __qed_hw_set_resc_info(struct qed_hwfn *p_hwfn, + enum qed_resources res_id) +{ + u32 dflt_resc_num = 0, dflt_resc_start = 0; + u32 mcp_resp, *p_resc_num, *p_resc_start; + int rc; + + p_resc_num = &RESC_NUM(p_hwfn, res_id); + p_resc_start = &RESC_START(p_hwfn, res_id); + + rc = qed_hw_get_dflt_resc(p_hwfn, res_id, &dflt_resc_num, + &dflt_resc_start); + if (rc) { + DP_ERR(p_hwfn, + "Failed to get default amount for resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return rc; + } + + rc = qed_mcp_get_resc_info(p_hwfn, p_hwfn->p_main_ptt, res_id, + &mcp_resp, p_resc_num, p_resc_start); + if (rc) { + DP_NOTICE(p_hwfn, + "MFW response failure for an allocation request for resource %d [%s]\n", + res_id, qed_hw_get_resc_name(res_id)); + return rc; + } + + /* Default driver values are applied in the following cases: + * - The resource allocation MB command is not supported by the MFW + * - There is an internal error in the MFW while processing the request + * - The resource ID is unknown to the MFW + */ + if (mcp_resp != FW_MSG_CODE_RESOURCE_ALLOC_OK) { + DP_INFO(p_hwfn, + "Failed to receive allocation info for resource %d [%s]. 
mcp_resp = 0x%x. Applying default values [%d,%d].\n", + res_id, + qed_hw_get_resc_name(res_id), + mcp_resp, dflt_resc_num, dflt_resc_start); + *p_resc_num = dflt_resc_num; + *p_resc_start = dflt_resc_start; + goto out; + } + +out: + /* PQs have to divide by 8 [that's the HW granularity]. + * Reduce number so it would fit. + */ + if ((res_id == QED_PQ) && ((*p_resc_num % 8) || (*p_resc_start % 8))) { + DP_INFO(p_hwfn, + "PQs need to align by 8; Number %08x --> %08x, Start %08x --> %08x\n", + *p_resc_num, + (*p_resc_num) & ~0x7, + *p_resc_start, (*p_resc_start) & ~0x7); + *p_resc_num &= ~0x7; + *p_resc_start &= ~0x7; + } + + return 0; +} + +static int qed_hw_set_resc_info(struct qed_hwfn *p_hwfn) +{ + int rc; + u8 res_id; + + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) { + rc = __qed_hw_set_resc_info(p_hwfn, res_id); + if (rc) + return rc; + } + + return 0; +} + +static int qed_hw_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u8 native_ppfid_idx; + int rc; + + /* Calculation of BB/AH is different for native_ppfid_idx */ + if (QED_IS_BB(cdev)) + native_ppfid_idx = p_hwfn->rel_pf_id; + else + native_ppfid_idx = p_hwfn->rel_pf_id / + cdev->num_ports_in_engine; + + rc = qed_mcp_get_ppfid_bitmap(p_hwfn, p_ptt); + if (rc != 0 && rc != -EOPNOTSUPP) + return rc; + else if (rc == -EOPNOTSUPP) + cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; + + if (!(cdev->ppfid_bitmap & (0x1 << native_ppfid_idx))) { + DP_INFO(p_hwfn, + "Fix the PPFID bitmap to include the native PPFID [native_ppfid_idx %hhd, orig_bitmap 0x%hhx]\n", + native_ppfid_idx, cdev->ppfid_bitmap); + cdev->ppfid_bitmap = 0x1 << native_ppfid_idx; + } + + return 0; +} + +static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_unlock_params resc_unlock_params; + struct qed_resc_lock_params resc_lock_params; + bool b_ah = QED_IS_AH(p_hwfn->cdev); + u8 res_id; + int rc; + + /* Setting the max values of the soft resources and the following + * resources allocation queries should be atomic. Since several PFs can + * run in parallel - a resource lock is needed. + * If either the resource lock or resource set value commands are not + * supported - skip the max values setting, release the lock if + * needed, and proceed to the queries. Other failures, including a + * failure to acquire the lock, will cause this function to fail. 
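/* Editorial sketch (not driver code): the lock handling below degrades
 * gracefully - "command not supported" skips the max-value setting,
 * "supported but not granted" aborts, and anything acquired is released
 * before the queries continue. The helpers are hypothetical stand-ins for
 * the qed_mcp_resc_lock()/unlock() calls.
 */
#include <stdio.h>
#include <errno.h>

static int try_lock(int supported, int *granted)
{
	if (!supported)
		return -EINVAL;	/* MFW does not know the command */
	*granted = 1;
	return 0;
}

static int set_soft_resc_sizes(void) { return 0; }
static void unlock(int *granted) { *granted = 0; }

int main(void)
{
	int granted = 0, rc = try_lock(1, &granted);

	if (rc && rc != -EINVAL)
		return rc;		/* real failure */
	if (rc == -EINVAL)
		printf("lock unsupported: skip max-value setting\n");
	else if (!granted)
		return -EBUSY;		/* supported but not granted */
	else
		rc = set_soft_resc_sizes();

	if (granted)
		unlock(&granted);	/* always release what was taken */
	return rc;
}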
+ */ + qed_mcp_resc_lock_default_init(&resc_lock_params, &resc_unlock_params, + QED_RESC_LOCK_RESC_ALLOC, false); + + rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &resc_lock_params); + if (rc && rc != -EINVAL) { + return rc; + } else if (rc == -EINVAL) { + DP_INFO(p_hwfn, + "Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n"); + } else if (!resc_lock_params.b_granted) { + DP_NOTICE(p_hwfn, + "Failed to acquire the resource lock for the resource allocation commands\n"); + return -EBUSY; + } else { + rc = qed_hw_set_soft_resc_size(p_hwfn, p_ptt); + if (rc && rc != -EINVAL) { + DP_NOTICE(p_hwfn, + "Failed to set the max values of the soft resources\n"); + goto unlock_and_exit; + } else if (rc == -EINVAL) { + DP_INFO(p_hwfn, + "Skip the max values setting of the soft resources since it is not supported by the MFW\n"); + rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, + &resc_unlock_params); + if (rc) + DP_INFO(p_hwfn, + "Failed to release the resource lock for the resource allocation commands\n"); + } + } + + rc = qed_hw_set_resc_info(p_hwfn); + if (rc) + goto unlock_and_exit; + + if (resc_lock_params.b_granted && !resc_unlock_params.b_released) { + rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); + if (rc) + DP_INFO(p_hwfn, + "Failed to release the resource lock for the resource allocation commands\n"); + } + + /* PPFID bitmap */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_hw_get_ppfid_bitmap(p_hwfn, p_ptt); + if (rc) + return rc; + } + + /* Sanity for ILT */ + if ((b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_K2)) || + (!b_ah && (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB))) { + DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n", + RESC_START(p_hwfn, QED_ILT), + RESC_END(p_hwfn, QED_ILT) - 1); + return -EINVAL; + } + + /* This will also learn the number of SBs from MFW */ + if (qed_int_igu_reset_cam(p_hwfn, p_ptt)) + return -EINVAL; + + qed_hw_set_feat(p_hwfn); + + for (res_id = 0; res_id < QED_MAX_RESC; res_id++) + DP_VERBOSE(p_hwfn, NETIF_MSG_PROBE, "%s = %d start = %d\n", + qed_hw_get_resc_name(res_id), + RESC_NUM(p_hwfn, res_id), + RESC_START(p_hwfn, res_id)); + + return 0; + +unlock_and_exit: + if (resc_lock_params.b_granted && !resc_unlock_params.b_released) + qed_mcp_resc_unlock(p_hwfn, p_ptt, &resc_unlock_params); + return rc; +} + +static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities, fld; + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + struct qed_mcp_link_speed_params *ext_speed; + struct qed_mcp_link_capabilities *p_caps; + struct qed_mcp_link_params *link; + int i; + + /* Read global nvm_cfg address */ + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + + /* Verify MCP has initialized it */ + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); + return -EINVAL; + } + + /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, core_cfg); + + core_cfg = qed_rd(p_hwfn, p_ptt, addr); + + switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> + NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G: + case 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2: + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4: + break; + default: + DP_NOTICE(p_hwfn, "Unknown port mode in 0x%08x\n", core_cfg); + break; + } + + /* Read default link configuration */ + link = &p_hwfn->mcp_info->link_input; + p_caps = &p_hwfn->mcp_info->link_capabilities; + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, speed_cap_mask)); + link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + link->speed.advertised_speeds = link_temp; + + p_caps->speed_capabilities = link->speed.advertised_speeds; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, link_settings)); + switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> + NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { + case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: + link->speed.autoneg = true; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: + link->speed.forced_speed = 1000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: + link->speed.forced_speed = 10000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_20G: + link->speed.forced_speed = 20000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: + link->speed.forced_speed = 25000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: + link->speed.forced_speed = 40000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: + link->speed.forced_speed = 50000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G: + link->speed.forced_speed = 100000; + break; + default: + DP_NOTICE(p_hwfn, "Unknown Speed in 0x%08x\n", link_temp); + } + + p_caps->default_speed_autoneg = link->speed.autoneg; + + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_DRV_FLOW_CONTROL); + link->pause.autoneg = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(fld & NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + link->loopback_mode = 0; + + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_FEC_FORCE_MODE)) { + case NVM_CFG1_PORT_FEC_FORCE_MODE_NONE: + p_caps->fec_default |= QED_FEC_MODE_NONE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE: + p_caps->fec_default |= QED_FEC_MODE_FIRECODE; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_RS: + p_caps->fec_default |= QED_FEC_MODE_RS; + break; + case NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO: + p_caps->fec_default |= QED_FEC_MODE_AUTO; + break; + default: + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "unknown FEC mode in 0x%08x\n", link_temp); + } + } else { + p_caps->fec_default = QED_FEC_MODE_UNSUPPORTED; + } + + link->fec = p_caps->fec_default; + + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) { + link_temp = qed_rd(p_hwfn, p_ptt, port_cfg_addr + + offsetof(struct nvm_cfg1_port, ext_phy)); + link_temp &= 
NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK; + link_temp >>= NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET; + p_caps->default_eee = QED_MCP_EEE_ENABLED; + link->eee.enable = true; + switch (link_temp) { + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED: + p_caps->default_eee = QED_MCP_EEE_DISABLED; + link->eee.enable = false; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_BALANCED_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE: + p_caps->eee_lpi_timer = + EEE_TX_TIMER_USEC_AGGRESSIVE_TIME; + break; + case NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY: + p_caps->eee_lpi_timer = EEE_TX_TIMER_USEC_LATENCY_TIME; + break; + } + + link->eee.tx_lpi_timer = p_caps->eee_lpi_timer; + link->eee.tx_lpi_enable = link->eee.enable; + link->eee.adv_caps = QED_EEE_1G_ADV | QED_EEE_10G_ADV; + } else { + p_caps->default_eee = QED_MCP_EEE_UNSUPPORTED; + } + + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = &link->ext_speed; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_speed)); + + fld = GET_MFW_FIELD(link_temp, NVM_CFG1_PORT_EXTENDED_SPEED); + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN) + ext_speed->autoneg = true; + + ext_speed->forced_speed = 0; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G) + ext_speed->forced_speed |= QED_EXT_SPEED_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G) + ext_speed->forced_speed |= QED_EXT_SPEED_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G) + ext_speed->forced_speed |= QED_EXT_SPEED_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G) + ext_speed->forced_speed |= QED_EXT_SPEED_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G) + ext_speed->forced_speed |= QED_EXT_SPEED_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_R4; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4) + ext_speed->forced_speed |= QED_EXT_SPEED_100G_P4; + + fld = GET_MFW_FIELD(link_temp, + NVM_CFG1_PORT_EXTENDED_SPEED_CAP); + + ext_speed->advertised_speeds = 0; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_RES; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_1G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_10G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_20G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_25G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G) + ext_speed->advertised_speeds |= QED_EXT_SPEED_MASK_40G; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_50G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2) 
+ ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R2; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_R4; + if (fld & NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4) + ext_speed->advertised_speeds |= + QED_EXT_SPEED_MASK_100G_P4; + + link_temp = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + extended_fec_mode)); + link->ext_fec_mode = link_temp; + + p_caps->default_ext_speed_caps = ext_speed->advertised_speeds; + p_caps->default_ext_speed = ext_speed->forced_speed; + p_caps->default_ext_autoneg = ext_speed->autoneg; + p_caps->default_ext_fec = link->ext_fec_mode; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default extended link config: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, FEC: 0x%02x\n", + ext_speed->forced_speed, + ext_speed->advertised_speeds, ext_speed->autoneg, + p_caps->default_ext_fec); + } + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x, EEE: 0x%02x [0x%08x usec], FEC: 0x%02x\n", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg, + p_caps->default_eee, p_caps->eee_lpi_timer, + p_caps->fec_default); + + if (IS_LEAD_HWFN(p_hwfn)) { + struct qed_dev *cdev = p_hwfn->cdev; + + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, generic_cont0); + + generic_cont0 = qed_rd(p_hwfn, p_ptt, addr); + + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> + NVM_CFG1_GLOB_MF_MODE_OFFSET; + + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS); + break; + case NVM_CFG1_GLOB_MF_MODE_UFP: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_UFP_SPECIFIC) | + BIT(QED_MF_8021Q_TAGGING) | + BIT(QED_MF_DONT_ADD_VLAN0_TAG); + break; + case NVM_CFG1_GLOB_MF_MODE_BD: + cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_8021AD_TAGGING) | + BIT(QED_MF_DONT_ADD_VLAN0_TAG); + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_LL2_NON_UNICAST) | + BIT(QED_MF_INTER_PF_SWITCH) | + BIT(QED_MF_DISABLE_ARFS); + break; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | + BIT(QED_MF_LLH_PROTO_CLSS) | + BIT(QED_MF_LL2_NON_UNICAST); + if (QED_IS_BB(p_hwfn->cdev)) + cdev->mf_bits |= BIT(QED_MF_NEED_DEF_PF); + break; + } + + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + cdev->mf_bits); + + /* In CMT the PF is unknown when the GFS block processes the + * packet. Therefore cannot use searcher as it has a per PF + * database, and thus ARFS must be disabled. 
+ * + */ + if (QED_IS_CMT(cdev)) + cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS); + } + + DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", + p_hwfn->cdev->mf_bits); + + /* Read device capabilities information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, device_capabilities); + + device_capabilities = qed_rd(p_hwfn, p_ptt, addr); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) + __set_bit(QED_DEV_CAP_ETH, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE) + __set_bit(QED_DEV_CAP_FCOE, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI) + __set_bit(QED_DEV_CAP_ISCSI, + &p_hwfn->hw_info.device_capabilities); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE) + __set_bit(QED_DEV_CAP_ROCE, + &p_hwfn->hw_info.device_capabilities); + + /* Read device serial number information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, serial_number); + + for (i = 0; i < 4; i++) + p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4); + + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); +} + +static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id; + u32 reg_function_hide, tmp, eng_mask, low_pfs_mask; + struct qed_dev *cdev = p_hwfn->cdev; + + num_funcs = QED_IS_AH(cdev) ? MAX_NUM_PFS_K2 : MAX_NUM_PFS_BB; + + /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values + * in the other bits are selected. + * Bits 1-15 are for functions 1-15, respectively, and their value is + * '0' only for enabled functions (function 0 always exists and + * enabled). + * In case of CMT, only the "even" functions are enabled, and thus the + * number of functions for both hwfns is learnt from the same bits. 
+ */ + reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); + + if (reg_function_hide & 0x1) { + if (QED_IS_BB(cdev)) { + if (QED_PATH_ID(p_hwfn) && cdev->num_hwfns == 1) { + num_funcs = 0; + eng_mask = 0xaaaa; + } else { + num_funcs = 1; + eng_mask = 0x5554; + } + } else { + num_funcs = 1; + eng_mask = 0xfffe; + } + + /* Get the number of the enabled functions on the engine */ + tmp = (reg_function_hide ^ 0xffffffff) & eng_mask; + while (tmp) { + if (tmp & 0x1) + num_funcs++; + tmp >>= 0x1; + } + + /* Get the PF index within the enabled functions */ + low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1; + tmp = reg_function_hide & eng_mask & low_pfs_mask; + while (tmp) { + if (tmp & 0x1) + enabled_func_idx--; + tmp >>= 0x1; + } + } + + p_hwfn->num_funcs_on_engine = num_funcs; + p_hwfn->enabled_func_idx = enabled_func_idx; + + DP_VERBOSE(p_hwfn, + NETIF_MSG_PROBE, + "PF [rel_id %d, abs_id %d] occupies index %d within the %d enabled functions on the engine\n", + p_hwfn->rel_pf_id, + p_hwfn->abs_pf_id, + p_hwfn->enabled_func_idx, p_hwfn->num_funcs_on_engine); +} + +static void qed_hw_info_port_num(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 addr, global_offsize, global_addr, port_mode; + struct qed_dev *cdev = p_hwfn->cdev; + + /* In CMT there is always only one port */ + if (cdev->num_hwfns > 1) { + cdev->num_ports_in_engine = 1; + cdev->num_ports = 1; + return; + } + + /* Determine the number of ports per engine */ + port_mode = qed_rd(p_hwfn, p_ptt, MISC_REG_PORT_MODE); + switch (port_mode) { + case 0x0: + cdev->num_ports_in_engine = 1; + break; + case 0x1: + cdev->num_ports_in_engine = 2; + break; + case 0x2: + cdev->num_ports_in_engine = 4; + break; + default: + DP_NOTICE(p_hwfn, "Unknown port mode 0x%08x\n", port_mode); + cdev->num_ports_in_engine = 1; /* Default to something */ + break; + } + + /* Get the total number of ports of the device */ + addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, max_ports); + cdev->num_ports = (u8)qed_rd(p_hwfn, p_ptt, addr); +} + +static void qed_get_eee_caps(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_capabilities *p_caps; + u32 eee_status; + + p_caps = &p_hwfn->mcp_info->link_capabilities; + if (p_caps->default_eee == QED_MCP_EEE_UNSUPPORTED) + return; + + p_caps->eee_speed_caps = 0; + eee_status = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, eee_status)); + eee_status = (eee_status & EEE_SUPPORTED_SPEED_MASK) >> + EEE_SUPPORTED_SPEED_OFFSET; + + if (eee_status & EEE_1G_SUPPORTED) + p_caps->eee_speed_caps |= QED_EEE_1G_ADV; + if (eee_status & EEE_10G_ADV) + p_caps->eee_speed_caps |= QED_EEE_10G_ADV; +} + +static int +qed_get_hw_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_pci_personality personality) +{ + int rc; + + /* Since all information is common, only first hwfns should do this */ + if (IS_LEAD_HWFN(p_hwfn)) { + rc = qed_iov_hw_info(p_hwfn); + if (rc) + return rc; + } + + if (IS_LEAD_HWFN(p_hwfn)) + qed_hw_info_port_num(p_hwfn, p_ptt); + + qed_mcp_get_capabilities(p_hwfn, p_ptt); + + qed_hw_get_nvm_info(p_hwfn, p_ptt); + + rc = qed_int_igu_read_cam(p_hwfn, p_ptt); + if (rc) + return rc; + + if (qed_mcp_is_init(p_hwfn)) + ether_addr_copy(p_hwfn->hw_info.hw_mac_addr, + p_hwfn->mcp_info->func_info.mac); + else + eth_random_addr(p_hwfn->hw_info.hw_mac_addr); + + if 
(qed_mcp_is_init(p_hwfn)) { + if (p_hwfn->mcp_info->func_info.ovlan != QED_MCP_VLAN_UNSET) + p_hwfn->hw_info.ovlan = + p_hwfn->mcp_info->func_info.ovlan; + + qed_mcp_cmd_port_init(p_hwfn, p_ptt); + + qed_get_eee_caps(p_hwfn, p_ptt); + + qed_mcp_read_ufp_config(p_hwfn, p_ptt); + } + + if (qed_mcp_is_init(p_hwfn)) { + enum qed_pci_personality protocol; + + protocol = p_hwfn->mcp_info->func_info.protocol; + p_hwfn->hw_info.personality = protocol; + } + + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) + p_hwfn->hw_info.multi_tc_roce_en = true; + + p_hwfn->hw_info.num_hw_tc = NUM_PHYS_TCS_4PORT_K2; + p_hwfn->hw_info.num_active_tc = 1; + + qed_get_num_funcs(p_hwfn, p_ptt); + + if (qed_mcp_is_init(p_hwfn)) + p_hwfn->hw_info.mtu = p_hwfn->mcp_info->func_info.mtu; + + return qed_hw_get_resc(p_hwfn, p_ptt); +} + +static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u16 device_id_mask; + u32 tmp; + + /* Read Vendor Id / Device Id */ + pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, &cdev->vendor_id); + pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, &cdev->device_id); + + /* Determine type */ + device_id_mask = cdev->device_id & QED_DEV_ID_MASK; + switch (device_id_mask) { + case QED_DEV_ID_MASK_BB: + cdev->type = QED_DEV_TYPE_BB; + break; + case QED_DEV_ID_MASK_AH: + cdev->type = QED_DEV_TYPE_AH; + break; + default: + DP_NOTICE(p_hwfn, "Unknown device id 0x%x\n", cdev->device_id); + return -EBUSY; + } + + cdev->chip_num = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_NUM); + cdev->chip_rev = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_REV); + + MASK_FIELD(CHIP_REV, cdev->chip_rev); + + /* Learn number of HW-functions */ + tmp = qed_rd(p_hwfn, p_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); + + if (tmp & (1 << p_hwfn->rel_pf_id)) { + DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); + cdev->num_hwfns = 2; + } else { + cdev->num_hwfns = 1; + } + + cdev->chip_bond_id = qed_rd(p_hwfn, p_ptt, + MISCS_REG_CHIP_TEST_REG) >> 4; + MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); + cdev->chip_metal = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_CHIP_METAL); + MASK_FIELD(CHIP_METAL, cdev->chip_metal); + + DP_INFO(cdev->hwfns, + "Chip details - %s %c%d, Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", + QED_IS_BB(cdev) ? 
"BB" : "AH", + 'A' + cdev->chip_rev, + (int)cdev->chip_metal, + cdev->chip_num, cdev->chip_rev, + cdev->chip_bond_id, cdev->chip_metal); + + return 0; +} + +static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, + void __iomem *p_regview, + void __iomem *p_doorbells, + u64 db_phys_addr, + enum qed_pci_personality personality) +{ + struct qed_dev *cdev = p_hwfn->cdev; + int rc = 0; + + /* Split PCI bars evenly between hwfns */ + p_hwfn->regview = p_regview; + p_hwfn->doorbells = p_doorbells; + p_hwfn->db_phys_addr = db_phys_addr; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_hw_prepare(p_hwfn); + + /* Validate that chip access is feasible */ + if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { + DP_ERR(p_hwfn, + "Reading the ME register returns all Fs; Preventing further chip access\n"); + return -EINVAL; + } + + get_function_id(p_hwfn); + + /* Allocate PTT pool */ + rc = qed_ptt_pool_alloc(p_hwfn); + if (rc) + goto err0; + + /* Allocate the main PTT */ + p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); + + /* First hwfn learns basic information, e.g., number of hwfns */ + if (!p_hwfn->my_id) { + rc = qed_get_dev_info(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + goto err1; + } + + qed_hw_hwfn_prepare(p_hwfn); + + /* Initialize MCP structure */ + rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); + if (rc) { + DP_NOTICE(p_hwfn, "Failed initializing mcp command\n"); + goto err1; + } + + /* Read the device configuration information from the HW and SHMEM */ + rc = qed_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to get HW information\n"); + goto err2; + } + + /* Sending a mailbox to the MFW should be done after qed_get_hw_info() + * is called as it sets the ports number in an engine. 
+ */
+ if (IS_LEAD_HWFN(p_hwfn) && !cdev->recov_in_prog) {
+ rc = qed_mcp_initiate_pf_flr(p_hwfn, p_hwfn->p_main_ptt);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to initiate PF FLR\n");
+ }
+
+ /* NVRAM info initialization and population */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_mcp_nvm_info_populate(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to populate nvm info shadow\n");
+ goto err2;
+ }
+ }
+
+ /* Allocate the init RT array and initialize the init-ops engine */
+ rc = qed_init_alloc(p_hwfn);
+ if (rc)
+ goto err3;
+
+ return rc;
+err3:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_mcp_nvm_info_free(p_hwfn);
+err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_iov_free_hw_info(p_hwfn->cdev);
+ qed_mcp_free(p_hwfn);
+err1:
+ qed_hw_hwfn_free(p_hwfn);
+err0:
+ return rc;
+}
+
+int qed_hw_prepare(struct qed_dev *cdev,
+ int personality)
+{
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ int rc;
+
+ /* Store the precompiled init data ptrs */
+ if (IS_PF(cdev))
+ qed_init_iro_array(cdev);
+
+ /* Initialize the first hwfn - will learn number of hwfns */
+ rc = qed_hw_prepare_single(p_hwfn,
+ cdev->regview,
+ cdev->doorbells,
+ cdev->db_phys_addr,
+ personality);
+ if (rc)
+ return rc;
+
+ personality = p_hwfn->hw_info.personality;
+
+ /* Initialize the rest of the hwfns */
+ if (cdev->num_hwfns > 1) {
+ void __iomem *p_regview, *p_doorbell;
+ u64 db_phys_addr;
+ u32 offset;
+
+ /* adjust bar offset for second engine */
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_0) / 2;
+ p_regview = cdev->regview + offset;
+
+ offset = qed_hw_bar_size(p_hwfn, p_hwfn->p_main_ptt,
+ BAR_ID_1) / 2;
+
+ p_doorbell = cdev->doorbells + offset;
+
+ db_phys_addr = cdev->db_phys_addr + offset;
+
+ /* prepare second hw function */
+ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
+ p_doorbell, db_phys_addr,
+ personality);
+
+ /* in case of error, need to free the previously
+ * initialized hwfn 0.
+ */ + if (rc) { + if (IS_PF(cdev)) { + qed_init_free(p_hwfn); + qed_mcp_nvm_info_free(p_hwfn); + qed_mcp_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); + } + } + } + + return rc; +} + +void qed_hw_remove(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + int i; + + if (IS_PF(cdev)) + qed_mcp_ov_update_driver_state(p_hwfn, p_hwfn->p_main_ptt, + QED_OV_DRIVER_STATE_NOT_LOADED); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (IS_VF(cdev)) { + qed_vf_pf_release(p_hwfn); + continue; + } + + qed_init_free(p_hwfn); + qed_hw_hwfn_free(p_hwfn); + qed_mcp_free(p_hwfn); + } + + qed_iov_free_hw_info(cdev); + + qed_mcp_nvm_info_free(p_hwfn); +} + +int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) { + u16 min, max; + + min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE); + DP_NOTICE(p_hwfn, + "l2_queue id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_L2_QUEUE) + src_id; + + return 0; +} + +int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_VPORT)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, QED_VPORT); + max = min + RESC_NUM(p_hwfn, QED_VPORT); + DP_NOTICE(p_hwfn, + "vport id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_VPORT) + src_id; + + return 0; +} + +int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, QED_RSS_ENG)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, QED_RSS_ENG); + max = min + RESC_NUM(p_hwfn, QED_RSS_ENG); + DP_NOTICE(p_hwfn, + "rss_eng id [%d] is not valid, available indices [%d - %d]\n", + src_id, min, max); + + return -EINVAL; + } + + *dst_id = RESC_START(p_hwfn, QED_RSS_ENG) + src_id; + + return 0; +} + +static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 hw_addr, void *p_eth_qzone, + size_t eth_qzone_size, u8 timeset) +{ + struct coalescing_timeset *p_coal_timeset; + + if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) { + DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n"); + return -EINVAL; + } + + p_coal_timeset = p_eth_qzone; + memset(p_eth_qzone, 0, eth_qzone_size); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset); + SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1); + qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size); + + return 0; +} + +int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle) +{ + struct qed_queue_cid *p_cid = p_handle; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0; + + p_hwfn = p_cid->p_owner; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_set_coalesce(p_hwfn, rx_coal, tx_coal, p_cid); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (rx_coal) { + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + } + + if (tx_coal) { + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, p_cid); + if (rc) + goto out; + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + } +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid) +{ + struct ustorm_eth_queue_zone eth_qzone; + u8 
timeset, timer_res;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->sb_igu_id, false);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_USDM_RAM +
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+ if (rc)
+ goto out;
+
+out:
+ return rc;
+}
+
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 coalesce, struct qed_queue_cid *p_cid)
+{
+ struct xstorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7bit wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res,
+ p_cid->sb_igu_id, true);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_XSDM_RAM +
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
+out:
+ return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].wfq = (wfq_speed * QED_WFQ_UNIT) /
+ min_pf_rate;
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].wfq);
+ }
+}
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+ u32 min_pf_rate)
+
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ * rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */ +static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, + u16 vport_id, u32 req_rate, u32 min_pf_rate) +{ + u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; + int non_requested_count = 0, req_count = 0, i, num_vports; + + num_vports = p_hwfn->qm_info.num_vports; + + if (num_vports < 2) { + DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports); + return -EINVAL; + } + + /* Accounting for the vports which are configured for WFQ explicitly */ + for (i = 0; i < num_vports; i++) { + u32 tmp_speed; + + if ((i != vport_id) && + p_hwfn->qm_info.wfq_data[i].configured) { + req_count++; + tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + total_req_min_rate += tmp_speed; + } + } + + /* Include current vport data as well */ + req_count++; + total_req_min_rate += req_rate; + non_requested_count = num_vports - req_count; + + if (req_rate < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + vport_id, req_rate, min_pf_rate); + return -EINVAL; + } + + if (num_vports > QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Number of vports is greater than %d\n", + QED_WFQ_UNIT); + return -EINVAL; + } + + if (total_req_min_rate > min_pf_rate) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n", + total_req_min_rate, min_pf_rate); + return -EINVAL; + } + + total_left_rate = min_pf_rate - total_req_min_rate; + + left_rate_per_vp = total_left_rate / non_requested_count; + if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n", + left_rate_per_vp, min_pf_rate); + return -EINVAL; + } + + p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; + p_hwfn->qm_info.wfq_data[vport_id].configured = true; + + for (i = 0; i < num_vports; i++) { + if (p_hwfn->qm_info.wfq_data[i].configured) + continue; + + p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; + } + + return 0; +} + +static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 vp_id, u32 rate) +{ + struct qed_mcp_link_state *p_link; + int rc = 0; + + p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output; + + if (!p_link->min_pf_rate) { + p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; + p_hwfn->qm_info.wfq_data[vp_id].configured = true; + return rc; + } + + rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); + + if (!rc) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + else + DP_NOTICE(p_hwfn, + "Validation failed while configuring min rate\n"); + + return rc; +} + +static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 min_pf_rate) +{ + bool use_wfq = false; + int rc = 0; + u16 i; + + /* Validate all pre configured vports for wfq */ + for (i = 0; i < p_hwfn->qm_info.num_vports; i++) { + u32 rate; + + if (!p_hwfn->qm_info.wfq_data[i].configured) + continue; + + rate = p_hwfn->qm_info.wfq_data[i].min_speed; + use_wfq = true; + + rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate); + if (rc) { + DP_NOTICE(p_hwfn, + "WFQ validation failed while configuring min rate\n"); + break; + } + } + + if (!rc && use_wfq) + qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + else + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + + 
return rc; +} + +/* Main API for qed clients to configure vport min rate. + * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] + * rate - Speed in Mbps needs to be assigned to a given vport. + */ +int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate) +{ + int i, rc = -EINVAL; + + /* Currently not supported; Might change in future */ + if (cdev->num_hwfns > 1) { + DP_NOTICE(cdev, + "WFQ configuration is not supported for this device\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); + + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +/* API to configure WFQ from mcp link change */ +void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, + struct qed_ptt *p_ptt, u32 min_pf_rate) +{ + int i; + + if (cdev->num_hwfns > 1) { + DP_VERBOSE(cdev, + NETIF_MSG_LINK, + "WFQ configuration is not supported for this device\n"); + return; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + __qed_configure_vp_wfq_on_link_change(p_hwfn, p_ptt, + min_pf_rate); + } +} + +int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 max_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; + + if (!p_link->line_speed && (max_bw != 100)) + return rc; + + p_link->speed = (p_link->line_speed * max_bw) / 100; + p_hwfn->qm_info.pf_rl = p_link->speed; + + /* Since the limiter also affects Tx-switched traffic, we don't want it + * to limit such traffic in case there's no actual limit. + * In that case, set limit to imaginary high boundary. 
+ */ + if (max_bw == 100) + p_hwfn->qm_info.pf_rl = 100000; + + rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_hwfn->qm_info.pf_rl); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + + return rc; +} + +/* Main API to configure PF max bandwidth where bw range is [1 - 100] */ +int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw) +{ + int i, rc = -EINVAL; + + if (max_bw < 1 || max_bw > 100) { + DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + + qed_ptt_release(p_hwfn, p_ptt); + + if (rc) + break; + } + + return rc; +} + +int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 min_bw) +{ + int rc = 0; + + p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; + p_hwfn->qm_info.pf_wfq = min_bw; + + if (!p_link->line_speed) + return rc; + + p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; + + rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configured MIN bandwidth to be %d Mb/sec\n", + p_link->min_pf_rate); + + return rc; +} + +/* Main API to configure PF min bandwidth where bw range is [1-100] */ +int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw) +{ + int i, rc = -EINVAL; + + if (min_bw < 1 || min_bw > 100) { + DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev); + struct qed_mcp_link_state *p_link; + struct qed_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + + if (p_link->min_pf_rate) { + u32 min_rate = p_link->min_pf_rate; + + rc = __qed_configure_vp_wfq_on_link_change(p_hwfn, + p_ptt, + min_rate); + } + + qed_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_link_state *p_link; + + p_link = &p_hwfn->mcp_info->link_output; + + if (p_link->min_pf_rate) + qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + + memset(p_hwfn->qm_info.wfq_data, 0, + sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports); +} + +int qed_device_num_ports(struct qed_dev *cdev) +{ + return cdev->num_ports; +} + +void qed_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, __le16 *fw_lsb, u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +} + +static int qed_llh_shadow_remove_all_filters(struct qed_dev *cdev, u8 ppfid) +{ + struct qed_llh_info *p_llh_info = cdev->p_llh_info; + struct qed_llh_filter_info *p_filters; + int rc; + + rc = qed_llh_shadow_sanity(cdev, ppfid, 0, "remove_all"); + if (rc) + return rc; + + p_filters = p_llh_info->pp_filters[ppfid]; + memset(p_filters, 0, 
NIG_REG_LLH_FUNC_FILTER_EN_SIZE * + sizeof(*p_filters)); + + return 0; +} + +static void qed_llh_clear_ppfid_filters(struct qed_dev *cdev, u8 ppfid) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u8 filter_idx, abs_ppfid; + int rc = 0; + + if (!p_ptt) + return; + + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) && + !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) + goto out; + + rc = qed_llh_abs_ppfid(cdev, ppfid, &abs_ppfid); + if (rc) + goto out; + + rc = qed_llh_shadow_remove_all_filters(cdev, ppfid); + if (rc) + goto out; + + for (filter_idx = 0; filter_idx < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; + filter_idx++) { + rc = qed_llh_remove_filter(p_hwfn, p_ptt, + abs_ppfid, filter_idx); + if (rc) + goto out; + } +out: + qed_ptt_release(p_hwfn, p_ptt); +} + +int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port) +{ + return qed_llh_add_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_SRC_PORT, + src_port, QED_LLH_DONT_CARE); +} + +void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port) +{ + qed_llh_remove_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_SRC_PORT, + src_port, QED_LLH_DONT_CARE); +} + +int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port) +{ + return qed_llh_add_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_DONT_CARE, dest_port); +} + +void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port) +{ + qed_llh_remove_protocol_filter(cdev, 0, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_DONT_CARE, dest_port); +} + +void qed_llh_clear_all_filters(struct qed_dev *cdev) +{ + u8 ppfid; + + if (!test_bit(QED_MF_LLH_PROTO_CLSS, &cdev->mf_bits) && + !test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits)) + return; + + for (ppfid = 0; ppfid < cdev->p_llh_info->num_ppfid; ppfid++) + qed_llh_clear_ppfid_filters(cdev, ppfid); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h new file mode 100644 index 0000000000..94d4f9413a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -0,0 +1,529 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_DEV_API_H +#define _QED_DEV_API_H + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/qed/qed_chain.h> +#include <linux/qed/qed_if.h> +#include "qed_int.h" + +/** + * qed_init_dp(): Initialize the debug level. + * + * @cdev: Qed dev pointer. + * @dp_module: Module debug parameter. + * @dp_level: Module debug level. + * + * Return: Void. + */ +void qed_init_dp(struct qed_dev *cdev, + u32 dp_module, + u8 dp_level); + +/** + * qed_init_struct(): Initialize the device structure to + * its defaults. + * + * @cdev: Qed dev pointer. + * + * Return: Void. + */ +void qed_init_struct(struct qed_dev *cdev); + +/** + * qed_resc_free: Free device resources. + * + * @cdev: Qed dev pointer. + * + * Return: Void. + */ +void qed_resc_free(struct qed_dev *cdev); + +/** + * qed_resc_alloc(): Alloc device resources. + * + * @cdev: Qed dev pointer. + * + * Return: Int. + */ +int qed_resc_alloc(struct qed_dev *cdev); + +/** + * qed_resc_setup(): Setup device resources. + * + * @cdev: Qed dev pointer. + * + * Return: Void. 
+ */ +void qed_resc_setup(struct qed_dev *cdev); + +enum qed_override_force_load { + QED_OVERRIDE_FORCE_LOAD_NONE, + QED_OVERRIDE_FORCE_LOAD_ALWAYS, + QED_OVERRIDE_FORCE_LOAD_NEVER, +}; + +struct qed_drv_load_params { + /* Indicates whether the driver is running over a crash kernel. + * As part of the load request, this will be used for providing the + * driver role to the MFW. + * In case of a crash kernel over PDA - this should be set to false. + */ + bool is_crash_kernel; + + /* The timeout value that the MFW should use when locking the engine for + * the driver load process. + * A value of '0' means the default value, and '255' means no timeout. + */ + u8 mfw_timeout_val; +#define QED_LOAD_REQ_LOCK_TO_DEFAULT 0 +#define QED_LOAD_REQ_LOCK_TO_NONE 255 + + /* Avoid engine reset when first PF loads on it */ + bool avoid_eng_reset; + + /* Allow overriding the default force load behavior */ + enum qed_override_force_load override_force_load; +}; + +struct qed_hw_init_params { + /* Tunneling parameters */ + struct qed_tunnel_info *p_tunn; + + bool b_hw_start; + + /* Interrupt mode [msix, inta, etc.] to use */ + enum qed_int_mode int_mode; + + /* NPAR tx switching to be used for vports for tx-switching */ + bool allow_npar_tx_switch; + + /* Binary fw data pointer in binary fw file */ + const u8 *bin_fw_data; + + /* Driver load parameters */ + struct qed_drv_load_params *p_drv_load_params; +}; + +/** + * qed_hw_init(): Init Qed hardware. + * + * @cdev: Qed dev pointer. + * @p_params: Pointers to params. + * + * Return: Int. + */ +int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params); + +/** + * qed_hw_timers_stop_all(): Stop the timers HW block. + * + * @cdev: Qed dev pointer. + * + * Return: void. + */ +void qed_hw_timers_stop_all(struct qed_dev *cdev); + +/** + * qed_hw_stop(): Stop Qed hardware. + * + * @cdev: Qed dev pointer. + * + * Return: int. + */ +int qed_hw_stop(struct qed_dev *cdev); + +/** + * qed_hw_stop_fastpath(): Should be called incase + * slowpath is still required for the device, + * but fastpath is not. + * + * @cdev: Qed dev pointer. + * + * Return: Int. + */ +int qed_hw_stop_fastpath(struct qed_dev *cdev); + +/** + * qed_hw_start_fastpath(): Restart fastpath traffic, + * only if hw_stop_fastpath was called. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); + +/** + * qed_hw_prepare(): Prepare Qed hardware. + * + * @cdev: Qed dev pointer. + * @personality: Personality to initialize. + * + * Return: Int. + */ +int qed_hw_prepare(struct qed_dev *cdev, + int personality); + +/** + * qed_hw_remove(): Remove Qed hardware. + * + * @cdev: Qed dev pointer. + * + * Return: Void. + */ +void qed_hw_remove(struct qed_dev *cdev); + +/** + * qed_ptt_acquire(): Allocate a PTT window. + * + * @p_hwfn: HW device data. + * + * Return: struct qed_ptt. + * + * Should be called at the entry point to the driver (at the beginning of an + * exported function). + */ +struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); + +/** + * qed_ptt_acquire_context(): Allocate a PTT window honoring the context + * atomicy. + * + * @p_hwfn: HW device data. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: struct qed_ptt. + * + * Should be called at the entry point to the driver + * (at the beginning of an exported function). 
+ */ +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, + bool is_atomic); + +/** + * qed_ptt_release(): Release PTT Window. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + * + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT. + */ +void qed_ptt_release(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); +void qed_reset_vport_stats(struct qed_dev *cdev); + +enum qed_dmae_address_type_t { + QED_DMAE_ADDRESS_HOST_VIRT, + QED_DMAE_ADDRESS_HOST_PHYS, + QED_DMAE_ADDRESS_GRC +}; + +/** + * qed_dmae_host2grc(): Copy data from source addr to + * dmae registers using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @grc_addr: GRC address (dmae_data_offset). + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. + */ +int +qed_dmae_host2grc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u64 source_addr, + u32 grc_addr, + u32 size_in_dwords, + struct qed_dmae_params *p_params); + + /** + * qed_dmae_grc2host(): Read data from dmae data offset + * to source address using the given ptt. + * + * @p_ptt: P_ptt. + * @grc_addr: GRC address (dmae_data_offset). + * @dest_addr: Destination Address. + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. + */ +int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, + struct qed_dmae_params *p_params); + +/** + * qed_dmae_host2host(): Copy data from to source address + * to a destination adrress (for SRIOV) using the given + * ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @dest_addr: Destination address. + * @size_in_dwords: size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. + */ +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params); + +int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, + struct qed_chain_init_params *params); +void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); + +/** + * qed_fw_l2_queue(): Get absolute L2 queue ID. + * + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. + * + * Return: Int. + */ +int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, + u16 src_id, + u16 *dst_id); + +/** + * qed_fw_vport(): Get absolute vport ID. + * + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. + * + * Return: Int. + */ +int qed_fw_vport(struct qed_hwfn *p_hwfn, + u8 src_id, + u8 *dst_id); + +/** + * qed_fw_rss_eng(): Get absolute RSS engine ID. + * + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. + * + * Return: Int. + */ +int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, + u8 src_id, + u8 *dst_id); + +/** + * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter + * banks that are allocated to the PF. + * + * @cdev: Qed dev pointer. + * + * Return: u8 Number of LLH filter banks. + */ +u8 qed_llh_get_num_ppfid(struct qed_dev *cdev); + +enum qed_eng { + QED_ENG0, + QED_ENG1, + QED_BOTH_ENG, +}; + +/** + * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given + * LLH filter bank. + * + * @cdev: Qed dev pointer. 
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @eng: Engine. + * + * Return: Int. + */ +int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, + u8 ppfid, enum qed_eng eng); + +/** + * qed_llh_set_roce_affinity(): Set the RoCE engine affinity. + * + * @cdev: Qed dev pointer. + * @eng: Engine. + * + * Return: Int. + */ +int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng); + +/** + * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter + * bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @mac_addr: MAC to add. + * + * Return: Int. + */ +int qed_llh_add_mac_filter(struct qed_dev *cdev, + u8 ppfid, const u8 mac_addr[ETH_ALEN]); + +/** + * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given + * filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Ppfid. + * @mac_addr: MAC to remove + * + * Return: Void. + */ +void qed_llh_remove_mac_filter(struct qed_dev *cdev, + u8 ppfid, u8 mac_addr[ETH_ALEN]); + +enum qed_llh_prot_filter_type_t { + QED_LLH_FILTER_ETHERTYPE, + QED_LLH_FILTER_TCP_SRC_PORT, + QED_LLH_FILTER_TCP_DEST_PORT, + QED_LLH_FILTER_TCP_SRC_AND_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_PORT, + QED_LLH_FILTER_UDP_DEST_PORT, + QED_LLH_FILTER_UDP_SRC_AND_DEST_PORT +}; + +/** + * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the + * given filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. + * + * Return: Int. + */ +int +qed_llh_add_protocol_filter(struct qed_dev *cdev, + u8 ppfid, + enum qed_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port); + +/** + * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from + * the given filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. + */ +void +qed_llh_remove_protocol_filter(struct qed_dev *cdev, + u8 ppfid, + enum qed_llh_prot_filter_type_t type, + u16 source_port_or_eth_type, u16 dest_port); + +/** + * qed_final_cleanup(): Cleanup of previous driver remains prior to load. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @id: For PF, engine-relative. For VF, PF-relative. + * @is_vf: True iff cleanup is made for a VF. + * + * Return: Int. + */ +int qed_final_cleanup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 id, bool is_vf); + +/** + * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue. + * + * @p_hwfn: HW device data. + * @coal: Store coalesce value read from the hardware. + * @handle: P_handle. + * + * Return: Int. + **/ +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); + +/** + * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and + * Tx queue. The fact that we can configure coalescing to up to 511, but on + * varying accuracy [the bigger the value the less accurate] up to a mistake + * of 3usec for the highest values. + * While the API allows setting coalescing per-qid, all queues sharing a SB + * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] + * otherwise configuration would break. 
+ * + * @rx_coal: Rx Coalesce value in micro seconds. + * @tx_coal: TX Coalesce value in micro seconds. + * @p_handle: P_handle. + * + * Return: Int. + **/ +int +qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); + +/** + * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_enable: True/False. + * + * Return: Int. + */ +int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_enable); + +/** + * qed_db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. + * + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address of where db_data is stored. + * @db_width: Doorbell is 32b pr 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. + * + * Return: Int. + */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space); + +/** + * qed_db_recovery_del() - remove doorbell information from the doorbell + * recovery mechanism. db_data serves as key (db_addr is not unique). + * + * @cdev: Qed dev pointer. + * @db_addr: doorbell address. + * @db_data: address where db_data is stored. Serves as key for the + * entry to delete. + * + * Return: Int. + */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data); + +const char *qed_hw_get_resc_name(enum qed_resources res_id); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c new file mode 100644 index 0000000000..be5cc8b79b --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -0,0 +1,242 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Marvell/Qlogic FastLinQ NIC driver + * + * Copyright (C) 2020 Marvell International Ltd. + */ + +#include <linux/kernel.h> +#include <linux/qed/qed_if.h> +#include <linux/vmalloc.h> +#include "qed.h" +#include "qed_devlink.h" + +enum qed_devlink_param_id { + QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + QED_DEVLINK_PARAM_ID_IWARP_CMT, +}; + +struct qed_fw_fatal_ctx { + enum qed_hw_err_type err_type; +}; + +int qed_report_fatal_error(struct devlink *devlink, enum qed_hw_err_type err_type) +{ + struct qed_devlink *qdl = devlink_priv(devlink); + struct qed_fw_fatal_ctx fw_fatal_ctx = { + .err_type = err_type, + }; + + if (qdl->fw_reporter) + devlink_health_report(qdl->fw_reporter, + "Fatal error occurred", &fw_fatal_ctx); + + return 0; +} + +static int +qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter, + struct devlink_fmsg *fmsg, void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct qed_devlink *qdl = devlink_health_reporter_priv(reporter); + struct qed_fw_fatal_ctx *fw_fatal_ctx = priv_ctx; + struct qed_dev *cdev = qdl->cdev; + u32 dbg_data_buf_size; + u8 *p_dbg_data_buf; + int err; + + /* Having context means that was a dump request after fatal, + * so we enable extra debugging while gathering the dump, + * just in case + */ + cdev->print_dbg_data = fw_fatal_ctx ? 
true : false; + + dbg_data_buf_size = qed_dbg_all_data_size(cdev); + p_dbg_data_buf = vzalloc(dbg_data_buf_size); + if (!p_dbg_data_buf) { + DP_NOTICE(cdev, + "Failed to allocate memory for a debug data buffer\n"); + return -ENOMEM; + } + + err = qed_dbg_all_data(cdev, p_dbg_data_buf); + if (err) { + DP_NOTICE(cdev, "Failed to obtain debug data\n"); + vfree(p_dbg_data_buf); + return err; + } + + err = devlink_fmsg_binary_pair_put(fmsg, "dump_data", + p_dbg_data_buf, dbg_data_buf_size); + + vfree(p_dbg_data_buf); + + return err; +} + +static int +qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter, + void *priv_ctx, + struct netlink_ext_ack *extack) +{ + struct qed_devlink *qdl = devlink_health_reporter_priv(reporter); + struct qed_dev *cdev = qdl->cdev; + + qed_recovery_process(cdev); + + return 0; +} + +static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = { + .name = "fw_fatal", + .recover = qed_fw_fatal_reporter_recover, + .dump = qed_fw_fatal_reporter_dump, +}; + +#define QED_REPORTER_FW_GRACEFUL_PERIOD 0 + +void qed_fw_reporters_create(struct devlink *devlink) +{ + struct qed_devlink *dl = devlink_priv(devlink); + + dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops, + QED_REPORTER_FW_GRACEFUL_PERIOD, dl); + if (IS_ERR(dl->fw_reporter)) { + DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n", + PTR_ERR(dl->fw_reporter)); + dl->fw_reporter = NULL; + } +} + +void qed_fw_reporters_destroy(struct devlink *devlink) +{ + struct qed_devlink *dl = devlink_priv(devlink); + struct devlink_health_reporter *rep; + + rep = dl->fw_reporter; + + if (!IS_ERR_OR_NULL(rep)) + devlink_health_reporter_destroy(rep); +} + +static int qed_dl_param_get(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct qed_devlink *qed_dl = devlink_priv(dl); + struct qed_dev *cdev; + + cdev = qed_dl->cdev; + ctx->val.vbool = cdev->iwarp_cmt; + + return 0; +} + +static int qed_dl_param_set(struct devlink *dl, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct qed_devlink *qed_dl = devlink_priv(dl); + struct qed_dev *cdev; + + cdev = qed_dl->cdev; + cdev->iwarp_cmt = ctx->val.vbool; + + return 0; +} + +static const struct devlink_param qed_devlink_params[] = { + DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT, + "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + qed_dl_param_get, qed_dl_param_set, NULL), +}; + +static int qed_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct qed_devlink *qed_dl = devlink_priv(devlink); + struct qed_dev *cdev = qed_dl->cdev; + struct qed_dev_info *dev_info; + char buf[100]; + int err; + + dev_info = &cdev->common_dev_info; + + memcpy(buf, cdev->hwfns[0].hw_info.part_num, sizeof(cdev->hwfns[0].hw_info.part_num)); + buf[sizeof(cdev->hwfns[0].hw_info.part_num)] = 0; + + if (buf[0]) { + err = devlink_info_board_serial_number_put(req, buf); + if (err) + return err; + } + + snprintf(buf, sizeof(buf), "%d.%d.%d.%d", + GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_3), + GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_2), + GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_1), + GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_0)); + + err = devlink_info_version_stored_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf); + if (err) + return err; + + snprintf(buf, sizeof(buf), "%d.%d.%d.%d", + dev_info->fw_major, + dev_info->fw_minor, + dev_info->fw_rev, + dev_info->fw_eng); + + 
return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW_APP, buf); +} + +static const struct devlink_ops qed_dl_ops = { + .info_get = qed_devlink_info_get, +}; + +struct devlink *qed_devlink_register(struct qed_dev *cdev) +{ + struct qed_devlink *qdevlink; + struct devlink *dl; + int rc; + + dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink), + &cdev->pdev->dev); + if (!dl) + return ERR_PTR(-ENOMEM); + + qdevlink = devlink_priv(dl); + qdevlink->cdev = cdev; + + rc = devlink_params_register(dl, qed_devlink_params, + ARRAY_SIZE(qed_devlink_params)); + if (rc) + goto err_unregister; + + cdev->iwarp_cmt = false; + + qed_fw_reporters_create(dl); + devlink_register(dl); + return dl; + +err_unregister: + devlink_free(dl); + + return ERR_PTR(rc); +} + +void qed_devlink_unregister(struct devlink *devlink) +{ + if (!devlink) + return; + + devlink_unregister(devlink); + qed_fw_reporters_destroy(devlink); + + devlink_params_unregister(devlink, qed_devlink_params, + ARRAY_SIZE(qed_devlink_params)); + + devlink_free(devlink); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.h b/drivers/net/ethernet/qlogic/qed/qed_devlink.h new file mode 100644 index 0000000000..ccc7d1d1bf --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Marvell/Qlogic FastLinQ NIC driver + * + * Copyright (C) 2020 Marvell International Ltd. + */ +#ifndef _QED_DEVLINK_H +#define _QED_DEVLINK_H + +#include <linux/qed/qed_if.h> +#include <net/devlink.h> + +struct devlink *qed_devlink_register(struct qed_dev *cdev); +void qed_devlink_unregister(struct devlink *devlink); + +void qed_fw_reporters_create(struct devlink *devlink); +void qed_fw_reporters_destroy(struct devlink *devlink); + +int qed_report_fatal_error(struct devlink *dl, enum qed_hw_err_type err_type); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c new file mode 100644 index 0000000000..04602ac947 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -0,0 +1,1045 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#define __PREVENT_DUMP_MEM_ARR__ +#define __PREVENT_PXP_GLOBAL_WIN__ +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_fcoe.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_iro_hsi.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include <linux/qed/qed_fcoe_if.h> + +struct qed_fcoe_conn { + struct list_head list_entry; + bool free_on_delete; + + u16 conn_id; + u32 icid; + u32 fw_cid; + u8 layer_code; + + dma_addr_t sq_pbl_addr; + dma_addr_t sq_curr_page_addr; + dma_addr_t sq_next_page_addr; + dma_addr_t xferq_pbl_addr; + void *xferq_pbl_addr_virt_addr; + dma_addr_t xferq_addr[4]; + void *xferq_addr_virt_addr[4]; + dma_addr_t confq_pbl_addr; + void *confq_pbl_addr_virt_addr; + dma_addr_t confq_addr[2]; + void *confq_addr_virt_addr[2]; + + dma_addr_t terminate_params; + + u16 dst_mac_addr_lo; + u16 dst_mac_addr_mid; + u16 dst_mac_addr_hi; + u16 src_mac_addr_lo; + u16 src_mac_addr_mid; + u16 src_mac_addr_hi; + + u16 tx_max_fc_pay_len; + u16 e_d_tov_timer_val; + u16 rec_tov_timer_val; + u16 rx_max_fc_pay_len; + u16 vlan_tag; + u16 physical_q0; + + struct fc_addr_nw s_id; + u8 max_conc_seqs_c3; + struct fc_addr_nw d_id; + u8 flags; + u8 def_q_idx; +}; + +static int +qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_fcoe_pf_params *fcoe_pf_params = NULL; + struct fcoe_init_ramrod_params *p_ramrod = NULL; + struct fcoe_init_func_ramrod_data *p_data; + struct fcoe_conn_context *p_cxt = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + struct qed_cxt_info cxt_info; + u32 dummy_cid; + int rc = 0; + __le16 tmp; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_init; + p_data = &p_ramrod->init_ramrod_data; + fcoe_pf_params = &p_hwfn->pf_params.fcoe_pf_params; + + /* Sanity */ + if (fcoe_pf_params->num_cqs > p_hwfn->hw_info.feat_num[QED_FCOE_CQ]) { + DP_ERR(p_hwfn, + "Cannot satisfy CQ amount. CQs requested %d, CQs available %d. 
Aborting function start\n", + fcoe_pf_params->num_cqs, + p_hwfn->hw_info.feat_num[QED_FCOE_CQ]); + rc = -EINVAL; + goto err; + } + + p_data->mtu = cpu_to_le16(fcoe_pf_params->mtu); + tmp = cpu_to_le16(fcoe_pf_params->sq_num_pbl_pages); + p_data->sq_num_pages_in_pbl = tmp; + + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &dummy_cid); + if (rc) + goto err; + + cxt_info.iid = dummy_cid; + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); + if (rc) { + DP_NOTICE(p_hwfn, "Cannot find context info for dummy cid=%d\n", + dummy_cid); + goto err; + } + p_cxt = cxt_info.p_cxt; + memset(p_cxt, 0, sizeof(*p_cxt)); + + SET_FIELD(p_cxt->tstorm_ag_context.flags3, + TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1); + + fcoe_pf_params->dummy_icid = (u16)dummy_cid; + + tmp = cpu_to_le16(fcoe_pf_params->num_tasks); + p_data->func_params.num_tasks = tmp; + p_data->func_params.log_page_size = fcoe_pf_params->log_page_size; + p_data->func_params.debug_mode = fcoe_pf_params->debug_mode; + + DMA_REGPAIR_LE(p_data->q_params.glbl_q_params_addr, + fcoe_pf_params->glbl_q_params_addr); + + tmp = cpu_to_le16(fcoe_pf_params->cq_num_entries); + p_data->q_params.cq_num_entries = tmp; + + tmp = cpu_to_le16(fcoe_pf_params->cmdq_num_entries); + p_data->q_params.cmdq_num_entries = tmp; + + p_data->q_params.num_queues = fcoe_pf_params->num_cqs; + + tmp = (__force __le16)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_data->q_params.queue_relative_offset = (__force u8)tmp; + + for (i = 0; i < fcoe_pf_params->num_cqs; i++) { + tmp = cpu_to_le16(qed_get_igu_sb_id(p_hwfn, i)); + p_data->q_params.cq_cmdq_sb_num_arr[i] = tmp; + } + + p_data->q_params.cq_sb_pi = fcoe_pf_params->gl_rq_pi; + p_data->q_params.cmdq_sb_pi = fcoe_pf_params->gl_cmd_pi; + + p_data->q_params.bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ); + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_RQ], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_RQ]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_RQ] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_RQ]; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_RQ]); + p_data->q_params.bdq_xoff_threshold[BDQ_ID_RQ] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_RQ]); + p_data->q_params.bdq_xon_threshold[BDQ_ID_RQ] = tmp; + + DMA_REGPAIR_LE(p_data->q_params.bdq_pbl_base_address[BDQ_ID_IMM_DATA], + fcoe_pf_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = + fcoe_pf_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_xoff_threshold[BDQ_ID_IMM_DATA] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]); + p_data->q_params.bdq_xon_threshold[BDQ_ID_IMM_DATA] = tmp; + tmp = cpu_to_le16(fcoe_pf_params->rq_buffer_size); + p_data->q_params.rq_buffer_size = tmp; + + if (fcoe_pf_params->is_target) { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + if (p_data->q_params.bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); + } else { + SET_FIELD(p_data->q_params.q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + } + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + return rc; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; +} + +static int +qed_sp_fcoe_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode 
comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_offload_ramrod_params *p_ramrod = NULL; + struct fcoe_conn_offload_ramrod_data *p_data; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 physical_q0; + __le16 tmp; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_ofld; + p_data = &p_ramrod->offload_ramrod_data; + + /* Transmission PQ is the first of the PF */ + physical_q0 = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_conn->physical_q0 = physical_q0; + p_data->physical_q0 = cpu_to_le16(physical_q0); + + p_data->conn_id = cpu_to_le16(p_conn->conn_id); + DMA_REGPAIR_LE(p_data->sq_pbl_addr, p_conn->sq_pbl_addr); + DMA_REGPAIR_LE(p_data->sq_curr_page_addr, p_conn->sq_curr_page_addr); + DMA_REGPAIR_LE(p_data->sq_next_page_addr, p_conn->sq_next_page_addr); + DMA_REGPAIR_LE(p_data->xferq_pbl_addr, p_conn->xferq_pbl_addr); + DMA_REGPAIR_LE(p_data->xferq_curr_page_addr, p_conn->xferq_addr[0]); + DMA_REGPAIR_LE(p_data->xferq_next_page_addr, p_conn->xferq_addr[1]); + + DMA_REGPAIR_LE(p_data->respq_pbl_addr, p_conn->confq_pbl_addr); + DMA_REGPAIR_LE(p_data->respq_curr_page_addr, p_conn->confq_addr[0]); + DMA_REGPAIR_LE(p_data->respq_next_page_addr, p_conn->confq_addr[1]); + + p_data->dst_mac_addr_lo = cpu_to_le16(p_conn->dst_mac_addr_lo); + p_data->dst_mac_addr_mid = cpu_to_le16(p_conn->dst_mac_addr_mid); + p_data->dst_mac_addr_hi = cpu_to_le16(p_conn->dst_mac_addr_hi); + p_data->src_mac_addr_lo = cpu_to_le16(p_conn->src_mac_addr_lo); + p_data->src_mac_addr_mid = cpu_to_le16(p_conn->src_mac_addr_mid); + p_data->src_mac_addr_hi = cpu_to_le16(p_conn->src_mac_addr_hi); + + tmp = cpu_to_le16(p_conn->tx_max_fc_pay_len); + p_data->tx_max_fc_pay_len = tmp; + tmp = cpu_to_le16(p_conn->e_d_tov_timer_val); + p_data->e_d_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rec_tov_timer_val); + p_data->rec_rr_tov_timer_val = tmp; + tmp = cpu_to_le16(p_conn->rx_max_fc_pay_len); + p_data->rx_max_fc_pay_len = tmp; + + p_data->vlan_tag = cpu_to_le16(p_conn->vlan_tag); + p_data->s_id.addr_hi = p_conn->s_id.addr_hi; + p_data->s_id.addr_mid = p_conn->s_id.addr_mid; + p_data->s_id.addr_lo = p_conn->s_id.addr_lo; + p_data->max_conc_seqs_c3 = p_conn->max_conc_seqs_c3; + p_data->d_id.addr_hi = p_conn->d_id.addr_hi; + p_data->d_id.addr_mid = p_conn->d_id.addr_mid; + p_data->d_id.addr_lo = p_conn->d_id.addr_lo; + p_data->flags = p_conn->flags; + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + SET_FIELD(p_data->flags, + FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN, 1); + p_data->def_q_idx = p_conn->def_q_idx; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_conn_destroy(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct fcoe_conn_terminate_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = 
qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.fcoe_conn_terminate; + DMA_REGPAIR_LE(p_ramrod->terminate_ramrod_data.terminate_params_addr, + p_conn->terminate_params); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_fcoe_func_stop(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u32 active_segs = 0; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_hwfn->pf_params.fcoe_pf_params.dummy_icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_FCOE, &init_data); + if (rc) + return rc; + + active_segs = qed_rd(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK); + active_segs &= ~BIT(QED_CXT_FCOE_TID_SEG); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, active_segs); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_fcoe_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + void *p_addr; + u32 i; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + if (!list_empty(&p_hwfn->p_fcoe_info->free_list)) + p_conn = + list_first_entry(&p_hwfn->p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + *p_out_conn = p_conn; + return 0; + } + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_pbl_xferq; + p_conn->xferq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->xferq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->xferq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->xferq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->xferq_addr[i]; + } + + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_pbl_addr, GFP_KERNEL); + if (!p_addr) + goto nomem_xferq; + p_conn->confq_pbl_addr_virt_addr = p_addr; + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + p_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + &p_conn->confq_addr[i], GFP_KERNEL); + if (!p_addr) + goto nomem_confq; + p_conn->confq_addr_virt_addr[i] = p_addr; + + p_addr = p_conn->confq_pbl_addr_virt_addr; + ((dma_addr_t *)p_addr)[i] = p_conn->confq_addr[i]; + } + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + return 0; + +nomem_confq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) + if (p_conn->confq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); +nomem_xferq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_pbl_addr_virt_addr, + 
p_conn->xferq_pbl_addr); + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) + if (p_conn->xferq_addr_virt_addr[i]) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); +nomem_pbl_xferq: + kfree(p_conn); + return -ENOMEM; +} + +static void qed_fcoe_free_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + u32 i; + + if (!p_conn) + return; + + if (p_conn->confq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_pbl_addr_virt_addr, + p_conn->confq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->confq_addr); i++) { + if (!p_conn->confq_addr_virt_addr[i]) + continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->confq_addr_virt_addr[i], + p_conn->confq_addr[i]); + } + + if (p_conn->xferq_pbl_addr_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_pbl_addr_virt_addr, + p_conn->xferq_pbl_addr); + + for (i = 0; i < ARRAY_SIZE(p_conn->xferq_addr); i++) { + if (!p_conn->xferq_addr_virt_addr[i]) + continue; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + QED_CHAIN_PAGE_SIZE, + p_conn->xferq_addr_virt_addr[i], + p_conn->xferq_addr[i]); + } + kfree(p_conn); +} + +static void __iomem *qed_fcoe_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + if (RESC_NUM(p_hwfn, QED_BDQ)) { + return (u8 __iomem *)p_hwfn->regview + + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); + } else { + DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); + return NULL; + } +} + +static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + if (RESC_NUM(p_hwfn, QED_BDQ)) { + return (u8 __iomem *)p_hwfn->regview + + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); + } else { + DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); + return NULL; + } +} + +int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_fcoe_info *p_fcoe_info; + + /* Allocate LL2's set struct */ + p_fcoe_info = kzalloc(sizeof(*p_fcoe_info), GFP_KERNEL); + if (!p_fcoe_info) { + DP_NOTICE(p_hwfn, "Failed to allocate qed_fcoe_info'\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&p_fcoe_info->free_list); + + p_hwfn->p_fcoe_info = p_fcoe_info; + return 0; +} + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn) +{ + struct fcoe_task_context *p_task_ctx = NULL; + u32 i, lc; + int rc; + + spin_lock_init(&p_hwfn->p_fcoe_info->lock); + for (i = 0; i < p_hwfn->pf_params.fcoe_pf_params.num_tasks; i++) { + rc = qed_cxt_get_task_ctx(p_hwfn, i, + QED_CTX_WORKING_MEM, + (void **)&p_task_ctx); + if (rc) + continue; + + memset(p_task_ctx, 0, sizeof(struct fcoe_task_context)); + + lc = 0; + SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1); + p_task_ctx->timer_context.logical_client_0 = cpu_to_le32(lc); + + lc = 0; + SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC1, 1); + p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc); + + SET_FIELD(p_task_ctx->tstorm_ag_context.flags0, + TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1); + } +} + +void qed_fcoe_free(struct qed_hwfn *p_hwfn) +{ + struct qed_fcoe_conn *p_conn = NULL; + + if (!p_hwfn->p_fcoe_info) + return; + + while (!list_empty(&p_hwfn->p_fcoe_info->free_list)) { + p_conn = 
list_first_entry(&p_hwfn->p_fcoe_info->free_list, + struct qed_fcoe_conn, list_entry); + if (!p_conn) + break; + list_del(&p_conn->list_entry); + qed_fcoe_free_connection(p_hwfn, p_conn); + } + + kfree(p_hwfn->p_fcoe_info); + p_hwfn->p_fcoe_info = NULL; +} + +static int +qed_fcoe_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_in_conn, + struct qed_fcoe_conn **p_out_conn) +{ + struct qed_fcoe_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_FCOE, &icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + if (rc) + return rc; + + /* Use input connection [if provided] or allocate a new one */ + if (p_in_conn) { + p_conn = p_in_conn; + } else { + rc = qed_fcoe_allocate_connection(p_hwfn, &p_conn); + if (rc) { + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); + return rc; + } + } + + p_conn->icid = icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + *p_out_conn = p_conn; + + return rc; +} + +static void qed_fcoe_release_connection(struct qed_hwfn *p_hwfn, + struct qed_fcoe_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_fcoe_info->lock); + list_add_tail(&p_conn->list_entry, &p_hwfn->p_fcoe_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + spin_unlock_bh(&p_hwfn->p_fcoe_info->lock); +} + +static void _qed_fcoe_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_rx_stat tstats; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_FCOE_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); + + p_stats->fcoe_rx_byte_cnt = HILO_64_REGPAIR(tstats.fcoe_rx_byte_cnt); + p_stats->fcoe_rx_data_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_data_pkt_cnt); + p_stats->fcoe_rx_xfer_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_xfer_pkt_cnt); + p_stats->fcoe_rx_other_pkt_cnt = + HILO_64_REGPAIR(tstats.fcoe_rx_other_pkt_cnt); + + p_stats->fcoe_silent_drop_pkt_cmdq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_cmdq_full_cnt); + p_stats->fcoe_silent_drop_pkt_rq_full_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_rq_full_cnt); + p_stats->fcoe_silent_drop_pkt_crc_error_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_crc_error_cnt); + p_stats->fcoe_silent_drop_pkt_task_invalid_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_pkt_task_invalid_cnt); + p_stats->fcoe_silent_drop_total_pkt_cnt = + le32_to_cpu(tstats.fcoe_silent_drop_total_pkt_cnt); +} + +static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_fcoe_stats *p_stats) +{ + struct fcoe_tx_stat pstats; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_FCOE_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->fcoe_tx_byte_cnt = HILO_64_REGPAIR(pstats.fcoe_tx_byte_cnt); + p_stats->fcoe_tx_data_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_data_pkt_cnt); + p_stats->fcoe_tx_xfer_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_xfer_pkt_cnt); + p_stats->fcoe_tx_other_pkt_cnt = + HILO_64_REGPAIR(pstats.fcoe_tx_other_pkt_cnt); +} + +static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, + struct qed_fcoe_stats *p_stats, + bool is_atomic) +{ + struct qed_ptt *p_ptt; + + memset(p_stats, 0, sizeof(*p_stats)); + + p_ptt = 
qed_ptt_acquire_context(p_hwfn, is_atomic); + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EINVAL; + } + + _qed_fcoe_get_tstats(p_hwfn, p_ptt, p_stats); + _qed_fcoe_get_pstats(p_hwfn, p_ptt, p_stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +struct qed_hash_fcoe_con { + struct hlist_node node; + struct qed_fcoe_conn *con; +}; + +static int qed_fill_fcoe_dev_info(struct qed_dev *cdev, + struct qed_dev_fcoe_info *info) +{ + struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev); + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + + info->primary_dbq_rq_addr = + qed_fcoe_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); + info->secondary_bdq_rq_addr = + qed_fcoe_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + + info->wwpn = hwfn->mcp_info->func_info.wwn_port; + info->wwnn = hwfn->mcp_info->func_info.wwn_node; + + info->num_cqs = FEAT_NUM(hwfn, QED_FCOE_CQ); + + return rc; +} + +static void qed_register_fcoe_ops(struct qed_dev *cdev, + struct qed_fcoe_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.fcoe = ops; + cdev->ops_cookie = cookie; +} + +static struct qed_hash_fcoe_con *qed_fcoe_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_fcoe_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || (hash_con->con->icid != handle)) + return NULL; + + return hash_con; +} + +static int qed_fcoe_stop(struct qed_dev *cdev) +{ + struct qed_ptt *p_ptt; + int rc; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "fcoe already stopped\n"); + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop fcoe - not all connections were returned\n"); + return -EINVAL; + } + + p_ptt = qed_ptt_acquire(QED_AFFIN_HWFN(cdev)); + if (!p_ptt) + return -EAGAIN; + + /* Stop the fcoe */ + rc = qed_sp_fcoe_func_stop(QED_AFFIN_HWFN(cdev), p_ptt, + QED_SPQ_MODE_EBLOCK, NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + qed_ptt_release(QED_AFFIN_HWFN(cdev), p_ptt); + + return rc; +} + +static int qed_fcoe_start(struct qed_dev *cdev, struct qed_fcoe_tid *tasks) +{ + int rc; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "fcoe already started;\n"); + return 0; + } + + rc = qed_sp_fcoe_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, + NULL); + if (rc) { + DP_NOTICE(cdev, "Failed to start fcoe\n"); + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (tasks) { + struct qed_tid_mem *tid_info = kzalloc(sizeof(*tid_info), + GFP_ATOMIC); + + if (!tid_info) { + DP_NOTICE(cdev, + "Failed to allocate tasks information\n"); + qed_fcoe_stop(cdev); + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_fcoe_stop(cdev); + kfree(tid_info); + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_FCOE * sizeof(u8 *)); + + kfree(tid_info); + } + + return 0; +} + +static int qed_fcoe_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_fcoe_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_KERNEL); + if 
(!hash_con) { + DP_NOTICE(cdev, "Failed to allocate hashed connection\n"); + return -ENOMEM; + } + + /* Acquire the connection */ + rc = qed_fcoe_acquire_connection(QED_AFFIN_HWFN(cdev), NULL, + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + return rc; + } + + /* Added the connection to hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + + if (p_doorbell) + *p_doorbell = qed_fcoe_get_db_addr(QED_AFFIN_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_fcoe_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_fcoe_con *hash_con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_fcoe_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_fcoe_offload_conn(struct qed_dev *cdev, + u32 handle, + struct qed_fcoe_params_offload *conn_info) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->sq_curr_page_addr = conn_info->sq_curr_page_addr; + con->sq_next_page_addr = conn_info->sq_next_page_addr; + con->tx_max_fc_pay_len = conn_info->tx_max_fc_pay_len; + con->e_d_tov_timer_val = conn_info->e_d_tov_timer_val; + con->rec_tov_timer_val = conn_info->rec_tov_timer_val; + con->rx_max_fc_pay_len = conn_info->rx_max_fc_pay_len; + con->vlan_tag = conn_info->vlan_tag; + con->max_conc_seqs_c3 = conn_info->max_conc_seqs_c3; + con->flags = conn_info->flags; + con->def_q_idx = conn_info->def_q_idx; + + con->src_mac_addr_hi = (conn_info->src_mac[5] << 8) | + conn_info->src_mac[4]; + con->src_mac_addr_mid = (conn_info->src_mac[3] << 8) | + conn_info->src_mac[2]; + con->src_mac_addr_lo = (conn_info->src_mac[1] << 8) | + conn_info->src_mac[0]; + con->dst_mac_addr_hi = (conn_info->dst_mac[5] << 8) | + conn_info->dst_mac[4]; + con->dst_mac_addr_mid = (conn_info->dst_mac[3] << 8) | + conn_info->dst_mac[2]; + con->dst_mac_addr_lo = (conn_info->dst_mac[1] << 8) | + conn_info->dst_mac[0]; + + con->s_id.addr_hi = conn_info->s_id.addr_hi; + con->s_id.addr_mid = conn_info->s_id.addr_mid; + con->s_id.addr_lo = conn_info->s_id.addr_lo; + con->d_id.addr_hi = conn_info->d_id.addr_hi; + con->d_id.addr_mid = conn_info->d_id.addr_mid; + con->d_id.addr_lo = conn_info->d_id.addr_lo; + + return qed_sp_fcoe_conn_offload(QED_AFFIN_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_destroy_conn(struct qed_dev *cdev, + u32 handle, dma_addr_t terminate_params) +{ + struct qed_hash_fcoe_con *hash_con; + struct qed_fcoe_conn *con; + + hash_con = qed_fcoe_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + con->terminate_params = terminate_params; + + return qed_sp_fcoe_conn_destroy(QED_AFFIN_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_fcoe_stats_context(struct qed_dev *cdev, + struct qed_fcoe_stats *stats, + bool is_atomic) +{ + return 
qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + +static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) +{ + return qed_fcoe_stats_context(cdev, stats, false); +} + +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) +{ + struct qed_fcoe_stats proto_stats; + + /* Retrieve FW statistics */ + memset(&proto_stats, 0, sizeof(proto_stats)); + if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, + "Failed to collect FCoE statistics\n"); + return; + } + + /* Translate FW statistics into struct */ + stats->rx_pkts = proto_stats.fcoe_rx_data_pkt_cnt + + proto_stats.fcoe_rx_xfer_pkt_cnt + + proto_stats.fcoe_rx_other_pkt_cnt; + stats->tx_pkts = proto_stats.fcoe_tx_data_pkt_cnt + + proto_stats.fcoe_tx_xfer_pkt_cnt + + proto_stats.fcoe_tx_other_pkt_cnt; + stats->fcs_err = proto_stats.fcoe_silent_drop_pkt_crc_error_cnt; + + /* Request protocol driver to fill-in the rest */ + if (cdev->protocol_ops.fcoe && cdev->ops_cookie) { + struct qed_fcoe_cb_ops *ops = cdev->protocol_ops.fcoe; + void *cookie = cdev->ops_cookie; + + if (ops->get_login_failures) + stats->login_failure = ops->get_login_failures(cookie); + } +} + +static const struct qed_fcoe_ops qed_fcoe_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_fcoe_dev_info, + .start = &qed_fcoe_start, + .stop = &qed_fcoe_stop, + .register_ops = &qed_register_fcoe_ops, + .acquire_conn = &qed_fcoe_acquire_conn, + .release_conn = &qed_fcoe_release_conn, + .offload_conn = &qed_fcoe_offload_conn, + .destroy_conn = &qed_fcoe_destroy_conn, + .get_stats = &qed_fcoe_stats, +}; + +const struct qed_fcoe_ops *qed_get_fcoe_ops(void) +{ + return &qed_fcoe_ops_pass; +} +EXPORT_SYMBOL(qed_get_fcoe_ops); + +void qed_put_fcoe_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_fcoe_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h new file mode 100644 index 0000000000..214e8299ec --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_FCOE_H +#define _QED_FCOE_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_fcoe_if.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +struct qed_fcoe_info { + spinlock_t lock; /* Connection resources. */ + struct list_head free_list; +}; + +#if IS_ENABLED(CONFIG_QED_FCOE) +int qed_fcoe_alloc(struct qed_hwfn *p_hwfn); + +void qed_fcoe_setup(struct qed_hwfn *p_hwfn); + +void qed_fcoe_free(struct qed_hwfn *p_hwfn); +/** + * qed_get_protocol_stats_fcoe(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. 
+ */ +void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats, + bool is_atomic); +#else /* CONFIG_QED_FCOE */ +static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {} +static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {} + +static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) +{ +} +#endif /* CONFIG_QED_FCOE */ + +#endif /* _QED_FCOE_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h new file mode 100644 index 0000000000..ed1a84542a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -0,0 +1,10932 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_HSI_H +#define _QED_HSI_H + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/qed/common_hsi.h> +#include <linux/qed/storage_common.h> +#include <linux/qed/tcp_common.h> +#include <linux/qed/fcoe_common.h> +#include <linux/qed/eth_common.h> +#include <linux/qed/iscsi_common.h> +#include <linux/qed/nvmetcp_common.h> +#include <linux/qed/iwarp_common.h> +#include <linux/qed/rdma_common.h> +#include <linux/qed/roce_common.h> +#include <linux/qed/qed_fcoe_if.h> + +struct qed_hwfn; +struct qed_ptt; + +/* Opcodes for the event ring */ +enum common_event_opcode { + COMMON_EVENT_PF_START, + COMMON_EVENT_PF_STOP, + COMMON_EVENT_VF_START, + COMMON_EVENT_VF_STOP, + COMMON_EVENT_VF_PF_CHANNEL, + COMMON_EVENT_VF_FLR, + COMMON_EVENT_PF_UPDATE, + COMMON_EVENT_FW_ERROR, + COMMON_EVENT_RL_UPDATE, + COMMON_EVENT_EMPTY, + MAX_COMMON_EVENT_OPCODE +}; + +/* Common Ramrod Command IDs */ +enum common_ramrod_cmd_id { + COMMON_RAMROD_UNUSED, + COMMON_RAMROD_PF_START, + COMMON_RAMROD_PF_STOP, + COMMON_RAMROD_VF_START, + COMMON_RAMROD_VF_STOP, + COMMON_RAMROD_PF_UPDATE, + COMMON_RAMROD_RL_UPDATE, + COMMON_RAMROD_EMPTY, + MAX_COMMON_RAMROD_CMD_ID +}; + +/* How ll2 should deal with packet upon errors */ +enum core_error_handle { + LL2_DROP_PACKET, + LL2_DO_NOTHING, + LL2_ASSERT, + MAX_CORE_ERROR_HANDLE +}; + +/* Opcodes for the event ring */ +enum core_event_opcode { + CORE_EVENT_TX_QUEUE_START, + CORE_EVENT_TX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_START, + CORE_EVENT_RX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_FLUSH, + CORE_EVENT_TX_QUEUE_UPDATE, + CORE_EVENT_QUEUE_STATS_QUERY, + MAX_CORE_EVENT_OPCODE +}; + +/* The L4 pseudo checksum mode for Core */ +enum core_l4_pseudo_checksum_mode { + CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH, + CORE_L4_PSEUDO_CSUM_ZERO_LENGTH, + MAX_CORE_L4_PSEUDO_CHECKSUM_MODE +}; + +/* LL2 SP error code */ +enum core_ll2_error_code { + LL2_OK = 0, + LL2_ERROR, + MAX_CORE_LL2_ERROR_CODE +}; + +/* Light-L2 RX Producers in Tstorm RAM */ +struct core_ll2_port_stats { + struct regpair gsi_invalid_hdr; + struct regpair gsi_invalid_pkt_length; + struct regpair gsi_unsupported_pkt_typ; + struct regpair gsi_crcchksm_error; +}; + +/* LL2 TX Per Queue Stats */ +struct core_ll2_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes; + struct regpair sent_mcast_bytes; + struct regpair sent_bcast_bytes; + struct regpair sent_ucast_pkts; + struct regpair sent_mcast_pkts; + struct regpair sent_bcast_pkts; + 
struct regpair error_drop_pkts; +}; + +/* Light-L2 RX Producers in Tstorm RAM */ +struct core_ll2_rx_prod { + __le16 bd_prod; + __le16 cqe_prod; +}; + +struct core_ll2_tstorm_per_queue_stat { + struct regpair packet_too_big_discard; + struct regpair no_buff_discard; +}; + +struct core_ll2_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + +struct core_ll2_rx_per_queue_stat { + struct core_ll2_tstorm_per_queue_stat tstorm_stat; + struct core_ll2_ustorm_per_queue_stat ustorm_stat; +}; + +struct core_ll2_tx_per_queue_stat { + struct core_ll2_pstorm_per_queue_stat pstorm_stat; +}; + +/* Structure for doorbell data, in PWM mode, for RX producers update. */ +struct core_pwm_prod_update_data { + __le16 icid; /* internal CID */ + u8 reserved0; + u8 params; +#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_MASK 0x3 +#define CORE_PWM_PROD_UPDATE_DATA_AGG_CMD_SHIFT 0 +#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_MASK 0x3F /* Set 0 */ +#define CORE_PWM_PROD_UPDATE_DATA_RESERVED1_SHIFT 2 + struct core_ll2_rx_prod prod; /* Producers */ +}; + +/* Ramrod data for rx/tx queue statistics query ramrod */ +struct core_queue_stats_query_ramrod_data { + u8 rx_stat; + u8 tx_stat; + __le16 reserved[3]; + struct regpair rx_stat_addr; + struct regpair tx_stat_addr; +}; + +/* Core Ramrod Command IDs (light L2) */ +enum core_ramrod_cmd_id { + CORE_RAMROD_UNUSED, + CORE_RAMROD_RX_QUEUE_START, + CORE_RAMROD_TX_QUEUE_START, + CORE_RAMROD_RX_QUEUE_STOP, + CORE_RAMROD_TX_QUEUE_STOP, + CORE_RAMROD_RX_QUEUE_FLUSH, + CORE_RAMROD_TX_QUEUE_UPDATE, + CORE_RAMROD_QUEUE_STATS_QUERY, + MAX_CORE_RAMROD_CMD_ID +}; + +/* Core RX CQE Type for Light L2 */ +enum core_roce_flavor_type { + CORE_ROCE, + CORE_RROCE, + MAX_CORE_ROCE_FLAVOR_TYPE +}; + +/* Specifies how ll2 should deal with packets errors: packet_too_big and + * no_buff. 
+ */ +struct core_rx_action_on_error { + u8 error_type; +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 +#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF +#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 +}; + +/* Core RX BD for Light L2 */ +struct core_rx_bd { + struct regpair addr; + __le16 reserved[4]; +}; + +/* Core RX CM offload BD for Light L2 */ +struct core_rx_bd_with_buff_len { + struct regpair addr; + __le16 buff_length; + __le16 reserved[3]; +}; + +/* Core RX CM offload BD for Light L2 */ +union core_rx_bd_union { + struct core_rx_bd rx_bd; + struct core_rx_bd_with_buff_len rx_bd_with_len; +}; + +/* Opaque Data for Light L2 RX CQE */ +struct core_rx_cqe_opaque_data { + __le32 data[2]; +}; + +/* Core RX CQE Type for Light L2 */ +enum core_rx_cqe_type { + CORE_RX_CQE_ILLEGAL_TYPE, + CORE_RX_CQE_TYPE_REGULAR, + CORE_RX_CQE_TYPE_GSI_OFFLOAD, + CORE_RX_CQE_TYPE_SLOW_PATH, + MAX_CORE_RX_CQE_TYPE +}; + +/* Core RX CQE for Light L2 */ +struct core_rx_fast_path_cqe { + u8 type; + u8 placement_offset; + struct parsing_and_err_flags parse_flags; + __le16 packet_length; + __le16 vlan; + struct core_rx_cqe_opaque_data opaque_data; + struct parsing_err_flags err_flags; + u8 packet_source; + u8 reserved0; + __le32 reserved1[3]; +}; + +/* Core Rx CM offload CQE */ +struct core_rx_gsi_offload_cqe { + u8 type; + u8 data_length_error; + struct parsing_and_err_flags parse_flags; + __le16 data_length; + __le16 vlan; + __le32 src_mac_addrhi; + __le16 src_mac_addrlo; + __le16 qp_id; + __le32 src_qp; + struct core_rx_cqe_opaque_data opaque_data; + u8 packet_source; + u8 reserved[3]; +}; + +/* Core RX CQE for Light L2 */ +struct core_rx_slow_path_cqe { + u8 type; + u8 ramrod_cmd_id; + __le16 echo; + struct core_rx_cqe_opaque_data opaque_data; + __le32 reserved1[5]; +}; + +/* Core RX CM offload BD for Light L2 */ +union core_rx_cqe_union { + struct core_rx_fast_path_cqe rx_cqe_fp; + struct core_rx_gsi_offload_cqe rx_cqe_gsi; + struct core_rx_slow_path_cqe rx_cqe_sp; +}; + +/* RX packet source. 
*/ +enum core_rx_pkt_source { + CORE_RX_PKT_SOURCE_NETWORK = 0, + CORE_RX_PKT_SOURCE_LB, + CORE_RX_PKT_SOURCE_TX, + CORE_RX_PKT_SOURCE_LL2_TX, + MAX_CORE_RX_PKT_SOURCE +}; + +/* Ramrod data for rx queue start ramrod */ +struct core_rx_start_ramrod_data { + struct regpair bd_base; + struct regpair cqe_pbl_addr; + __le16 mtu; + __le16 sb_id; + u8 sb_index; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 drop_ttl0_flg; + __le16 num_of_pbl_pages; + u8 inner_vlan_stripping_en; + u8 report_outer_vlan; + u8 queue_id; + u8 main_func_queue; + u8 mf_si_bcast_accept_all; + u8 mf_si_mcast_accept_all; + struct core_rx_action_on_error action_on_error; + u8 gsi_offload_flag; + u8 vport_id_valid; + u8 vport_id; + u8 zero_prod_flg; + u8 wipe_inner_vlan_pri_en; + u8 reserved[2]; +}; + +/* Ramrod data for rx queue stop ramrod */ +struct core_rx_stop_ramrod_data { + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 queue_id; + u8 reserved1; + __le16 reserved2[2]; +}; + +/* Flags for Core TX BD */ +struct core_tx_bd_data { + __le16 as_bitfield; +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_FORCE_VLAN_MODE_SHIFT 0 +#define CORE_TX_BD_DATA_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT 1 +#define CORE_TX_BD_DATA_START_BD_MASK 0x1 +#define CORE_TX_BD_DATA_START_BD_SHIFT 2 +#define CORE_TX_BD_DATA_IP_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_IP_CSUM_SHIFT 3 +#define CORE_TX_BD_DATA_L4_CSUM_MASK 0x1 +#define CORE_TX_BD_DATA_L4_CSUM_SHIFT 4 +#define CORE_TX_BD_DATA_IPV6_EXT_MASK 0x1 +#define CORE_TX_BD_DATA_IPV6_EXT_SHIFT 5 +#define CORE_TX_BD_DATA_L4_PROTOCOL_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PROTOCOL_SHIFT 6 +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define CORE_TX_BD_DATA_L4_PSEUDO_CSUM_MODE_SHIFT 7 +#define CORE_TX_BD_DATA_NBDS_MASK 0xF +#define CORE_TX_BD_DATA_NBDS_SHIFT 8 +#define CORE_TX_BD_DATA_ROCE_FLAV_MASK 0x1 +#define CORE_TX_BD_DATA_ROCE_FLAV_SHIFT 12 +#define CORE_TX_BD_DATA_IP_LEN_MASK 0x1 +#define CORE_TX_BD_DATA_IP_LEN_SHIFT 13 +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_MASK 0x1 +#define CORE_TX_BD_DATA_DISABLE_STAG_INSERTION_SHIFT 14 +#define CORE_TX_BD_DATA_RESERVED0_MASK 0x1 +#define CORE_TX_BD_DATA_RESERVED0_SHIFT 15 +}; + +/* Core TX BD for Light L2 */ +struct core_tx_bd { + struct regpair addr; + __le16 nbytes; + __le16 nw_vlan_or_lb_echo; + struct core_tx_bd_data bd_data; + __le16 bitfield1; +#define CORE_TX_BD_L4_HDR_OFFSET_W_MASK 0x3FFF +#define CORE_TX_BD_L4_HDR_OFFSET_W_SHIFT 0 +#define CORE_TX_BD_TX_DST_MASK 0x3 +#define CORE_TX_BD_TX_DST_SHIFT 14 +}; + +/* Light L2 TX Destination */ +enum core_tx_dest { + CORE_TX_DEST_NW, + CORE_TX_DEST_LB, + CORE_TX_DEST_RESERVED, + CORE_TX_DEST_DROP, + MAX_CORE_TX_DEST +}; + +/* Ramrod data for tx queue start ramrod */ +struct core_tx_start_ramrod_data { + struct regpair pbl_base_addr; + __le16 mtu; + __le16 sb_id; + u8 sb_index; + u8 stats_en; + u8 stats_id; + u8 conn_type; + __le16 pbl_size; + __le16 qm_pq_id; + u8 gsi_offload_flag; + u8 ctx_stats_en; + u8 vport_id_valid; + u8 vport_id; + u8 enforce_security_flag; + u8 reserved[7]; +}; + +/* Ramrod data for tx queue stop ramrod */ +struct core_tx_stop_ramrod_data { + __le32 reserved0[2]; +}; + +/* Ramrod data for tx queue update ramrod */ +struct core_tx_update_ramrod_data { + u8 update_qm_pq_id_flg; + u8 reserved0; + __le16 qm_pq_id; + __le32 reserved1[1]; +}; + +/* Enum flag for what type of dcb data to update */ +enum dcb_dscp_update_mode { + DONT_UPDATE_DCB_DSCP, + UPDATE_DCB, + UPDATE_DSCP, + UPDATE_DCB_DSCP, + 
MAX_DCB_DSCP_UPDATE_MODE +}; + +/* The core storm context for the Ystorm */ +struct ystorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The core storm context for the Pstorm */ +struct pstorm_core_conn_st_ctx { + __le32 reserved[20]; +}; + +/* Core Slowpath Connection storm context of Xstorm */ +struct xstorm_core_conn_st_ctx { + struct regpair spq_base_addr; + __le32 reserved0[2]; + __le16 spq_cons; + __le16 reserved1[111]; +}; + +struct xstorm_core_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define 
XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 consolid_prod; + __le16 reserved16; + __le16 tx_bd_cons; + __le16 tx_bd_or_spq_prod; + __le16 updated_qm_pq_id; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + 
__le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; + __le32 reg18; + __le32 reg19; + __le16 word12; + __le16 word13; + __le16 word14; + __le16 word15; +}; + +struct tstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define 
TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 ll2_rx_prod; + __le32 reg10; +}; + +struct ustorm_core_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 rx_producers; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 
word2; + __le16 word3; +}; + +/* The core storm context for the Mstorm */ +struct mstorm_core_conn_st_ctx { + __le32 reserved[40]; +}; + +/* The core storm context for the Ustorm */ +struct ustorm_core_conn_st_ctx { + __le32 reserved[20]; +}; + +/* The core storm context for the Tstorm */ +struct tstorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* core connection context */ +struct core_conn_context { + struct ystorm_core_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_core_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_core_conn_st_ctx xstorm_st_context; + struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; + struct mstorm_core_conn_st_ctx mstorm_st_context; + struct ustorm_core_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct tstorm_core_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; +}; + +struct eth_mstorm_per_pf_stat { + struct regpair gre_discard_pkts; + struct regpair vxlan_discard_pkts; + struct regpair geneve_discard_pkts; + struct regpair lb_discard_pkts; +}; + +struct eth_mstorm_per_queue_stat { + struct regpair ttl0_discard; + struct regpair packet_too_big_discard; + struct regpair no_buff_discard; + struct regpair not_active_discard; + struct regpair tpa_coalesced_pkts; + struct regpair tpa_coalesced_events; + struct regpair tpa_aborts_num; + struct regpair tpa_coalesced_bytes; +}; + +/* Ethernet TX Per PF */ +struct eth_pstorm_per_pf_stat { + struct regpair sent_lb_ucast_bytes; + struct regpair sent_lb_mcast_bytes; + struct regpair sent_lb_bcast_bytes; + struct regpair sent_lb_ucast_pkts; + struct regpair sent_lb_mcast_pkts; + struct regpair sent_lb_bcast_pkts; + struct regpair sent_gre_bytes; + struct regpair sent_vxlan_bytes; + struct regpair sent_geneve_bytes; + struct regpair sent_mpls_bytes; + struct regpair sent_gre_mpls_bytes; + struct regpair sent_udp_mpls_bytes; + struct regpair sent_gre_pkts; + struct regpair sent_vxlan_pkts; + struct regpair sent_geneve_pkts; + struct regpair sent_mpls_pkts; + struct regpair sent_gre_mpls_pkts; + struct regpair sent_udp_mpls_pkts; + struct regpair gre_drop_pkts; + struct regpair vxlan_drop_pkts; + struct regpair geneve_drop_pkts; + struct regpair mpls_drop_pkts; + struct regpair gre_mpls_drop_pkts; + struct regpair udp_mpls_drop_pkts; +}; + +/* Ethernet TX Per Queue Stats */ +struct eth_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes; + struct regpair sent_mcast_bytes; + struct regpair sent_bcast_bytes; + struct regpair sent_ucast_pkts; + struct regpair sent_mcast_pkts; + struct regpair sent_bcast_pkts; + struct regpair error_drop_pkts; +}; + +/* ETH Rx producers data */ +struct eth_rx_rate_limit { + __le16 mult; + __le16 cnst; + u8 add_sub_cnst; + u8 reserved0; + __le16 reserved1; +}; + +/* Update RSS indirection table entry command */ +struct eth_tstorm_rss_update_data { + u8 vport_id; + u8 ind_table_index; + __le16 ind_table_value; + __le16 reserved1; + u8 reserved; + u8 valid; +}; + +struct eth_ustorm_per_pf_stat { + struct regpair rcv_lb_ucast_bytes; + struct regpair rcv_lb_mcast_bytes; + struct regpair rcv_lb_bcast_bytes; + struct regpair rcv_lb_ucast_pkts; + struct regpair rcv_lb_mcast_pkts; + struct regpair rcv_lb_bcast_pkts; + struct regpair rcv_gre_bytes; + struct regpair rcv_vxlan_bytes; + struct regpair rcv_geneve_bytes; + struct regpair rcv_gre_pkts; + struct regpair 
rcv_vxlan_pkts; + struct regpair rcv_geneve_pkts; +}; + +struct eth_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + +/* Event Ring VF-PF Channel data */ +struct vf_pf_channel_eqe_data { + struct regpair msg_addr; +}; + +/* Event Ring initial cleanup data */ +struct initial_cleanup_eqe_data { + u8 vf_id; + u8 reserved[7]; +}; + +/* FW error data */ +struct fw_err_data { + u8 recovery_scope; + u8 err_id; + __le16 entity_id; + u8 reserved[4]; +}; + +/* Event Data Union */ +union event_ring_data { + u8 bytes[8]; + struct vf_pf_channel_eqe_data vf_pf_channel; + struct iscsi_eqe_data iscsi_info; + struct iscsi_connect_done_results iscsi_conn_done_info; + union rdma_eqe_data rdma_data; + struct initial_cleanup_eqe_data vf_init_cleanup; + struct fw_err_data err_data; +}; + +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + u8 reserved0; + u8 vf_id; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + +/* Event Ring Next Page Address */ +struct event_ring_next_addr { + struct regpair addr; + __le32 reserved[2]; +}; + +/* Event Ring Element */ +union event_ring_element { + struct event_ring_entry entry; + struct event_ring_next_addr next_addr; +}; + +/* Ports mode */ +enum fw_flow_ctrl_mode { + flow_ctrl_pause, + flow_ctrl_pfc, + MAX_FW_FLOW_CTRL_MODE +}; + +/* GFT profile type */ +enum gft_profile_type { + GFT_PROFILE_TYPE_4_TUPLE, + GFT_PROFILE_TYPE_L4_DST_PORT, + GFT_PROFILE_TYPE_IP_DST_ADDR, + GFT_PROFILE_TYPE_IP_SRC_ADDR, + GFT_PROFILE_TYPE_TUNNEL_TYPE, + MAX_GFT_PROFILE_TYPE +}; + +/* Major and Minor hsi Versions */ +struct hsi_fp_ver_struct { + u8 minor_ver_arr[2]; + u8 major_ver_arr[2]; +}; + +/* Integration Phase */ +enum integ_phase { + INTEG_PHASE_BB_A0_LATEST = 3, + INTEG_PHASE_BB_B0_NO_MCP = 10, + INTEG_PHASE_BB_B0_WITH_MCP = 11, + MAX_INTEG_PHASE +}; + +/* Ports mode */ +enum iwarp_ll2_tx_queues { + IWARP_LL2_IN_ORDER_TX_QUEUE = 1, + IWARP_LL2_ALIGNED_TX_QUEUE, + IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE, + IWARP_LL2_ERROR, + MAX_IWARP_LL2_TX_QUEUES +}; + +/* Function error ID */ +enum func_err_id { + FUNC_NO_ERROR, + VF_PF_CHANNEL_NOT_READY, + VF_ZONE_MSG_NOT_VALID, + VF_ZONE_FUNC_NOT_ENABLED, + ETH_PACKET_TOO_SMALL, + ETH_ILLEGAL_VLAN_MODE, + ETH_MTU_VIOLATION, + ETH_ILLEGAL_INBAND_TAGS, + ETH_VLAN_INSERT_AND_INBAND_VLAN, + ETH_ILLEGAL_NBDS, + ETH_FIRST_BD_WO_SOP, + ETH_INSUFFICIENT_BDS, + ETH_ILLEGAL_LSO_HDR_NBDS, + ETH_ILLEGAL_LSO_MSS, + ETH_ZERO_SIZE_BD, + ETH_ILLEGAL_LSO_HDR_LEN, + ETH_INSUFFICIENT_PAYLOAD, + ETH_EDPM_OUT_OF_SYNC, + ETH_TUNN_IPV6_EXT_NBD_ERR, + ETH_CONTROL_PACKET_VIOLATION, + ETH_ANTI_SPOOFING_ERR, + ETH_PACKET_SIZE_TOO_LARGE, + CORE_ILLEGAL_VLAN_MODE, + CORE_ILLEGAL_NBDS, + CORE_FIRST_BD_WO_SOP, + CORE_INSUFFICIENT_BDS, + CORE_PACKET_TOO_SMALL, + CORE_ILLEGAL_INBAND_TAGS, + CORE_VLAN_INSERT_AND_INBAND_VLAN, + CORE_MTU_VIOLATION, + CORE_CONTROL_PACKET_VIOLATION, + CORE_ANTI_SPOOFING_ERR, + CORE_PACKET_SIZE_TOO_LARGE, + CORE_ILLEGAL_BD_FLAGS, + CORE_GSI_PACKET_VIOLATION, + MAX_FUNC_ERR_ID +}; + +/* FW error handling mode */ +enum fw_err_mode { + FW_ERR_FATAL_ASSERT, + FW_ERR_DRV_REPORT, + MAX_FW_ERR_MODE +}; + +/* FW error recovery scope */ +enum 
fw_err_recovery_scope { + ERR_SCOPE_INVALID, + ERR_SCOPE_TX_Q, + ERR_SCOPE_RX_Q, + ERR_SCOPE_QP, + ERR_SCOPE_VPORT, + ERR_SCOPE_FUNC, + ERR_SCOPE_PORT, + ERR_SCOPE_ENGINE, + MAX_FW_ERR_RECOVERY_SCOPE +}; + +/* Mstorm non-triggering VF zone */ +struct mstorm_non_trigger_vf_zone { + struct eth_mstorm_per_queue_stat eth_queue_stat; + struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD]; +}; + +/* Mstorm VF zone */ +struct mstorm_vf_zone { + struct mstorm_non_trigger_vf_zone non_trigger; +}; + +/* vlan header including TPID and TCI fields */ +struct vlan_header { + __le16 tpid; + __le16 tci; +}; + +/* outer tag configurations */ +struct outer_tag_config_struct { + u8 enable_stag_pri_change; + u8 pri_map_valid; + u8 reserved[2]; + struct vlan_header outer_tag; + u8 inner_to_outer_pri_map[8]; +}; + +/* personality per PF */ +enum personality_type { + BAD_PERSONALITY_TYP, + PERSONALITY_TCP_ULP, + PERSONALITY_FCOE, + PERSONALITY_RDMA_AND_ETH, + PERSONALITY_RDMA, + PERSONALITY_CORE, + PERSONALITY_ETH, + PERSONALITY_RESERVED, + MAX_PERSONALITY_TYPE +}; + +/* tunnel configuration */ +struct pf_start_tunnel_config { + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved[3]; +}; + +/* Ramrod data for PF start ramrod */ +struct pf_start_ramrod_data { + struct regpair event_ring_pbl_addr; + struct regpair consolid_q_pbl_base_addr; + struct pf_start_tunnel_config tunnel_config; + __le16 event_ring_sb_id; + u8 base_vf_id; + u8 num_vfs; + u8 event_ring_num_pages; + u8 event_ring_sb_index; + u8 path_id; + u8 warning_as_error; + u8 dont_log_ramrods; + u8 personality; + __le16 log_type_mask; + u8 mf_mode; + u8 integ_phase; + u8 allow_npar_tx_switching; + u8 reserved0; + struct hsi_fp_ver_struct hsi_fp_ver; + struct outer_tag_config_struct outer_tag_config; + u8 pf_fp_err_mode; + u8 consolid_q_num_pages; + u8 reserved[6]; +}; + +/* Data for port update ramrod */ +struct protocol_dcb_data { + u8 dcb_enable_flag; + u8 dscp_enable_flag; + u8 dcb_priority; + u8 dcb_tc; + u8 dscp_val; + u8 dcb_dont_add_vlan0; +}; + +/* Update tunnel configuration */ +struct pf_update_tunnel_config { + u8 update_rx_pf_clss; + u8 update_rx_def_ucast_clss; + u8 update_rx_def_non_ucast_clss; + u8 set_vxlan_udp_port_flg; + u8 set_geneve_udp_port_flg; + u8 set_no_inner_l2_vxlan_udp_port_flg; + u8 tunnel_clss_vxlan; + u8 tunnel_clss_l2geneve; + u8 tunnel_clss_ipgeneve; + u8 tunnel_clss_l2gre; + u8 tunnel_clss_ipgre; + u8 reserved; + __le16 vxlan_udp_port; + __le16 geneve_udp_port; + __le16 no_inner_l2_vxlan_udp_port; + __le16 reserved1[3]; +}; + +/* Data for port update ramrod */ +struct pf_update_ramrod_data { + u8 update_eth_dcb_data_mode; + u8 update_fcoe_dcb_data_mode; + u8 update_iscsi_dcb_data_mode; + u8 update_roce_dcb_data_mode; + u8 update_rroce_dcb_data_mode; + u8 update_iwarp_dcb_data_mode; + u8 update_mf_vlan_flag; + u8 update_enable_stag_pri_change; + struct protocol_dcb_data eth_dcb_data; + struct protocol_dcb_data fcoe_dcb_data; + struct protocol_dcb_data iscsi_dcb_data; + struct protocol_dcb_data roce_dcb_data; + struct protocol_dcb_data rroce_dcb_data; + struct protocol_dcb_data iwarp_dcb_data; + __le16 mf_vlan; + u8 enable_stag_pri_change; + u8 reserved; + struct pf_update_tunnel_config tunnel_config; +}; + +/* Ports mode */ +enum 
ports_mode { + ENGX2_PORTX1, + ENGX2_PORTX2, + ENGX1_PORTX1, + ENGX1_PORTX2, + ENGX1_PORTX4, + MAX_PORTS_MODE +}; + +/* Protocol-common error code */ +enum protocol_common_error_code { + COMMON_ERR_CODE_OK = 0, + COMMON_ERR_CODE_ERROR, + MAX_PROTOCOL_COMMON_ERROR_CODE +}; + +/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */ +enum protocol_version_array_key { + ETH_VER_KEY = 0, + ROCE_VER_KEY, + MAX_PROTOCOL_VERSION_ARRAY_KEY +}; + +/* RDMA TX Stats */ +struct rdma_sent_stats { + struct regpair sent_bytes; + struct regpair sent_pkts; +}; + +/* Pstorm non-triggering VF zone */ +struct pstorm_non_trigger_vf_zone { + struct eth_pstorm_per_queue_stat eth_queue_stat; + struct rdma_sent_stats rdma_stats; +}; + +/* Pstorm VF zone */ +struct pstorm_vf_zone { + struct pstorm_non_trigger_vf_zone non_trigger; + struct regpair reserved[7]; +}; + +/* Ramrod Header of SPQE */ +struct ramrod_header { + __le32 cid; + u8 cmd_id; + u8 protocol_id; + __le16 echo; +}; + +/* RDMA RX Stats */ +struct rdma_rcv_stats { + struct regpair rcv_bytes; + struct regpair rcv_pkts; +}; + +/* Data for update QCN/DCQCN RL ramrod */ +struct rl_update_ramrod_data { + u8 qcn_update_param_flg; + u8 dcqcn_update_param_flg; + u8 rl_init_flg; + u8 rl_start_flg; + u8 rl_stop_flg; + u8 rl_id_first; + u8 rl_id_last; + u8 rl_dc_qcn_flg; + u8 dcqcn_reset_alpha_on_idle; + u8 rl_bc_stage_th; + u8 rl_timer_stage_th; + u8 reserved1; + __le32 rl_bc_rate; + __le16 rl_max_rate; + __le16 rl_r_ai; + __le16 rl_r_hai; + __le16 dcqcn_g; + __le32 dcqcn_k_us; + __le32 dcqcn_timeuot_us; + __le32 qcn_timeuot_us; + __le32 reserved2; +}; + +/* Slowpath Element (SPQE) */ +struct slow_path_element { + struct ramrod_header hdr; + struct regpair data_ptr; +}; + +/* Tstorm non-triggering VF zone */ +struct tstorm_non_trigger_vf_zone { + struct rdma_rcv_stats rdma_stats; +}; + +struct tstorm_per_port_stat { + struct regpair trunc_error_discard; + struct regpair mac_error_discard; + struct regpair mftag_filter_discard; + struct regpair eth_mac_filter_discard; + struct regpair ll2_mac_filter_discard; + struct regpair ll2_conn_disabled_discard; + struct regpair iscsi_irregular_pkt; + struct regpair fcoe_irregular_pkt; + struct regpair roce_irregular_pkt; + struct regpair iwarp_irregular_pkt; + struct regpair eth_irregular_pkt; + struct regpair toe_irregular_pkt; + struct regpair preroce_irregular_pkt; + struct regpair eth_gre_tunn_filter_discard; + struct regpair eth_vxlan_tunn_filter_discard; + struct regpair eth_geneve_tunn_filter_discard; + struct regpair eth_gft_drop_pkt; +}; + +/* Tstorm VF zone */ +struct tstorm_vf_zone { + struct tstorm_non_trigger_vf_zone non_trigger; +}; + +/* Tunnel classification scheme */ +enum tunnel_clss { + TUNNEL_CLSS_MAC_VLAN = 0, + TUNNEL_CLSS_MAC_VNI, + TUNNEL_CLSS_INNER_MAC_VLAN, + TUNNEL_CLSS_INNER_MAC_VNI, + TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE, + MAX_TUNNEL_CLSS +}; + +/* Ustorm non-triggering VF zone */ +struct ustorm_non_trigger_vf_zone { + struct eth_ustorm_per_queue_stat eth_queue_stat; + struct regpair vf_pf_msg_addr; +}; + +/* Ustorm triggering VF zone */ +struct ustorm_trigger_vf_zone { + u8 vf_pf_msg_valid; + u8 reserved[7]; +}; + +/* Ustorm VF zone */ +struct ustorm_vf_zone { + struct ustorm_non_trigger_vf_zone non_trigger; + struct ustorm_trigger_vf_zone trigger; +}; + +/* VF-PF channel data */ +struct vf_pf_channel_data { + __le32 ready; + u8 valid; + u8 reserved0; + __le16 reserved1; +}; + +/* Ramrod data for VF start ramrod */ +struct vf_start_ramrod_data { + u8 vf_id; + u8 enable_flr_ack; + __le16 
opaque_fid; + u8 personality; + u8 reserved[7]; + struct hsi_fp_ver_struct hsi_fp_ver; + +}; + +/* Ramrod data for VF start ramrod */ +struct vf_stop_ramrod_data { + u8 vf_id; + u8 reserved0; + __le16 reserved1; + __le32 reserved2; +}; + +/* VF zone size mode */ +enum vf_zone_size_mode { + VF_ZONE_SIZE_MODE_DEFAULT, + VF_ZONE_SIZE_MODE_DOUBLE, + VF_ZONE_SIZE_MODE_QUAD, + MAX_VF_ZONE_SIZE_MODE +}; + +/* Xstorm non-triggering VF zone */ +struct xstorm_non_trigger_vf_zone { + struct regpair non_edpm_ack_pkts; +}; + +/* Tstorm VF zone */ +struct xstorm_vf_zone { + struct xstorm_non_trigger_vf_zone non_trigger; +}; + +/* Attentions status block */ +struct atten_status_block { + __le32 atten_bits; + __le32 atten_ack; + __le16 reserved0; + __le16 sb_index; + __le32 reserved1; +}; + +/* DMAE command */ +struct dmae_cmd { + __le32 opcode; +#define DMAE_CMD_SRC_MASK 0x1 +#define DMAE_CMD_SRC_SHIFT 0 +#define DMAE_CMD_DST_MASK 0x3 +#define DMAE_CMD_DST_SHIFT 1 +#define DMAE_CMD_C_DST_MASK 0x1 +#define DMAE_CMD_C_DST_SHIFT 3 +#define DMAE_CMD_CRC_RESET_MASK 0x1 +#define DMAE_CMD_CRC_RESET_SHIFT 4 +#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5 +#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6 +#define DMAE_CMD_COMP_FUNC_MASK 0x1 +#define DMAE_CMD_COMP_FUNC_SHIFT 7 +#define DMAE_CMD_COMP_WORD_EN_MASK 0x1 +#define DMAE_CMD_COMP_WORD_EN_SHIFT 8 +#define DMAE_CMD_COMP_CRC_EN_MASK 0x1 +#define DMAE_CMD_COMP_CRC_EN_SHIFT 9 +#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7 +#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10 +#define DMAE_CMD_RESERVED1_MASK 0x1 +#define DMAE_CMD_RESERVED1_SHIFT 13 +#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3 +#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14 +#define DMAE_CMD_ERR_HANDLING_MASK 0x3 +#define DMAE_CMD_ERR_HANDLING_SHIFT 16 +#define DMAE_CMD_PORT_ID_MASK 0x3 +#define DMAE_CMD_PORT_ID_SHIFT 18 +#define DMAE_CMD_SRC_PF_ID_MASK 0xF +#define DMAE_CMD_SRC_PF_ID_SHIFT 20 +#define DMAE_CMD_DST_PF_ID_MASK 0xF +#define DMAE_CMD_DST_PF_ID_SHIFT 24 +#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28 +#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29 +#define DMAE_CMD_RESERVED2_MASK 0x3 +#define DMAE_CMD_RESERVED2_SHIFT 30 + __le32 src_addr_lo; + __le32 src_addr_hi; + __le32 dst_addr_lo; + __le32 dst_addr_hi; + __le16 length_dw; + __le16 opcode_b; +#define DMAE_CMD_SRC_VF_ID_MASK 0xFF +#define DMAE_CMD_SRC_VF_ID_SHIFT 0 +#define DMAE_CMD_DST_VF_ID_MASK 0xFF +#define DMAE_CMD_DST_VF_ID_SHIFT 8 + __le32 comp_addr_lo; + __le32 comp_addr_hi; + __le32 comp_val; + __le32 crc32; + __le32 crc_32_c; + __le16 crc16; + __le16 crc16_c; + __le16 crc10; + __le16 error_bit_reserved; +#define DMAE_CMD_ERROR_BIT_MASK 0x1 +#define DMAE_CMD_ERROR_BIT_SHIFT 0 +#define DMAE_CMD_RESERVED_MASK 0x7FFF +#define DMAE_CMD_RESERVED_SHIFT 1 + __le16 xsum16; + __le16 xsum8; +}; + +enum dmae_cmd_comp_crc_en_enum { + dmae_cmd_comp_crc_disabled, + dmae_cmd_comp_crc_enabled, + MAX_DMAE_CMD_COMP_CRC_EN_ENUM +}; + +enum dmae_cmd_comp_func_enum { + dmae_cmd_comp_func_to_src, + dmae_cmd_comp_func_to_dst, + MAX_DMAE_CMD_COMP_FUNC_ENUM +}; + +enum dmae_cmd_comp_word_en_enum { + dmae_cmd_comp_word_disabled, + dmae_cmd_comp_word_enabled, + MAX_DMAE_CMD_COMP_WORD_EN_ENUM +}; + +enum dmae_cmd_c_dst_enum { + dmae_cmd_c_dst_pcie, + dmae_cmd_c_dst_grc, + MAX_DMAE_CMD_C_DST_ENUM +}; + +enum dmae_cmd_dst_enum { + dmae_cmd_dst_none_0, + dmae_cmd_dst_pcie, + dmae_cmd_dst_grc, + dmae_cmd_dst_none_3, + 
MAX_DMAE_CMD_DST_ENUM +}; + +enum dmae_cmd_error_handling_enum { + dmae_cmd_error_handling_send_regular_comp, + dmae_cmd_error_handling_send_comp_with_err, + dmae_cmd_error_handling_dont_send_comp, + MAX_DMAE_CMD_ERROR_HANDLING_ENUM +}; + +enum dmae_cmd_src_enum { + dmae_cmd_src_pcie, + dmae_cmd_src_grc, + MAX_DMAE_CMD_SRC_ENUM +}; + +struct mstorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct ystorm_core_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +/* DMAE parameters */ +struct qed_dmae_params { + u32 flags; +/* If QED_DMAE_PARAMS_RW_REPL_SRC flag is set and the + * source is a block of length DMAE_MAX_RW_SIZE and the + * destination is larger, the source block will be duplicated as + * many times as required to fill the destination block. 
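+ * For example (an illustrative sketch, not a snippet taken from this driver),
+ * a caller could request source replication on an otherwise zeroed
+ * struct qed_dmae_params with:
+ *     SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);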
This is + * used mostly to write a zeroed buffer to destination address + * using DMA + */ +#define QED_DMAE_PARAMS_RW_REPL_SRC_MASK 0x1 +#define QED_DMAE_PARAMS_RW_REPL_SRC_SHIFT 0 +#define QED_DMAE_PARAMS_SRC_VF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_SRC_VF_VALID_SHIFT 1 +#define QED_DMAE_PARAMS_DST_VF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_DST_VF_VALID_SHIFT 2 +#define QED_DMAE_PARAMS_COMPLETION_DST_MASK 0x1 +#define QED_DMAE_PARAMS_COMPLETION_DST_SHIFT 3 +#define QED_DMAE_PARAMS_PORT_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_PORT_VALID_SHIFT 4 +#define QED_DMAE_PARAMS_SRC_PF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_SRC_PF_VALID_SHIFT 5 +#define QED_DMAE_PARAMS_DST_PF_VALID_MASK 0x1 +#define QED_DMAE_PARAMS_DST_PF_VALID_SHIFT 6 +#define QED_DMAE_PARAMS_RESERVED_MASK 0x1FFFFFF +#define QED_DMAE_PARAMS_RESERVED_SHIFT 7 + u8 src_vfid; + u8 dst_vfid; + u8 port_id; + u8 src_pfid; + u8 dst_pfid; + u8 reserved1; + __le16 reserved2; +}; + +/* IGU cleanup command */ +struct igu_cleanup { + __le32 sb_id_and_flags; +#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF +#define IGU_CLEANUP_RESERVED0_SHIFT 0 +#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 +#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27 +#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7 +#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28 +#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1 +#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; +}; + +/* IGU firmware driver command */ +union igu_command { + struct igu_prod_cons_update prod_cons_update; + struct igu_cleanup cleanup; +}; + +/* IGU firmware driver command */ +struct igu_command_reg_ctrl { + __le16 opaque_fid; + __le16 igu_command_reg_ctrl_fields; +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0 +#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7 +#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15 +}; + +/* IGU mapping line structure */ +struct igu_mapping_line { + __le32 igu_mapping_line_fields; +#define IGU_MAPPING_LINE_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_VALID_SHIFT 0 +#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1 +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9 +#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17 +#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F +#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18 +#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF +#define IGU_MAPPING_LINE_RESERVED_SHIFT 24 +}; + +/* IGU MSIX line structure */ +struct igu_msix_vector { + struct regpair address; + __le32 data; + __le32 msix_vector_fields; +#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1 +#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0 +#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF +#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1 +#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF +#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16 +#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF +#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 +}; + +/* per encapsulation type enabling flags */ +struct prs_reg_encapsulation_type_en { + u8 flags; +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1 +#define 
PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6 +}; + +enum pxp_tph_st_hint { + TPH_ST_HINT_BIDIR, + TPH_ST_HINT_REQUESTER, + TPH_ST_HINT_TARGET, + TPH_ST_HINT_TARGET_PRIO, + MAX_PXP_TPH_ST_HINT +}; + +/* QM hardware structure of enable bypass credit mask */ +struct qm_rf_bypass_mask { + u8 flags; +#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1 +#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2 +#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3 +#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4 +#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7 +}; + +/* QM hardware structure of opportunistic credit mask */ +struct qm_rf_opportunistic_mask { + __le16 flags; +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9 +}; + +/* QM hardware structure of QM map memory */ +struct qm_rf_pq_map { + __le32 reg; +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +}; + +/* Completion params for aggregated interrupt completion */ +struct sdm_agg_int_comp_params { + __le16 params; +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +#define 
SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 +}; + +/* SDM operation gen command (generate aggregative interrupt) */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE_MASK 0xF +#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 +#define SDM_OP_GEN_RESERVED_MASK 0xFFF +#define SDM_OP_GEN_RESERVED_SHIFT 20 +}; + +/* Physical memory descriptor */ +struct phys_mem_desc { + dma_addr_t phys_addr; + void *virt_addr; + u32 size; /* In bytes */ +}; + +/* Virtual memory descriptor */ +struct virt_mem_desc { + void *ptr; + u32 size; /* In bytes */ +}; + +/********************************/ +/* HSI Init Functions constants */ +/********************************/ + +/* Number of VLAN priorities */ +#define NUM_OF_VLAN_PRIORITIES 8 + +/* BRB RAM init requirements */ +struct init_brb_ram_req { + u32 guranteed_per_tc; + u32 headroom_per_tc; + u32 min_pkt_size; + u32 max_ports_per_engine; + u8 num_active_tcs[MAX_NUM_PORTS]; +}; + +/* ETS per-TC init requirements */ +struct init_ets_tc_req { + u8 use_sp; + u8 use_wfq; + u16 weight; +}; + +/* ETS init requirements */ +struct init_ets_req { + u32 mtu; + struct init_ets_tc_req tc_req[NUM_OF_TCS]; +}; + +/* NIG LB RL init requirements */ +struct init_nig_lb_rl_req { + u16 lb_mac_rate; + u16 lb_rate; + u32 mtu; + u16 tc_rate[NUM_OF_PHYS_TCS]; +}; + +/* NIG TC mapping for each priority */ +struct init_nig_pri_tc_map_entry { + u8 tc_id; + u8 valid; +}; + +/* NIG priority to TC map init requirements */ +struct init_nig_pri_tc_map_req { + struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; +}; + +/* QM per global RL init parameters */ +struct init_qm_global_rl_params { + u8 type; + u8 reserved0; + u16 reserved1; + u32 rate_limit; +}; + +/* QM per-port init parameters */ +struct init_qm_port_params { + u16 active_phys_tcs; + u16 num_pbf_cmd_lines; + u16 num_btb_blocks; + u8 active; + u8 reserved; +}; + +/* QM per-PQ init parameters */ +struct init_qm_pq_params { + u16 vport_id; + u16 rl_id; + u8 rl_valid; + u8 tc_id; + u8 wrr_group; + u8 port_id; +}; + +/* QM per RL init parameters */ +struct init_qm_rl_params { + u32 vport_rl; + u8 vport_rl_type; + u8 reserved[3]; +}; + +/* QM Rate Limiter types */ +enum init_qm_rl_type { + QM_RL_TYPE_NORMAL, + QM_RL_TYPE_QCN, + MAX_INIT_QM_RL_TYPE +}; + +/* QM per-vport init parameters */ +struct init_qm_vport_params { + u16 wfq; + u16 reserved; + u16 tc_wfq[NUM_OF_TCS]; + u16 first_tx_pq_id[NUM_OF_TCS]; +}; + +/**************************************/ +/* Init Tool HSI constants and macros */ +/**************************************/ + +/* Width of GRC address in bits (addresses are specified in dwords) */ +#define GRC_ADDR_BITS 23 +#define MAX_GRC_ADDR (BIT(GRC_ADDR_BITS) - 1) + +/* indicates an init that should be applied to any phase ID */ +#define ANY_PHASE_ID 0xffff + +/* Max size in dwords of a zipped array */ +#define MAX_ZIPPED_SIZE 8192 +enum chip_ids { + CHIP_BB, + CHIP_K2, + MAX_CHIP_IDS +}; + +struct fw_asserts_ram_section { + __le16 section_ram_line_offset; + __le16 section_ram_line_size; + u8 list_dword_offset; + u8 list_element_dword_size; + u8 list_num_elements; + u8 list_next_index_dword_offset; +}; + +struct fw_ver_num { + u8 major; + u8 minor; + u8 rev; + u8 eng; +}; + +struct fw_ver_info { + __le16 tools_ver; + u8 image_id; + u8 reserved1; + 
struct fw_ver_num num; + __le32 timestamp; + __le32 reserved2; +}; + +struct fw_info { + struct fw_ver_info ver; + struct fw_asserts_ram_section fw_asserts_section; +}; + +struct fw_info_location { + __le32 grc_addr; + __le32 size; +}; + +enum init_modes { + MODE_BB_A0_DEPRECATED, + MODE_BB, + MODE_K2, + MODE_ASIC, + MODE_EMUL_REDUCED, + MODE_EMUL_FULL, + MODE_FPGA, + MODE_CHIPSIM, + MODE_SF, + MODE_MF_SD, + MODE_MF_SI, + MODE_PORTS_PER_ENG_1, + MODE_PORTS_PER_ENG_2, + MODE_PORTS_PER_ENG_4, + MODE_100G, + MODE_SKIP_PRAM_INIT, + MODE_EMUL_MAC, + MAX_INIT_MODES +}; + +enum init_phases { + PHASE_ENGINE, + PHASE_PORT, + PHASE_PF, + PHASE_VF, + PHASE_QM_PF, + MAX_INIT_PHASES +}; + +enum init_split_types { + SPLIT_TYPE_NONE, + SPLIT_TYPE_PORT, + SPLIT_TYPE_PF, + SPLIT_TYPE_PORT_PF, + SPLIT_TYPE_VF, + MAX_INIT_SPLIT_TYPES +}; + +/* Binary buffer header */ +struct bin_buffer_hdr { + u32 offset; + u32 length; +}; + +/* Binary init buffer types */ +enum bin_init_buffer_type { + BIN_BUF_INIT_FW_VER_INFO, + BIN_BUF_INIT_CMD, + BIN_BUF_INIT_VAL, + BIN_BUF_INIT_MODE_TREE, + BIN_BUF_INIT_IRO, + BIN_BUF_INIT_OVERLAYS, + MAX_BIN_INIT_BUFFER_TYPE +}; + +/* FW overlay buffer header */ +struct fw_overlay_buf_hdr { + u32 data; +#define FW_OVERLAY_BUF_HDR_STORM_ID_MASK 0xFF +#define FW_OVERLAY_BUF_HDR_STORM_ID_SHIFT 0 +#define FW_OVERLAY_BUF_HDR_BUF_SIZE_MASK 0xFFFFFF +#define FW_OVERLAY_BUF_HDR_BUF_SIZE_SHIFT 8 +}; + +/* init array header: raw */ +struct init_array_raw_hdr { + __le32 data; +#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF +#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 +}; + +/* init array header: standard */ +struct init_array_standard_hdr { + __le32 data; +#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 +}; + +/* init array header: zipped */ +struct init_array_zipped_hdr { + __le32 data; +#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 +}; + +/* init array header: pattern */ +struct init_array_pattern_hdr { + __le32 data; +#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4 +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8 +}; + +/* init array header union */ +union init_array_hdr { + struct init_array_raw_hdr raw; + struct init_array_standard_hdr standard; + struct init_array_zipped_hdr zipped; + struct init_array_pattern_hdr pattern; +}; + +/* init array types */ +enum init_array_types { + INIT_ARR_STANDARD, + INIT_ARR_ZIPPED, + INIT_ARR_PATTERN, + MAX_INIT_ARRAY_TYPES +}; + +/* init operation: callback */ +struct init_callback_op { + __le32 op_data; +#define INIT_CALLBACK_OP_OP_MASK 0xF +#define INIT_CALLBACK_OP_OP_SHIFT 0 +#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 + __le16 callback_id; + __le16 block_id; +}; + +/* init operation: delay */ +struct init_delay_op { + __le32 op_data; +#define INIT_DELAY_OP_OP_MASK 0xF +#define INIT_DELAY_OP_OP_SHIFT 0 +#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_DELAY_OP_RESERVED_SHIFT 4 + __le32 delay; +}; + +/* init operation: 
if_mode */ +struct init_if_mode_op { + __le32 op_data; +#define INIT_IF_MODE_OP_OP_MASK 0xF +#define INIT_IF_MODE_OP_OP_SHIFT 0 +#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 +#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 + __le16 reserved2; + __le16 modes_buf_offset; +}; + +/* init operation: if_phase */ +struct init_if_phase_op { + __le32 op_data; +#define INIT_IF_PHASE_OP_OP_MASK 0xF +#define INIT_IF_PHASE_OP_OP_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 4 +#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 + __le32 phase_data; +#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF +#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF +#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 +#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF +#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 +}; + +/* init mode operators */ +enum init_mode_ops { + INIT_MODE_OP_NOT, + INIT_MODE_OP_OR, + INIT_MODE_OP_AND, + MAX_INIT_MODE_OPS +}; + +/* init operation: raw */ +struct init_raw_op { + __le32 op_data; +#define INIT_RAW_OP_OP_MASK 0xF +#define INIT_RAW_OP_OP_SHIFT 0 +#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF +#define INIT_RAW_OP_PARAM1_SHIFT 4 + __le32 param2; +}; + +/* init array params */ +struct init_op_array_params { + __le16 size; + __le16 offset; +}; + +/* Write init operation arguments */ +union init_write_args { + __le32 inline_val; + __le32 zeros_count; + __le32 array_offset; + struct init_op_array_params runtime; +}; + +/* init operation: write */ +struct init_write_op { + __le32 data; +#define INIT_WRITE_OP_OP_MASK 0xF +#define INIT_WRITE_OP_OP_SHIFT 0 +#define INIT_WRITE_OP_SOURCE_MASK 0x7 +#define INIT_WRITE_OP_SOURCE_SHIFT 4 +#define INIT_WRITE_OP_RESERVED_MASK 0x1 +#define INIT_WRITE_OP_RESERVED_SHIFT 7 +#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 +#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 +#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_WRITE_OP_ADDRESS_SHIFT 9 + union init_write_args args; +}; + +/* init operation: read */ +struct init_read_op { + __le32 op_data; +#define INIT_READ_OP_OP_MASK 0xF +#define INIT_READ_OP_OP_SHIFT 0 +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 +#define INIT_READ_OP_RESERVED_MASK 0x1 +#define INIT_READ_OP_RESERVED_SHIFT 8 +#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_READ_OP_ADDRESS_SHIFT 9 + __le32 expected_val; +}; + +/* Init operations union */ +union init_op { + struct init_raw_op raw; + struct init_write_op write; + struct init_read_op read; + struct init_if_mode_op if_mode; + struct init_if_phase_op if_phase; + struct init_callback_op callback; + struct init_delay_op delay; +}; + +/* Init command operation types */ +enum init_op_types { + INIT_OP_READ, + INIT_OP_WRITE, + INIT_OP_IF_MODE, + INIT_OP_IF_PHASE, + INIT_OP_DELAY, + INIT_OP_CALLBACK, + MAX_INIT_OP_TYPES +}; + +/* init polling types */ +enum init_poll_types { + INIT_POLL_NONE, + INIT_POLL_EQ, + INIT_POLL_OR, + INIT_POLL_AND, + MAX_INIT_POLL_TYPES +}; + +/* init source types */ +enum init_source_types { + INIT_SRC_INLINE, + INIT_SRC_ZEROS, + INIT_SRC_ARRAY, + INIT_SRC_RUNTIME, + MAX_INIT_SOURCE_TYPES +}; + +/* Internal RAM Offsets macro data */ +struct iro { + u32 base; + u16 m1; + u16 m2; + u16 m3; + u16 size; +}; + +/* Win 2 */ +#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL + +/* Win 3 */ +#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL + +/* Win 4 */ +#define 
GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL + +/* Win 5 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL + +/* Win 6 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM_2048 0x013000UL + +/* Win 7 */ +#define GTT_BAR0_MAP_REG_USDM_RAM 0x014000UL + +/* Win 8 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x015000UL + +/* Win 9 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x016000UL + +/* Win 10 */ +#define GTT_BAR0_MAP_REG_XSDM_RAM 0x017000UL + +/* Win 11 */ +#define GTT_BAR0_MAP_REG_XSDM_RAM_1024 0x018000UL + +/* Win 12 */ +#define GTT_BAR0_MAP_REG_YSDM_RAM 0x019000UL + +/* Win 13 */ +#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL + +/* Returns the VOQ based on port and TC */ +#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \ + PURE_LB_TC ? NUM_OF_PHYS_TCS *\ + MAX_NUM_PORTS_BB + \ + (port) : (port) * \ + (max_phys_tcs_per_port) + (tc)) + +struct init_qm_pq_params; + +/** + * qed_qm_pf_mem_size(): Prepare QM ILT sizes. + * + * @num_pf_cids: Number of connections used by this PF. + * @num_vf_cids: Number of connections used by VFs of this PF. + * @num_tids: Number of tasks used by this PF. + * @num_pf_pqs: Number of PQs used by this PF. + * @num_vf_pqs: Number of PQs used by VFs of this PF. + * + * Return: The required host memory size in 4KB units. + * + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. + */ +u32 qed_qm_pf_mem_size(u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs); + +struct qed_qm_common_rt_init_params { + u8 max_ports_per_engine; + u8 max_phys_tcs_per_port; + bool pf_rl_en; + bool pf_wfq_en; + bool global_rl_en; + bool vport_wfq_en; + struct init_qm_port_params *port_params; + struct init_qm_global_rl_params + global_rl_params[COMMON_MAX_QM_GLOBAL_RLS]; +}; + +/** + * qed_qm_common_rt_init(): Prepare QM runtime init values for the + * engine phase. + * + * @p_hwfn: HW device data. + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. + */ +int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_common_rt_init_params *p_params); + +struct qed_qm_pf_rt_init_params { + u8 port_id; + u8 pf_id; + u8 max_phys_tcs_per_port; + bool is_pf_loading; + u32 num_pf_cids; + u32 num_vf_cids; + u32 num_tids; + u16 start_pq; + u16 num_pf_pqs; + u16 num_vf_pqs; + u16 start_vport; + u16 num_vports; + u16 start_rl; + u16 num_rls; + u16 pf_wfq; + u32 pf_rl; + u32 link_speed; + struct init_qm_pq_params *pq_params; + struct init_qm_vport_params *vport_params; + struct init_qm_rl_params *rl_params; +}; + +/** + * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @p_params: Parameters. + * + * Return: 0 on success, -1 on error. + */ +int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params); + +/** + * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @pf_id: PF ID + * @pf_wfq: WFQ weight. Must be non-zero. + * + * Return: 0 on success, -1 on error. + */ +int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq); + +/** + * qed_init_pf_rl(): Initializes the rate limit of the specified PF + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF ID. + * @pf_rl: rate limit in Mb/sec units + * + * Return: 0 on success, -1 on error. 
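+ *
+ * Illustrative usage sketch (hypothetical values; assumes a PTT window is
+ * already acquired), capping PF 0 at 10 Gb/s:
+ *
+ *     rc = qed_init_pf_rl(p_hwfn, p_ptt, 0, 10000);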
+ */ +int qed_init_pf_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl); + +/** + * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @first_tx_pq_id: An array containing the first Tx PQ ID associated + * with the VPORT for each TC. This array is filled by + * qed_qm_pf_rt_init + * @wfq: WFQ weight. Must be non-zero. + * + * Return: 0 on success, -1 on error. + */ +int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); + +/** + * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified + * VPORT and TC. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC. + * (filled by qed_qm_pf_rt_init). + * @weight: VPORT+TC WFQ weight. + * + * Return: 0 on success, -1 on error. + */ +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 weight); + +/** + * qed_init_global_rl(): Initializes the rate limit of the specified + * rate limiter. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @rl_id: RL ID. + * @rate_limit: Rate limit in Mb/sec units + * @vport_rl_type: Vport RL type. + * + * Return: 0 on success, -1 on error. + */ +int qed_init_global_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type); + +/** + * qed_send_qm_stop_cmd(): Sends a stop command to the QM. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @is_release_cmd: true for release, false for stop. + * @is_tx_pq: true for Tx PQs, false for Other PQs. + * @start_pq: first PQ ID to stop + * @num_pqs: Number of PQs to stop, starting from start_pq. + * + * Return: Bool, true if successful, false if timeout occurred while waiting + * for QM command done. + */ +bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, u16 start_pq, u16 num_pqs); + +/** + * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: vxlan destination udp port. + * + * Return: Void. + */ +void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); + +/** + * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @vxlan_enable: vxlan enable flag. + * + * Return: Void. + */ +void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool vxlan_enable); + +/** + * qed_set_gre_enable(): Enable or disable GRE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_gre_enable: Eth GRE enable flag. + * @ip_gre_enable: IP GRE enable flag. + * + * Return: Void. + */ +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_gre_enable, bool ip_gre_enable); + +/** + * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: Geneve destination udp port. + * + * Return: Void.
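+ *
+ * Illustrative usage sketch (hypothetical call site); 6081 is the IANA
+ * default GENEVE UDP port:
+ *
+ *     qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);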
+ */ +void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port); + +/** + * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_geneve_enable: Eth GENEVE enable flag. + * @ip_geneve_enable: IP GENEVE enable flag. + * + * Return: Void. + */ +void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_geneve_enable, bool ip_geneve_enable); + +void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool enable); + +/** + * qed_gft_disable(): Disable GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to disable GFT. + * + * Return: Void. + */ +void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); + +/** + * qed_gft_config(): Enable and configure HW for GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to enable GFT. + * @tcp: Set profile tcp packets. + * @udp: Set profile udp packet. + * @ipv4: Set profile ipv4 packet. + * @ipv6: Set profile ipv6 packet. + * @profile_type: Defines which packet fields to match on. Use enum gft_profile_type. + * + * Return: Void. + */ +void qed_gft_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 pf_id, + bool tcp, + bool udp, + bool ipv4, bool ipv6, enum gft_profile_type profile_type); + +/** + * qed_enable_context_validation(): Enable and configure context + * validation. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * + * Return: Void. + */ +void qed_enable_context_validation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_calc_session_ctx_validation(): Calculate validation byte for + * session context. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @cid: Context cid. + * + * Return: Void. + */ +void qed_calc_session_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 cid); + +/** + * qed_calc_task_ctx_validation(): Calculate validation byte for task + * context. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @tid: Context tid. + * + * Return: Void. + */ +void qed_calc_task_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 tid); + +/** + * qed_memset_session_ctx(): Memset session context to 0 while + * preserving validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. + * + * Return: Void. + */ +void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +/** + * qed_memset_task_ctx(): Memset task context to 0 while preserving + * validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. + * + * Return: Void. + */ +void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); + +#define NUM_STORMS 6 + +/** + * qed_get_protocol_type_str(): Get a string for Protocol type. + * + * @protocol_type: Protocol type (using enum protocol_type). + * + * Return: String. + */ +const char *qed_get_protocol_type_str(u32 protocol_type); + +/** + * qed_get_ramrod_cmd_id_str(): Get a string for Ramrod command ID. + * + * @protocol_type: Protocol type (using enum protocol_type).
+ * @ramrod_cmd_id: Ramrod command ID (using per-protocol enum <protocol>_ramrod_cmd_id). + * + * Return: String. + */ +const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id); + +/** + * qed_set_rdma_error_level(): Sets the RDMA assert level. If the severity + * of the error is above this level, the FW will assert. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @assert_level: An array of assert levels for each storm. + * + * Return: Void. + */ +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]); + +/** + * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory. + * + * @p_hwfn: HW device data. + * @fw_overlay_in_buf: The input FW overlay buffer. + * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes. + * Must be aligned to dwords. + * + * Return: A pointer to the allocated overlay memory, + * or NULL in case of failure. + */ +struct phys_mem_desc * +qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, + const u32 *const fw_overlay_in_buf, + u32 buf_size_in_bytes); + +/** + * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_overlay_mem: The allocated FW overlay memory. + * + * Return: Void. + */ +void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct phys_mem_desc *fw_overlay_mem); + +/** + * qed_fw_overlay_mem_free(): Frees the FW overlay memory. + * + * @p_hwfn: HW device data. + * @fw_overlay_mem: The allocated FW overlay memory to free. + * + * Return: Void. + */ +void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, + struct phys_mem_desc **fw_overlay_mem); + +#define PCICFG_OFFSET 0x2000 +#define GRC_CONFIG_REG_PF_INIT_VF 0x624 + +/* First VF_NUM for PF is encoded in this register. + * The number of VFs assigned to a PF is assumed to be a multiple of 8. + * Software should program these bits based on Total Number of VFs programmed + * for each PF.
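+ * An illustrative read path (qed_rd() is assumed here; it is declared
+ * elsewhere in the driver, not in this header):
+ *     first_vf = qed_rd(p_hwfn, p_ptt, PCICFG_OFFSET + GRC_CONFIG_REG_PF_INIT_VF) &
+ *                GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK;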
+ * Since registers from 0x000-0x7ff are split across functions, each PF will + * have the same location for the same 4 bits. + */ +#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff + +/* Runtime array offsets */ +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_VF_ICID_BIT_SHIFT_NORM_RT_OFFSET 16 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 17 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 18 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 21 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 22 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 23 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 24 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 25 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 26 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 762 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 1498 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 5914 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 5915 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 5916 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 5917 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 5918 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 5919 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 5920 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 5921 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 5922 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 5923 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 5924 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 5925 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 5926 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 5927 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 5928 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 5929 +#define SRC_REG_FIRSTFREE_RT_OFFSET 5930 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 5932 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 5934 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 5935 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 5936 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 5937 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 5938 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 5939 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 5940 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 5941 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 5942 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 5943 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 5944 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 5945 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 5946 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 5947 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 5948 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 5949 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 5950 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 5951 +#define
PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 5952 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 5953 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5954 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5955 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 5956 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 5957 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 5958 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 5959 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 5960 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 5961 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 5962 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 5963 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 5964 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 5965 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 5966 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 5967 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 27967 +#define PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET 27968 +#define PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET 27969 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 27970 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 27971 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 27972 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 27973 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 27974 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 27975 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 27976 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 27977 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 27978 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 27979 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 28395 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 28907 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 28908 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 28909 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 28910 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 28911 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 28912 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 28913 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 28914 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 28915 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 28916 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 28917 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 28918 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 28919 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 28920 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 28921 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 28922 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 28923 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 28924 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 28925 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 28926 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 28927 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 28928 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 28929 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 28930 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 28931 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 28932 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 28933 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 28934 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 28935 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 28936 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 28937 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 28938 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 28939 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 28940 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 28941 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 28942 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 28943 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 28944 +#define 
QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 28945 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 28946 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 28947 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 28948 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 28949 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 28950 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 28951 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 28952 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 28953 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 28954 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 28955 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 28956 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 28957 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 28958 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 28959 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 28960 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 28961 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 28962 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 28963 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 28964 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 28965 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 28966 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 28967 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 28968 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 28969 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 28970 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 28971 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 28972 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 28973 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 28974 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_PTRTBLOTHER_RT_OFFSET 29102 +#define QM_REG_PTRTBLOTHER_RT_SIZE 256 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29358 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29378 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29398 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29399 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29400 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29401 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29402 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29403 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29404 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29405 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29406 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29407 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29408 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29409 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29410 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29411 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29412 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29413 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29414 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29415 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29416 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29417 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29418 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29419 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29420 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29421 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29422 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29423 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29424 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29425 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29426 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29427 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29428 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29429 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29430 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29431 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29432 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29433 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29434 +#define 
QM_REG_PQTX2PF_10_RT_OFFSET 29435 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29436 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29437 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29438 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29439 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29440 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29441 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29442 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29443 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29444 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29445 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29446 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29447 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29448 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29449 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29450 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29451 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29452 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29453 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29454 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29455 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29456 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29457 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29458 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29459 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29460 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29461 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29462 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29463 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29464 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29465 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29466 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29467 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29468 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29469 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29470 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29471 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29472 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29473 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29474 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29475 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29476 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29477 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29478 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29479 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29480 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29481 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29482 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29483 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29484 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29485 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29486 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29487 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29488 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29489 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29490 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29491 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29492 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29493 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29494 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29495 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29496 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29497 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29498 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29499 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29500 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29501 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29502 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29503 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29504 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29505 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29506 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29507 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29508 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29509 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29510 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29511 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29512 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29513 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29514 +#define 
QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29515 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29516 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29517 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 29773 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30029 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30285 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30286 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30287 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30288 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30304 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30320 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30336 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30337 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30338 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30354 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 30370 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define QM_REG_WFQPFENABLE_RT_OFFSET 30530 +#define QM_REG_WFQVPENABLE_RT_OFFSET 30531 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 30532 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31044 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068 +#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32580 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 33092 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_PTRTBLTX_RT_OFFSET 33604 +#define QM_REG_PTRTBLTX_RT_SIZE 1024 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788 +#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900 +#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916 +#define 
CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979 
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34983 + +#define RUNTIME_ARRAY_SIZE 34984 + +/* Init Callbacks */ +#define DMAE_READY_CB 0 + +/* The eth storm context for the Tstorm */ +struct tstorm_eth_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The eth storm context for the Pstorm */ +struct pstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* The eth storm context for the Xstorm */ +struct xstorm_eth_conn_st_ctx { + __le32 reserved[60]; +}; + +struct xstorm_eth_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define 
XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 
0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id; + __le16 physical_q0; + __le16 e5_reserved1; + 
__le16 edpm_num_bds; + __le16 tx_bd_cons; + __le16 tx_bd_prod; + __le16 updated_qm_pq_id; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; + __le32 reg18; + __le32 reg19; + __le16 word12; + __le16 word13; + __le16 word14; + __le16 word15; +}; + +/* The eth storm context for the Ystorm */ +struct ystorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +struct ystorm_eth_conn_ag_ctx { + u8 byte0; + u8 state; + u8 flags0; +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 tx_q0_int_coallecing_timeset; + u8 byte3; + __le16 word0; + __le32 terminate_spqe; + __le32 reg1; + __le16 tx_bd_cons_upd; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct tstorm_eth_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 
+ u8 flags2; +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 rx_bd_cons; + u8 byte4; + u8 byte5; + __le16 rx_bd_prod; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; +}; + +struct ustorm_eth_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define 
USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 + u8 flags2; +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 tx_bd_cons; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 tx_int_coallecing_timeset; + __le16 tx_drv_bd_cons; + __le16 rx_drv_cqe_cons; +}; + +/* The eth storm context for the Ustorm */ +struct ustorm_eth_conn_st_ctx { + __le32 reserved[40]; +}; + +/* The eth storm context for the Mstorm */ +struct mstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* eth connection context */ +struct eth_conn_context { + struct tstorm_eth_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct pstorm_eth_conn_st_ctx pstorm_st_context; + struct xstorm_eth_conn_st_ctx xstorm_st_context; + struct xstorm_eth_conn_ag_ctx xstorm_ag_context; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct ystorm_eth_conn_st_ctx ystorm_st_context; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; + struct ustorm_eth_conn_st_ctx ustorm_st_context; + struct mstorm_eth_conn_st_ctx mstorm_st_context; +}; + +/* Ethernet filter types: mac/vlan/pair */ +enum eth_error_code { + ETH_OK = 0x00, + ETH_FILTERS_MAC_ADD_FAIL_FULL, + ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2, + ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2, + ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2, + ETH_FILTERS_MAC_DEL_FAIL_NOF, + ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2, + ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2, + ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC, + ETH_FILTERS_VLAN_ADD_FAIL_FULL, + ETH_FILTERS_VLAN_ADD_FAIL_DUP, + ETH_FILTERS_VLAN_DEL_FAIL_NOF, + ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1, + ETH_FILTERS_PAIR_ADD_FAIL_DUP, + ETH_FILTERS_PAIR_ADD_FAIL_FULL, + 
ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC, + ETH_FILTERS_PAIR_DEL_FAIL_NOF, + ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1, + ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC, + ETH_FILTERS_VNI_ADD_FAIL_FULL, + ETH_FILTERS_VNI_ADD_FAIL_DUP, + ETH_FILTERS_GFT_UPDATE_FAIL, + ETH_RX_QUEUE_FAIL_LOAD_VF_DATA, + ETH_FILTERS_GFS_ADD_FILTER_FAIL_MAX_HOPS, + ETH_FILTERS_GFS_ADD_FILTER_FAIL_NO_FREE_ENRTY, + ETH_FILTERS_GFS_ADD_FILTER_FAIL_ALREADY_EXISTS, + ETH_FILTERS_GFS_ADD_FILTER_FAIL_PCI_ERROR, + ETH_FILTERS_GFS_ADD_FINLER_FAIL_MAGIC_NUM_ERROR, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAX_HOPS, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_NO_MATCH_ENRTY, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_PCI_ERROR, + ETH_FILTERS_GFS_DEL_FILTER_FAIL_MAGIC_NUM_ERROR, + MAX_ETH_ERROR_CODE +}; + +/* Opcodes for the event ring */ +enum eth_event_opcode { + ETH_EVENT_UNUSED, + ETH_EVENT_VPORT_START, + ETH_EVENT_VPORT_UPDATE, + ETH_EVENT_VPORT_STOP, + ETH_EVENT_TX_QUEUE_START, + ETH_EVENT_TX_QUEUE_STOP, + ETH_EVENT_RX_QUEUE_START, + ETH_EVENT_RX_QUEUE_UPDATE, + ETH_EVENT_RX_QUEUE_STOP, + ETH_EVENT_FILTERS_UPDATE, + ETH_EVENT_RX_ADD_OPENFLOW_FILTER, + ETH_EVENT_RX_DELETE_OPENFLOW_FILTER, + ETH_EVENT_RX_CREATE_OPENFLOW_ACTION, + ETH_EVENT_RX_ADD_UDP_FILTER, + ETH_EVENT_RX_DELETE_UDP_FILTER, + ETH_EVENT_RX_CREATE_GFT_ACTION, + ETH_EVENT_RX_GFT_UPDATE_FILTER, + ETH_EVENT_TX_QUEUE_UPDATE, + ETH_EVENT_RGFS_ADD_FILTER, + ETH_EVENT_RGFS_DEL_FILTER, + ETH_EVENT_TGFS_ADD_FILTER, + ETH_EVENT_TGFS_DEL_FILTER, + ETH_EVENT_GFS_COUNTERS_REPORT_REQUEST, + MAX_ETH_EVENT_OPCODE +}; + +/* Classify rule types in E2/E3 */ +enum eth_filter_action { + ETH_FILTER_ACTION_UNUSED, + ETH_FILTER_ACTION_REMOVE, + ETH_FILTER_ACTION_ADD, + ETH_FILTER_ACTION_REMOVE_ALL, + MAX_ETH_FILTER_ACTION +}; + +/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */ +struct eth_filter_cmd { + u8 type; + u8 vport_id; + u8 action; + u8 reserved0; + __le32 vni; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan_id; +}; + +/* $$KEEP_ENDIANNESS$$ */ +struct eth_filter_cmd_header { + u8 rx; + u8 tx; + u8 cmd_cnt; + u8 assert_on_error; + u8 reserved1[4]; +}; + +/* Ethernet filter types: mac/vlan/pair */ +enum eth_filter_type { + ETH_FILTER_TYPE_UNUSED, + ETH_FILTER_TYPE_MAC, + ETH_FILTER_TYPE_VLAN, + ETH_FILTER_TYPE_PAIR, + ETH_FILTER_TYPE_INNER_MAC, + ETH_FILTER_TYPE_INNER_VLAN, + ETH_FILTER_TYPE_INNER_PAIR, + ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR, + ETH_FILTER_TYPE_MAC_VNI_PAIR, + ETH_FILTER_TYPE_VNI, + MAX_ETH_FILTER_TYPE +}; + +/* inner to inner vlan priority translation configurations */ +struct eth_in_to_in_pri_map_cfg { + u8 inner_vlan_pri_remap_en; + u8 reserved[7]; + u8 non_rdma_in_to_in_pri_map[8]; + u8 rdma_in_to_in_pri_map[8]; +}; + +/* Eth IPv4 Fragment Type */ +enum eth_ipv4_frag_type { + ETH_IPV4_NOT_FRAG, + ETH_IPV4_FIRST_FRAG, + ETH_IPV4_NON_FIRST_FRAG, + MAX_ETH_IPV4_FRAG_TYPE +}; + +/* eth IPv4 Fragment Type */ +enum eth_ip_type { + ETH_IPV4, + ETH_IPV6, + MAX_ETH_IP_TYPE +}; + +/* Ethernet Ramrod Command IDs */ +enum eth_ramrod_cmd_id { + ETH_RAMROD_UNUSED, + ETH_RAMROD_VPORT_START, + ETH_RAMROD_VPORT_UPDATE, + ETH_RAMROD_VPORT_STOP, + ETH_RAMROD_RX_QUEUE_START, + ETH_RAMROD_RX_QUEUE_STOP, + ETH_RAMROD_TX_QUEUE_START, + ETH_RAMROD_TX_QUEUE_STOP, + ETH_RAMROD_FILTERS_UPDATE, + ETH_RAMROD_RX_QUEUE_UPDATE, + ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION, + ETH_RAMROD_RX_ADD_OPENFLOW_FILTER, + ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER, + ETH_RAMROD_RX_ADD_UDP_FILTER, + ETH_RAMROD_RX_DELETE_UDP_FILTER, + ETH_RAMROD_RX_CREATE_GFT_ACTION, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, + 
ETH_RAMROD_TX_QUEUE_UPDATE, + ETH_RAMROD_RGFS_FILTER_ADD, + ETH_RAMROD_RGFS_FILTER_DEL, + ETH_RAMROD_TGFS_FILTER_ADD, + ETH_RAMROD_TGFS_FILTER_DEL, + ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST, + MAX_ETH_RAMROD_CMD_ID +}; + +/* Return code from eth sp ramrods */ +struct eth_return_code { + u8 value; +#define ETH_RETURN_CODE_ERR_CODE_MASK 0x3F +#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0 +#define ETH_RETURN_CODE_RESERVED_MASK 0x1 +#define ETH_RETURN_CODE_RESERVED_SHIFT 6 +#define ETH_RETURN_CODE_RX_TX_MASK 0x1 +#define ETH_RETURN_CODE_RX_TX_SHIFT 7 +}; + +/* tx destination enum */ +enum eth_tx_dst_mode_config_enum { + ETH_TX_DST_MODE_CONFIG_DISABLE, + ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_BD, + ETH_TX_DST_MODE_CONFIG_FORWARD_DATA_IN_VPORT, + MAX_ETH_TX_DST_MODE_CONFIG_ENUM +}; + +/* What to do in case an error occurs */ +enum eth_tx_err { + ETH_TX_ERR_DROP, + ETH_TX_ERR_ASSERT_MALICIOUS, + MAX_ETH_TX_ERR +}; + +/* Array of the different error type behaviors */ +struct eth_tx_err_vals { + __le16 values; +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 +#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_BD_FLAGS_SHIFT 7 +#define ETH_TX_ERR_VALS_RESERVED_MASK 0xFF +#define ETH_TX_ERR_VALS_RESERVED_SHIFT 8 +}; + +/* vport rss configuration data */ +struct eth_vport_rss_config { + __le16 capabilities; +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7 + u8 rss_id; + u8 rss_mode; + u8 update_rss_key; + u8 update_rss_ind_table; + u8 update_rss_capabilities; + u8 tbl_size; + u8 ind_table_mask_valid; + u8 reserved2[3]; + __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM]; + __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS]; + __le32 rss_key[ETH_RSS_KEY_SIZE_REGS]; + __le32 reserved3; +}; + +/* eth vport RSS mode */ +enum eth_vport_rss_mode { + ETH_VPORT_RSS_MODE_DISABLED, + ETH_VPORT_RSS_MODE_REGULAR, + MAX_ETH_VPORT_RSS_MODE +}; + +/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */ +struct eth_vport_rx_mode { + 
__le16 state; +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_MASK 0x1 +#define ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI_SHIFT 6 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x1FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 7 +}; + +/* Command for setting tpa parameters */ +struct eth_vport_tpa_param { + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + + u8 tpa_max_aggs_num; + + __le16 tpa_max_size; + __le16 tpa_min_size_to_start; + + __le16 tpa_min_size_to_cont; + u8 max_buff_num; + u8 reserved; +}; + +/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */ +struct eth_vport_tx_mode { + __le16 state; +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF +#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 +}; + +/* GFT filter update action type */ +enum gft_filter_update_action { + GFT_ADD_FILTER, + GFT_DELETE_FILTER, + MAX_GFT_FILTER_UPDATE_ACTION +}; + +/* Ramrod data for rx create gft action */ +struct rx_create_gft_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +/* Ramrod data for rx create openflow action */ +struct rx_create_openflow_action_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +/* Ramrod data for rx add openflow filter */ +struct rx_openflow_filter_ramrod_data { + __le16 action_icid; + u8 priority; + u8 reserved0; + __le32 tenant_id; + __le16 dst_mac_hi; + __le16 dst_mac_mid; + __le16 dst_mac_lo; + __le16 src_mac_hi; + __le16 src_mac_mid; + __le16 src_mac_lo; + __le16 vlan_id; + __le16 l2_eth_type; + u8 ipv4_dscp; + u8 ipv4_frag_type; + u8 ipv4_over_ip; + u8 tenant_id_exists; + __le32 ipv4_dst_addr; + __le32 ipv4_src_addr; + __le16 l4_dst_port; + __le16 l4_src_port; +}; + +/* Ramrod data for rx queue start ramrod */ +struct rx_queue_start_ramrod_data { + __le16 rx_queue_id; + __le16 num_of_pbl_pages; + __le16 bd_max_bytes; + __le16 sb_id; + u8 sb_index; + u8 vport_id; + u8 default_rss_queue_flg; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 stats_counter_id; + u8 pin_context; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + u8 pxp_st_hint; + + __le16 pxp_st_index; + u8 pmd_mode; + + u8 notify_en; + u8 toggle_val; + + u8 vf_rx_prod_index; + u8 vf_rx_prod_use_zone_a; + u8 reserved[5]; + __le16 reserved1; + struct regpair cqe_pbl_addr; + struct regpair bd_base; + struct regpair reserved2; +}; + +/* 
Ramrod data for rx queue stop ramrod */ +struct rx_queue_stop_ramrod_data { + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 vport_id; + u8 reserved[3]; +}; + +/* Ramrod data for rx queue update ramrod */ +struct rx_queue_update_ramrod_data { + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 vport_id; + u8 set_default_rss_queue; + u8 reserved[3]; + u8 reserved1; + u8 reserved2; + u8 reserved3; + __le16 reserved4; + __le16 reserved5; + struct regpair reserved6; +}; + +/* Ramrod data for rx Add UDP Filter */ +struct rx_udp_filter_ramrod_data { + __le16 action_icid; + __le16 vlan_id; + u8 ip_type; + u8 tenant_id_exists; + __le16 reserved1; + __le32 ip_dst_addr[4]; + __le32 ip_src_addr[4]; + __le16 udp_dst_port; + __le16 udp_src_port; + __le32 tenant_id; +}; + +/* Add or delete GFT filter - filter is packet header of type of packet wished + * to pass certain FW flow. + */ +struct rx_update_gft_filter_ramrod_data { + struct regpair pkt_hdr_addr; + __le16 pkt_hdr_length; + __le16 action_icid; + __le16 rx_qid; + __le16 flow_id; + __le16 vport_id; + u8 action_icid_valid; + u8 rx_qid_valid; + u8 flow_id_valid; + u8 filter_action; + u8 assert_on_error; + u8 inner_vlan_removal_en; +}; + +/* Ramrod data for tx queue start ramrod */ +struct tx_queue_start_ramrod_data { + __le16 sb_id; + u8 sb_index; + u8 vport_id; + u8 reserved0; + u8 stats_counter_id; + __le16 qm_pq_id; + u8 flags; +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 2 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 3 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 4 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x7 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 5 + u8 pxp_st_hint; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + __le16 pxp_st_index; + u8 comp_agg_size; + u8 reserved3; + __le16 queue_zone_id; + __le16 reserved2; + __le16 pbl_size; + __le16 tx_queue_id; + __le16 same_as_last_id; + __le16 reserved[3]; + struct regpair pbl_base_addr; + struct regpair bd_cons_address; +}; + +/* Ramrod data for tx queue stop ramrod */ +struct tx_queue_stop_ramrod_data { + __le16 reserved[4]; +}; + +/* Ramrod data for tx queue update ramrod */ +struct tx_queue_update_ramrod_data { + __le16 update_qm_pq_id_flg; + __le16 qm_pq_id; + __le32 reserved0; + struct regpair reserved1[5]; +}; + +/* Inner to Inner VLAN priority map update mode */ +enum update_in_to_in_pri_map_mode_enum { + ETH_IN_TO_IN_PRI_MAP_UPDATE_DISABLED, + ETH_IN_TO_IN_PRI_MAP_UPDATE_NON_RDMA_TBL, + ETH_IN_TO_IN_PRI_MAP_UPDATE_RDMA_TBL, + MAX_UPDATE_IN_TO_IN_PRI_MAP_MODE_ENUM +}; + +/* Ramrod data for vport update ramrod */ +struct vport_filter_update_ramrod_data { + struct eth_filter_cmd_header filter_cmd_hdr; + struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT]; +}; + +/* Ramrod data for vport start ramrod */ +struct vport_start_ramrod_data { + u8 vport_id; + u8 sw_fid; + __le16 mtu; + u8 drop_ttl0_en; + u8 inner_vlan_removal_en; + struct eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + struct eth_vport_tpa_param tpa_param; + __le16 default_vlan; + u8 tx_switching_en; + 
u8 anti_spoofing_en; + u8 default_vlan_en; + u8 handle_ptp_pkts; + u8 silent_vlan_removal_en; + u8 untagged; + struct eth_tx_err_vals tx_err_behav; + u8 zero_placement_offset; + u8 ctl_frame_mac_check_en; + u8 ctl_frame_ethtype_check_en; + u8 reserved0; + u8 reserved1; + u8 tx_dst_port_mode_config; + u8 dst_vport_id; + u8 tx_dst_port_mode; + u8 dst_vport_id_valid; + u8 wipe_inner_vlan_pri_en; + u8 reserved2[2]; + struct eth_in_to_in_pri_map_cfg in_to_in_vlan_pri_map_cfg; +}; + +/* Ramrod data for vport stop ramrod */ +struct vport_stop_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +/* Ramrod data for vport update ramrod */ +struct vport_update_ramrod_data_cmn { + u8 vport_id; + u8 update_rx_active_flg; + u8 rx_active_flg; + u8 update_tx_active_flg; + u8 tx_active_flg; + u8 update_rx_mode_flg; + u8 update_tx_mode_flg; + u8 update_approx_mcast_flg; + + u8 update_rss_flg; + u8 update_inner_vlan_removal_en_flg; + + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg; + u8 update_tx_switching_en_flg; + + u8 tx_switching_en; + u8 update_anti_spoofing_en_flg; + + u8 anti_spoofing_en; + u8 update_handle_ptp_pkts; + + u8 handle_ptp_pkts; + u8 update_default_vlan_en_flg; + + u8 default_vlan_en; + + u8 update_default_vlan_flg; + + __le16 default_vlan; + u8 update_accept_any_vlan_flg; + + u8 accept_any_vlan; + u8 silent_vlan_removal_en; + u8 update_mtu_flg; + + __le16 mtu; + u8 update_ctl_frame_checks_en_flg; + u8 ctl_frame_mac_check_en; + u8 ctl_frame_ethtype_check_en; + u8 update_in_to_in_pri_map_mode; + u8 in_to_in_pri_map[8]; + u8 update_tx_dst_port_mode_flg; + u8 tx_dst_port_mode_config; + u8 dst_vport_id; + u8 tx_dst_port_mode; + u8 dst_vport_id_valid; + u8 reserved[1]; +}; + +struct vport_update_ramrod_mcast { + __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; +}; + +/* Ramrod data for vport update ramrod */ +struct vport_update_ramrod_data { + struct vport_update_ramrod_data_cmn common; + + struct eth_vport_rx_mode rx_mode; + struct eth_vport_tx_mode tx_mode; + __le32 reserved[3]; + struct eth_vport_tpa_param tpa_param; + struct vport_update_ramrod_mcast approx_mcast; + struct eth_vport_rss_config rss_config; +}; + +struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart { + u8 reserved0; + u8 state; + u8 flags0; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 + u8 flags1; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 +#define 
E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED2_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_E5_RESERVED3_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 + u8 flags4; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 
flags8; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 + u8 flags11; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 
0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 +#define E4XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id; + __le16 physical_q0; + __le16 e5_reserved1; + __le16 edpm_num_bds; + __le16 tx_bd_cons; + __le16 tx_bd_prod; + __le16 updated_qm_pq_id; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; +}; + +struct mstorm_eth_conn_ag_ctx { + 
u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct xstorm_eth_hw_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id; + __le16 physical_q0; + __le16 e5_reserved1; + __le16 edpm_num_bds; + __le16 tx_bd_cons; + __le16 tx_bd_prod; + __le16 updated_qm_pq_id; + __le16 conn_dpi; +}; + +/* GFT CAM line struct with fields breakout */ +struct gft_cam_line_mapped { + __le32 camline; +#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 +#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 +}; + +/* Used in gft_profile_key: Indication for ip version */ +enum gft_profile_ip_version { + GFT_PROFILE_IPV4 = 0, + GFT_PROFILE_IPV6 = 1, + MAX_GFT_PROFILE_IP_VERSION +}; + +/* Profile key struct for GFT logic in Prs */ +struct gft_profile_key { + __le16 profile_key; +#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1 +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2 +#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6 +#define GFT_PROFILE_KEY_PF_ID_MASK 0xF +#define GFT_PROFILE_KEY_PF_ID_SHIFT 10 +#define
GFT_PROFILE_KEY_RESERVED0_MASK 0x3 +#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 +}; + +/* Used in gft_profile_key: Indication for tunnel type */ +enum gft_profile_tunnel_type { + GFT_PROFILE_NO_TUNNEL = 0, + GFT_PROFILE_VXLAN_TUNNEL = 1, + GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2, + GFT_PROFILE_GRE_IP_TUNNEL = 3, + GFT_PROFILE_GENEVE_MAC_TUNNEL = 4, + GFT_PROFILE_GENEVE_IP_TUNNEL = 5, + MAX_GFT_PROFILE_TUNNEL_TYPE +}; + +/* Used in gft_profile_key: Indication for protocol type */ +enum gft_profile_upper_protocol_type { + GFT_PROFILE_ROCE_PROTOCOL = 0, + GFT_PROFILE_RROCE_PROTOCOL = 1, + GFT_PROFILE_FCOE_PROTOCOL = 2, + GFT_PROFILE_ICMP_PROTOCOL = 3, + GFT_PROFILE_ARP_PROTOCOL = 4, + GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, + GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, + GFT_PROFILE_TCP_PROTOCOL = 7, + GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, + GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, + GFT_PROFILE_UDP_PROTOCOL = 10, + GFT_PROFILE_USER_IP_1_INNER = 11, + GFT_PROFILE_USER_IP_2_OUTER = 12, + GFT_PROFILE_USER_ETH_1_INNER = 13, + GFT_PROFILE_USER_ETH_2_OUTER = 14, + GFT_PROFILE_RAW = 15, + MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE +}; + +/* GFT RAM line struct */ +struct gft_ram_line { + __le32 lo; +#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 +#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 +#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 +#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 +#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 +#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 +#define GFT_RAM_LINE_TTL_MASK 0x1 +#define GFT_RAM_LINE_TTL_SHIFT 18 +#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 +#define GFT_RAM_LINE_RESERVED0_MASK 0x1 +#define GFT_RAM_LINE_RESERVED0_SHIFT 20 +#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 +#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 +#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 +#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 +#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 +#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 
+#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 +#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 +#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 +#define GFT_RAM_LINE_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_DST_PORT_SHIFT 30 +#define GFT_RAM_LINE_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 + __le32 hi; +#define GFT_RAM_LINE_DSCP_MASK 0x1 +#define GFT_RAM_LINE_DSCP_SHIFT 0 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 +#define GFT_RAM_LINE_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_DST_IP_SHIFT 2 +#define GFT_RAM_LINE_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_SRC_IP_SHIFT 3 +#define GFT_RAM_LINE_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_PRIORITY_SHIFT 4 +#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 +#define GFT_RAM_LINE_VLAN_MASK 0x1 +#define GFT_RAM_LINE_VLAN_SHIFT 6 +#define GFT_RAM_LINE_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_DST_MAC_SHIFT 7 +#define GFT_RAM_LINE_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 +#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 +#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 +#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF +#define GFT_RAM_LINE_RESERVED1_SHIFT 10 +}; + +/* Used in the first 2 bits for gft_ram_line: Indication for vlan mask */ +enum gft_vlan_select { + INNER_PROVIDER_VLAN = 0, + INNER_VLAN = 1, + OUTER_PROVIDER_VLAN = 2, + OUTER_VLAN = 3, + MAX_GFT_VLAN_SELECT +}; + +/* The rdma task context of Mstorm */ +struct ystorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +struct ystorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 msem_ctx_upd_seq; + u8 flags0; +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 + u8 flags1; +#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 +#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + 
u8 key; + __le32 mw_cnt_or_qp_id; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +struct mstorm_rdma_task_ag_ctx { + u8 reserved; + u8 byte1; + __le16 icid; + u8 flags0; +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7 + u8 flags1; +#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7 + u8 flags2; +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7 + u8 key; + __le32 mw_cnt_or_qp_id; + u8 ref_cnt_seq; + u8 ctx_upd_seq; + __le16 dif_flags; + __le16 tx_ref_count; + __le16 last_used_ltid; + __le16 parent_mr_lo; + __le16 parent_mr_hi; + __le32 fbo_lo; + __le32 fbo_hi; +}; + +/* The roce task context of Mstorm */ +struct mstorm_rdma_task_st_ctx { + struct regpair temp[4]; +}; + +/* The roce task context of Ustorm */ +struct ustorm_rdma_task_st_ctx { + struct regpair temp[6]; +}; + +struct ustorm_rdma_task_ag_ctx { + u8 reserved; + u8 state; + __le16 icid; + u8 flags0; +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6 + u8 flags1; +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 + u8 
flags2; +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7 + u8 flags3; +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3 +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF +#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 + __le32 dif_err_intervals; + __le32 dif_error_1st_interval; + __le32 dif_rxmit_cons; + __le32 dif_rxmit_prod; + __le32 sge_index; + __le32 sq_cons; + u8 byte2; + u8 byte3; + __le16 dif_write_cons; + __le16 dif_write_prod; + __le16 word3; + __le32 dif_error_buffer_address_lo; + __le32 dif_error_buffer_address_hi; +}; + +/* RDMA task context */ +struct rdma_task_context { + struct ystorm_rdma_task_st_ctx ystorm_st_context; + struct ystorm_rdma_task_ag_ctx ystorm_ag_context; + struct tdif_task_context tdif_context; + struct mstorm_rdma_task_ag_ctx mstorm_ag_context; + struct mstorm_rdma_task_st_ctx mstorm_st_context; + struct rdif_task_context rdif_context; + struct ustorm_rdma_task_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct ustorm_rdma_task_ag_ctx ustorm_ag_context; +}; + +#define TOE_MAX_RAMROD_PER_PF 8 +#define TOE_TX_PAGE_SIZE_BYTES 4096 +#define TOE_GRQ_PAGE_SIZE_BYTES 4096 +#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096 + +#define TOE_RX_MAX_RSS_CHAINS 64 +#define TOE_TX_MAX_TSS_CHAINS 64 +#define TOE_RSS_INDIRECTION_TABLE_SIZE 128 + +/* The toe storm context of Mstorm */ +struct mstorm_toe_conn_st_ctx { + __le32 reserved[24]; +}; + +/* The toe storm context of Pstorm */ +struct pstorm_toe_conn_st_ctx { + __le32 reserved[36]; +}; + +/* The toe storm context of Ystorm */ +struct ystorm_toe_conn_st_ctx { + __le32 reserved[8]; +}; + +/* The toe storm context of Xstorm */ +struct xstorm_toe_conn_st_ctx { + __le32 reserved[44]; +}; + +struct ystorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define 
YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3 +#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1 +#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7 + u8 completion_opcode; + u8 byte3; + __le16 word0; + __le32 rel_seq; + __le32 rel_seq_threshold; + __le16 app_prod; + __le16 app_cons; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct xstorm_toe_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0 +#define 
XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 
physical_q0; + __le16 physical_q1; + __le16 word2; + __le16 word3; + __le16 bd_prod; + __le16 word5; + __le16 word6; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 local_adv_wnd_seq; + __le32 reg5; + __le32 reg6; + __le16 word7; + __le16 word8; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; + __le32 reg9; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 reg12; + __le32 reg13; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_toe_conn_ag_ctx { + u8 reserved0; + u8 byte1; + u8 flags0; +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6 + u8 flags1; +#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define 
TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; +}; + +struct ustorm_toe_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6 + u8 flags1; +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1 
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +/* The toe storm context of Tstorm */ +struct tstorm_toe_conn_st_ctx { + __le32 reserved[16]; +}; + +/* The toe storm context of Ustorm */ +struct ustorm_toe_conn_st_ctx { + __le32 reserved[52]; +}; + +/* toe connection context */ +struct toe_conn_context { + struct ystorm_toe_conn_st_ctx ystorm_st_context; + struct pstorm_toe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_toe_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct ystorm_toe_conn_ag_ctx ystorm_ag_context; + struct xstorm_toe_conn_ag_ctx xstorm_ag_context; + struct tstorm_toe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_toe_conn_ag_ctx ustorm_ag_context; + struct tstorm_toe_conn_st_ctx tstorm_st_context; + struct mstorm_toe_conn_st_ctx mstorm_st_context; + struct ustorm_toe_conn_st_ctx ustorm_st_context; +}; + +/* toe init ramrod header */ +struct toe_init_ramrod_header { + u8 first_rss; + u8 num_rss; + u8 reserved[6]; +}; + +/* toe pf init parameters */ +struct toe_pf_init_params { + __le32 push_timeout; + __le16 grq_buffer_size; + __le16 grq_sb_id; + u8 grq_sb_index; + u8 max_seg_retransmit; + u8 doubt_reachability; + u8 ll2_rx_queue_id; + __le16 grq_fetch_threshold; + u8 reserved1[2]; + struct regpair grq_page_addr; +}; + +/* toe tss parameters */ +struct toe_tss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 reserved1[2]; +}; + +/* toe rss parameters */ +struct toe_rss_params { + struct regpair curr_page_addr; + struct regpair next_page_addr; + u8 reserved0; + u8 status_block_index; + __le16 status_block_id; + __le16 reserved1[2]; +}; + +/* toe init ramrod data */ +struct toe_init_ramrod_data { + struct toe_init_ramrod_header hdr; + struct tcp_init_params tcp_params; + struct toe_pf_init_params pf_params; + struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS]; + struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS]; +}; + +/* toe offload parameters */ +struct toe_offload_params { + struct regpair tx_bd_page_addr; + struct regpair tx_app_page_addr; + __le32 more_to_send_seq; + __le16 rcv_indication_size; + u8 rss_tss_id; + u8 ignore_grq_push; + struct regpair rx_db_data_ptr; +}; + +/* TOE offload ramrod data - DMAed by firmware */ +struct toe_offload_ramrod_data { + struct tcp_offload_params tcp_ofld_params; + struct toe_offload_params toe_ofld_params; +}; + +/* TOE ramrod command IDs */ +enum toe_ramrod_cmd_id { + TOE_RAMROD_UNUSED, + TOE_RAMROD_FUNC_INIT, + TOE_RAMROD_INITATE_OFFLOAD, + TOE_RAMROD_FUNC_CLOSE, + TOE_RAMROD_SEARCHER_DELETE, + TOE_RAMROD_TERMINATE, + TOE_RAMROD_QUERY, + TOE_RAMROD_UPDATE, + TOE_RAMROD_EMPTY, + TOE_RAMROD_RESET_SEND, + TOE_RAMROD_INVALIDATE, + MAX_TOE_RAMROD_CMD_ID +}; + +/* Toe RQ buffer descriptor */ +struct toe_rx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_RX_BD_START_MASK 0x1 +#define TOE_RX_BD_START_SHIFT 0 +#define TOE_RX_BD_END_MASK 0x1 +#define TOE_RX_BD_END_SHIFT 1 +#define TOE_RX_BD_NO_PUSH_MASK 0x1 +#define TOE_RX_BD_NO_PUSH_SHIFT 2 +#define TOE_RX_BD_SPLIT_MASK 0x1 +#define TOE_RX_BD_SPLIT_SHIFT 3 +#define TOE_RX_BD_RESERVED0_MASK 0xFFF +#define TOE_RX_BD_RESERVED0_SHIFT 4 + __le32 reserved1; +}; + +/* 
TOE RX completion queue opcodes (opcode 0 is illegal) */ +enum toe_rx_cmp_opcode { + TOE_RX_CMP_OPCODE_GA = 1, + TOE_RX_CMP_OPCODE_GR = 2, + TOE_RX_CMP_OPCODE_GNI = 3, + TOE_RX_CMP_OPCODE_GAIR = 4, + TOE_RX_CMP_OPCODE_GAIL = 5, + TOE_RX_CMP_OPCODE_GRI = 6, + TOE_RX_CMP_OPCODE_GJ = 7, + TOE_RX_CMP_OPCODE_DGI = 8, + TOE_RX_CMP_OPCODE_CMP = 9, + TOE_RX_CMP_OPCODE_REL = 10, + TOE_RX_CMP_OPCODE_SKP = 11, + TOE_RX_CMP_OPCODE_URG = 12, + TOE_RX_CMP_OPCODE_RT_TO = 13, + TOE_RX_CMP_OPCODE_KA_TO = 14, + TOE_RX_CMP_OPCODE_MAX_RT = 15, + TOE_RX_CMP_OPCODE_DBT_RE = 16, + TOE_RX_CMP_OPCODE_SYN = 17, + TOE_RX_CMP_OPCODE_OPT_ERR = 18, + TOE_RX_CMP_OPCODE_FW2_TO = 19, + TOE_RX_CMP_OPCODE_2WY_CLS = 20, + TOE_RX_CMP_OPCODE_RST_RCV = 21, + TOE_RX_CMP_OPCODE_FIN_RCV = 22, + TOE_RX_CMP_OPCODE_FIN_UPL = 23, + TOE_RX_CMP_OPCODE_INIT = 32, + TOE_RX_CMP_OPCODE_RSS_UPDATE = 33, + TOE_RX_CMP_OPCODE_CLOSE = 34, + TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80, + TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81, + TOE_RX_CMP_OPCODE_TERMINATE = 82, + TOE_RX_CMP_OPCODE_QUERY = 83, + TOE_RX_CMP_OPCODE_RESET_SEND = 84, + TOE_RX_CMP_OPCODE_INVALIDATE = 85, + TOE_RX_CMP_OPCODE_EMPTY = 86, + TOE_RX_CMP_OPCODE_UPDATE = 87, + MAX_TOE_RX_CMP_OPCODE +}; + +/* TOE rx ooo completion data */ +struct toe_rx_cqe_ooo_params { + __le32 nbytes; + __le16 grq_buff_id; + u8 isle_num; + u8 reserved0; +}; + +/* TOE rx in order completion data */ +struct toe_rx_cqe_in_order_params { + __le32 nbytes; + __le16 grq_buff_id; + __le16 reserved1; +}; + +/* Union for TOE rx completion data */ +union toe_rx_cqe_data_union { + struct toe_rx_cqe_ooo_params ooo_params; + struct toe_rx_cqe_in_order_params in_order_params; + struct regpair raw_data; +}; + +/* TOE rx completion element */ +struct toe_rx_cqe { + __le16 icid; + u8 completion_opcode; + u8 reserved0; + __le32 reserved1; + union toe_rx_cqe_data_union data; +}; + +/* toe RX doorbell data */ +struct toe_rx_db_data { + __le32 local_adv_wnd_seq; + __le32 reserved[3]; +}; + +/* Toe GRQ buffer descriptor */ +struct toe_rx_grq_bd { + struct regpair addr; + __le16 buff_id; + __le16 reserved0; + __le32 reserved1; +}; + +/* Toe transmission application buffer descriptor */ +struct toe_tx_app_buff_desc { + __le32 next_buffer_start_seq; + __le32 reserved; +}; + +/* Toe transmission application buffer descriptor page pointer */ +struct toe_tx_app_buff_page_pointer { + struct regpair next_page_addr; +}; + +/* Toe transmission buffer descriptor */ +struct toe_tx_bd { + struct regpair addr; + __le16 size; + __le16 flags; +#define TOE_TX_BD_PUSH_MASK 0x1 +#define TOE_TX_BD_PUSH_SHIFT 0 +#define TOE_TX_BD_NOTIFY_MASK 0x1 +#define TOE_TX_BD_NOTIFY_SHIFT 1 +#define TOE_TX_BD_LARGE_IO_MASK 0x1 +#define TOE_TX_BD_LARGE_IO_SHIFT 2 +#define TOE_TX_BD_BD_CONS_MASK 0x1FFF +#define TOE_TX_BD_BD_CONS_SHIFT 3 + __le32 next_bd_start_seq; +}; + +/* TOE completion opcodes */ +enum toe_tx_cmp_opcode { + TOE_TX_CMP_OPCODE_DATA, + TOE_TX_CMP_OPCODE_TERMINATE, + TOE_TX_CMP_OPCODE_EMPTY, + TOE_TX_CMP_OPCODE_RESET_SEND, + TOE_TX_CMP_OPCODE_INVALIDATE, + TOE_TX_CMP_OPCODE_RST_RCV, + MAX_TOE_TX_CMP_OPCODE +}; + +/* Toe transmission completion element */ +struct toe_tx_cqe { + __le16 icid; + u8 opcode; + u8 reserved; + __le32 size; +}; + +/* Toe transmission page pointer bd */ +struct toe_tx_page_pointer_bd { + struct regpair next_page_addr; + struct regpair prev_page_addr; +}; + +/* Toe transmission completion element page pointer */ +struct toe_tx_page_pointer_cqe { + struct regpair next_page_addr; +}; + +/* toe update parameters */ +struct 
toe_update_params { + __le16 flags; +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1 +#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0 +#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF +#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1 + __le16 rcv_indication_size; + __le16 reserved1[2]; +}; + +/* TOE update ramrod data - DMAed by firmware */ +struct toe_update_ramrod_data { + struct tcp_update_params tcp_upd_params; + struct toe_update_params toe_upd_params; +}; + +struct mstorm_toe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +/* TOE doorbell data */ +struct toe_db_data { + u8 params; +#define TOE_DB_DATA_DEST_MASK 0x3 +#define TOE_DB_DATA_DEST_SHIFT 0 +#define TOE_DB_DATA_AGG_CMD_MASK 0x3 +#define TOE_DB_DATA_AGG_CMD_SHIFT 2 +#define TOE_DB_DATA_BYPASS_EN_MASK 0x1 +#define TOE_DB_DATA_BYPASS_EN_SHIFT 4 +#define TOE_DB_DATA_RESERVED_MASK 0x1 +#define TOE_DB_DATA_RESERVED_SHIFT 5 +#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; +}; + +/* rdma function init ramrod data */ +struct rdma_close_func_ramrod_data { + u8 cnq_start_offset; + u8 num_cnqs; + u8 vf_id; + u8 vf_valid; + u8 reserved[4]; +}; + +/* rdma function init CNQ parameters */ +struct rdma_cnq_params { + __le16 sb_num; + u8 sb_index; + u8 num_pbl_pages; + __le32 reserved; + struct regpair pbl_base_addr; + __le16 queue_zone_num; + u8 reserved1[6]; +}; + +/* rdma create cq ramrod data */ +struct rdma_create_cq_ramrod_data { + struct regpair cq_handle; + struct regpair pbl_addr; + __le32 max_cqes; + __le16 pbl_num_pages; + __le16 dpi; + u8 is_two_level_pbl; + u8 cnq_id; + u8 pbl_log_page_size; + u8 toggle_bit; + __le16 int_timeout; + u8 vf_id; + u8 flags; +#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define RDMA_CREATE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 0 +#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_MASK 0x7F +#define RDMA_CREATE_CQ_RAMROD_DATA_RESERVED1_SHIFT 1 +}; + +/* rdma deregister tid ramrod data */ +struct rdma_deregister_tid_ramrod_data { + __le32 itid; + __le32 reserved; +}; + +/* rdma destroy cq output params */ +struct rdma_destroy_cq_output_params { + __le16 cnq_num; + __le16 reserved0; + __le32 reserved1; +}; + +/* rdma destroy cq ramrod data */ 
+struct rdma_destroy_cq_ramrod_data { + struct regpair output_params_addr; +}; + +/* RDMA slow path EQ cmd IDs */ +enum rdma_event_opcode { + RDMA_EVENT_UNUSED, + RDMA_EVENT_FUNC_INIT, + RDMA_EVENT_FUNC_CLOSE, + RDMA_EVENT_REGISTER_MR, + RDMA_EVENT_DEREGISTER_MR, + RDMA_EVENT_CREATE_CQ, + RDMA_EVENT_RESIZE_CQ, + RDMA_EVENT_DESTROY_CQ, + RDMA_EVENT_CREATE_SRQ, + RDMA_EVENT_MODIFY_SRQ, + RDMA_EVENT_DESTROY_SRQ, + RDMA_EVENT_START_NAMESPACE_TRACKING, + RDMA_EVENT_STOP_NAMESPACE_TRACKING, + MAX_RDMA_EVENT_OPCODE +}; + +/* RDMA FW return code for slow path ramrods */ +enum rdma_fw_return_code { + RDMA_RETURN_OK = 0, + RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR, + RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR, + RDMA_RETURN_RESIZE_CQ_ERR, + RDMA_RETURN_NIG_DRAIN_REQ, + RDMA_RETURN_GENERAL_ERR, + MAX_RDMA_FW_RETURN_CODE +}; + +/* rdma function init header */ +struct rdma_init_func_hdr { + u8 cnq_start_offset; + u8 num_cnqs; + u8 cq_ring_mode; + u8 vf_id; + u8 vf_valid; + u8 relaxed_ordering; + __le16 first_reg_srq_id; + __le32 reg_srq_base_addr; + u8 flags; +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1 +#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2 +#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F +#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3 + u8 dpt_byte_threshold_log; + u8 dpt_common_queue_id; + u8 max_num_ns_log; +}; + +/* rdma function init ramrod data */ +struct rdma_init_func_ramrod_data { + struct rdma_init_func_hdr params_header; + struct rdma_cnq_params dptq_params; + struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES]; +}; + +/* rdma namespace tracking ramrod data */ +struct rdma_namespace_tracking_ramrod_data { + u8 name_space; + u8 reserved[7]; +}; + +/* RDMA ramrod command IDs */ +enum rdma_ramrod_cmd_id { + RDMA_RAMROD_UNUSED, + RDMA_RAMROD_FUNC_INIT, + RDMA_RAMROD_FUNC_CLOSE, + RDMA_RAMROD_REGISTER_MR, + RDMA_RAMROD_DEREGISTER_MR, + RDMA_RAMROD_CREATE_CQ, + RDMA_RAMROD_RESIZE_CQ, + RDMA_RAMROD_DESTROY_CQ, + RDMA_RAMROD_CREATE_SRQ, + RDMA_RAMROD_MODIFY_SRQ, + RDMA_RAMROD_DESTROY_SRQ, + RDMA_RAMROD_START_NS_TRACKING, + RDMA_RAMROD_STOP_NS_TRACKING, + MAX_RDMA_RAMROD_CMD_ID +}; + +/* rdma register tid ramrod data */ +struct rdma_register_tid_ramrod_data { + __le16 flags; +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 5 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 6 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 7 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 8 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 9 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 10 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 11 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 12 +#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1 +#define 
RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 13 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_MASK 0x3 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED_SHIFT 14 + u8 flags1; +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F +#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7 +#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5 + u8 flags2; +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1 +#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1 +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2 + u8 key; + u8 length_hi; + u8 vf_id; + u8 vf_valid; + __le16 pd; + __le16 reserved2; + __le32 length_lo; + __le32 itid; + __le32 reserved3; + struct regpair va; + struct regpair pbl_base; + struct regpair dif_error_addr; + __le32 reserved4[4]; +}; + +/* rdma resize cq output params */ +struct rdma_resize_cq_output_params { + __le32 old_cq_cons; + __le32 old_cq_prod; +}; + +/* rdma resize cq ramrod data */ +struct rdma_resize_cq_ramrod_data { + u8 flags; +#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define RDMA_RESIZE_CQ_RAMROD_DATA_VF_ID_VALID_SHIFT 2 +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x1F +#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 3 + u8 pbl_log_page_size; + __le16 pbl_num_pages; + __le32 max_cqes; + struct regpair pbl_addr; + struct regpair output_params_addr; + u8 vf_id; + u8 reserved1[7]; +}; + +/* The rdma SRQ context */ +struct rdma_srq_context { + struct regpair temp[8]; +}; + +/* rdma create srq ramrod data */ +struct rdma_srq_create_ramrod_data { + u8 flags; +#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_MASK 0x1 +#define RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG_SHIFT 0 +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 1 +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_MASK 0x3F +#define RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED1_SHIFT 2 + u8 reserved2; + __le16 xrc_domain; + __le32 xrc_srq_cq_cid; + struct regpair pbl_base_addr; + __le16 pages_in_srq_pbl; + __le16 pd_id; + struct rdma_srq_id srq_id; + __le16 page_size; + __le16 reserved3; + __le32 reserved4; + struct regpair producers_addr; +}; + +/* rdma destroy srq ramrod data */ +struct rdma_srq_destroy_ramrod_data { + struct rdma_srq_id srq_id; + __le32 reserved; +}; + +/* rdma modify srq ramrod data */ +struct rdma_srq_modify_ramrod_data { + struct rdma_srq_id srq_id; + __le32 wqe_limit; +}; + +/* RDMA Tid type enumeration (for register_tid ramrod) */ +enum rdma_tid_type { + RDMA_TID_REGISTERED_MR, + RDMA_TID_FMR, + RDMA_TID_MW, + MAX_RDMA_TID_TYPE +}; + +/* The rdma XRC SRQ context */ +struct rdma_xrc_srq_context { + struct regpair temp[9]; +}; + +struct tstorm_rdma_task_ag_ctx { + u8 byte0; + u8 byte1; + __le16 word0; + u8 flags0; +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF +#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1 +#define 
TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7 + u8 flags1; +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6 + u8 flags2; +#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6 + u8 flags3; +#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3 +#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7 + u8 flags4; +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7 + u8 byte2; + __le16 word1; + __le32 reg0; + u8 byte3; + u8 byte4; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg1; + __le32 reg2; +}; + +struct ustorm_rdma_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 + u8 flags3; +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 nvmf_only; + __le16 conn_dpi; + __le16 word1; + __le32 cq_cons; + __le32 cq_se_prod; + __le32 cq_prod; + __le32 reg3; + __le16 int_timeout; + __le16 word3; +}; + +struct xstorm_roce_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define 
XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define 
XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 snd_nxt_psn; + __le32 reg4; + __le32 reg5; + __le32 reg6; +}; + +struct tstorm_roce_conn_ag_ctx { + u8 reserved0; + u8 byte1; + u8 flags0; +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define 
TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 byte2; + u8 byte3; + __le16 word0; + u8 byte4; + u8 byte5; + __le16 word1; + __le16 word2; + __le16 word3; + __le32 reg9; + __le32 reg10; +}; + +/* The roce storm context of Ystorm */ +struct ystorm_roce_conn_st_ctx { + struct regpair temp[2]; +}; + +/* The roce storm context of Pstorm */ +struct pstorm_roce_conn_st_ctx { + struct regpair temp[16]; +}; + +/* The roce storm context of Xstorm */ +struct xstorm_roce_conn_st_ctx { + struct regpair temp[24]; +}; + +/* The roce storm context of Tstorm */ +struct tstorm_roce_conn_st_ctx { + struct regpair temp[30]; +}; + +/* The roce storm context of Mstorm */ +struct mstorm_roce_conn_st_ctx { + struct regpair temp[6]; +}; + +/* The roce storm context of Ustorm */ +struct ustorm_roce_conn_st_ctx { + struct regpair temp[14]; +}; + +/* roce connection context */ +struct roce_conn_context { + struct ystorm_roce_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_roce_conn_st_ctx pstorm_st_context; + struct xstorm_roce_conn_st_ctx xstorm_st_context; + struct xstorm_roce_conn_ag_ctx xstorm_ag_context; + struct tstorm_roce_conn_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct tstorm_roce_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_roce_conn_st_ctx mstorm_st_context; + struct regpair mstorm_st_padding[2]; + struct ustorm_roce_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; +}; + +/* roce cqes statistics */ +struct roce_cqe_stats { + __le32 req_cqe_error; + __le32 req_remote_access_errors; + __le32 req_remote_invalid_request; + __le32 resp_cqe_error; + __le32 resp_local_length_error; + __le32 reserved; +}; + +/* roce create qp requester ramrod data */ +struct roce_create_qp_req_ramrod_data { + __le16 flags; +#define 
ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG_SHIFT 7 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12 + u8 max_ord; + u8 traffic_class; + u8 hop_limit; + u8 orq_num_pages; + __le16 p_key; + __le32 flow_label; + __le32 dst_qp_id; + __le32 ack_timeout_val; + __le32 initial_psn; + __le16 mtu; + __le16 pd; + __le16 sq_num_pages; + __le16 low_latency_phy_queue; + struct regpair sq_pbl_addr; + struct regpair orq_pbl_addr; + __le16 local_mac_addr[3]; + __le16 remote_mac_addr[3]; + __le16 vlan_id; + __le16 udp_src_port; + __le32 src_gid[4]; + __le32 dst_gid[4]; + __le32 cq_cid; + struct regpair qp_handle_for_cqe; + struct regpair qp_handle_for_async; + u8 stats_counter_id; + u8 vf_id; + u8 vport_id; + u8 flags2; +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2 +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F +#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3 + u8 name_space; + u8 reserved3[3]; + __le16 regular_latency_phy_queue; + __le16 dpi; +}; + +/* roce create qp responder ramrod data */ +struct roce_create_qp_resp_ramrod_data { + __le32 flags; +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN_SHIFT 7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define 
ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18 +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF +#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19 + __le16 xrc_domain; + u8 max_ird; + u8 traffic_class; + u8 hop_limit; + u8 irq_num_pages; + __le16 p_key; + __le32 flow_label; + __le32 dst_qp_id; + u8 stats_counter_id; + u8 reserved1; + __le16 mtu; + __le32 initial_psn; + __le16 pd; + __le16 rq_num_pages; + struct rdma_srq_id srq_id; + struct regpair rq_pbl_addr; + struct regpair irq_pbl_addr; + __le16 local_mac_addr[3]; + __le16 remote_mac_addr[3]; + __le16 vlan_id; + __le16 udp_src_port; + __le32 src_gid[4]; + __le32 dst_gid[4]; + struct regpair qp_handle_for_cqe; + struct regpair qp_handle_for_async; + __le16 low_latency_phy_queue; + u8 vf_id; + u8 vport_id; + __le32 cq_cid; + __le16 regular_latency_phy_queue; + __le16 dpi; + __le32 src_qp_id; + u8 name_space; + u8 reserved3[3]; +}; + +/* RoCE Create Suspended qp requester runtime ramrod data */ +struct roce_create_suspended_qp_req_runtime_ramrod_data { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \ + 0x7FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; +}; + +/* RoCE Create Suspended QP requester ramrod data */ +struct roce_create_suspended_qp_req_ramrod_data { + struct roce_create_qp_req_ramrod_data qp_params; + struct roce_create_suspended_qp_req_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE Create Suspended QP responder runtime params */ +struct roce_create_suspended_qp_resp_runtime_params { + __le32 flags; +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; + __le32 resreved; +}; + +/* RoCE RDB array entry */ +struct roce_resp_qp_rdb_entry { + struct regpair atomic_data; + struct regpair va; + __le32 psn; + __le32 rkey; + __le32 byte_count; + u8 op_type; + u8 reserved[3]; +}; + +/* RoCE Create Suspended QP responder runtime ramrod data */ +struct roce_create_suspended_qp_resp_runtime_ramrod_data { + struct roce_create_suspended_qp_resp_runtime_params params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Create Suspended QP responder ramrod data */ +struct roce_create_suspended_qp_resp_ramrod_data { + struct roce_create_qp_resp_ramrod_data + qp_params; + struct roce_create_suspended_qp_resp_runtime_ramrod_data + qp_runtime_params; +}; + +/* RoCE create ud qp ramrod data */ +struct roce_create_ud_qp_ramrod_data { + __le16 local_mac_addr[3]; + __le16 vlan_id; + __le32 src_qp_id; + u8 name_space; + u8 reserved[3]; +}; + +/* roce DCQCN received statistics */ +struct roce_dcqcn_received_stats { + struct regpair ecn_pkt_rcv; + struct regpair cnp_pkt_rcv; + struct regpair cnp_pkt_reject; +}; + +/* roce DCQCN sent statistics */ +struct 
roce_dcqcn_sent_stats { + struct regpair cnp_pkt_sent; +}; + +/* RoCE destroy qp requester output params */ +struct roce_destroy_qp_req_output_params { + __le32 cq_prod; + __le32 reserved; +}; + +/* RoCE destroy qp requester ramrod data */ +struct roce_destroy_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +/* RoCE destroy qp responder output params */ +struct roce_destroy_qp_resp_output_params { + __le32 cq_prod; + __le32 reserved; +}; + +/* RoCE destroy qp responder ramrod data */ +struct roce_destroy_qp_resp_ramrod_data { + struct regpair output_params_addr; + __le32 src_qp_id; + __le32 reserved; +}; + +/* RoCE destroy ud qp ramrod data */ +struct roce_destroy_ud_qp_ramrod_data { + __le32 src_qp_id; + __le32 reserved; +}; + +/* roce error statistics */ +struct roce_error_stats { + __le32 resp_remote_access_errors; + __le32 reserved; +}; + +/* roce special events statistics */ +struct roce_events_stats { + __le32 silent_drops; + __le32 rnr_naks_sent; + __le32 retransmit_count; + __le32 icrc_error_count; + __le32 implied_nak_seq_err; + __le32 duplicate_request; + __le32 local_ack_timeout_err; + __le32 out_of_sequence; + __le32 packet_seq_err; + __le32 rnr_nak_retry_err; +}; + +/* roce slow path EQ cmd IDs */ +enum roce_event_opcode { + ROCE_EVENT_CREATE_QP = 13, + ROCE_EVENT_MODIFY_QP, + ROCE_EVENT_QUERY_QP, + ROCE_EVENT_DESTROY_QP, + ROCE_EVENT_CREATE_UD_QP, + ROCE_EVENT_DESTROY_UD_QP, + ROCE_EVENT_FUNC_UPDATE, + ROCE_EVENT_SUSPEND_QP, + ROCE_EVENT_QUERY_SUSPENDED_QP, + ROCE_EVENT_CREATE_SUSPENDED_QP, + ROCE_EVENT_RESUME_QP, + ROCE_EVENT_SUSPEND_UD_QP, + ROCE_EVENT_RESUME_UD_QP, + ROCE_EVENT_CREATE_SUSPENDED_UD_QP, + ROCE_EVENT_FLUSH_DPT_QP, + MAX_ROCE_EVENT_OPCODE +}; + +/* roce func init ramrod data */ +struct roce_init_func_params { + u8 ll2_queue_id; + u8 cnp_vlan_priority; + u8 cnp_dscp; + u8 flags; +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_INIT_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_MASK 0x3F +#define ROCE_INIT_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; + __le16 rl_offset; + u8 rl_count_log; + u8 reserved1[5]; +}; + +/* roce func init ramrod data */ +struct roce_init_func_ramrod_data { + struct rdma_init_func_ramrod_data rdma; + struct roce_init_func_params roce; +}; + +/* roce_ll2_cqe_data */ +struct roce_ll2_cqe_data { + u8 name_space; + u8 flags; +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1 +#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0 +#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F +#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1 + u8 reserved1[2]; + __le32 cid; +}; + +/* roce modify qp requester ramrod data */ +struct roce_modify_qp_req_ramrod_data { + __le16 flags; +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1 +#define 
ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15 + u8 fields; +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0 +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF +#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4 + u8 max_ord; + u8 traffic_class; + u8 hop_limit; + __le16 p_key; + __le32 flow_label; + __le32 ack_timeout_val; + __le16 mtu; + __le16 reserved2; + __le32 reserved3[2]; + __le16 low_latency_phy_queue; + __le16 regular_latency_phy_queue; + __le32 src_gid[4]; + __le32 dst_gid[4]; +}; + +/* roce modify qp responder ramrod data */ +struct roce_modify_qp_resp_ramrod_data { + __le16 flags; +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12 + u8 fields; +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0 +#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F +#define 
ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3 + u8 max_ird; + u8 traffic_class; + u8 hop_limit; + __le16 p_key; + __le32 flow_label; + __le16 mtu; + __le16 low_latency_phy_queue; + __le16 regular_latency_phy_queue; + u8 reserved2[6]; + __le32 src_gid[4]; + __le32 dst_gid[4]; +}; + +/* RoCE query qp requester output params */ +struct roce_query_qp_req_output_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1 +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2 +}; + +/* RoCE query qp requester ramrod data */ +struct roce_query_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +/* RoCE query qp responder output params */ +struct roce_query_qp_resp_output_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 +}; + +/* RoCE query qp responder ramrod data */ +struct roce_query_qp_resp_ramrod_data { + struct regpair output_params_addr; +}; + +/* RoCE Query Suspended QP requester output params */ +struct roce_query_suspended_qp_req_output_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1 + __le32 send_msg_psn; + __le32 inflight_sends; + __le32 ssn; + __le32 reserved; +}; + +/* RoCE Query Suspended QP requester ramrod data */ +struct roce_query_suspended_qp_req_ramrod_data { + struct regpair output_params_addr; +}; + +/* RoCE Query Suspended QP responder runtime params */ +struct roce_query_suspended_qp_resp_runtime_params { + __le32 psn; + __le32 flags; +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1 +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF +#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2 + __le32 receive_msg_psn; + __le32 inflight_receives; + __le32 rmsn; + __le32 rdma_key; + struct regpair rdma_va; + __le32 rdma_length; + __le32 num_rdb_entries; +}; + +/* RoCE Query Suspended QP responder output params */ +struct roce_query_suspended_qp_resp_output_params { + struct roce_query_suspended_qp_resp_runtime_params runtime_params; + struct roce_resp_qp_rdb_entry + rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE]; +}; + +/* RoCE Query Suspended QP responder ramrod data */ +struct roce_query_suspended_qp_resp_ramrod_data { + struct regpair output_params_addr; +}; + +/* ROCE ramrod command IDs */ +enum roce_ramrod_cmd_id { + ROCE_RAMROD_CREATE_QP = 13, + ROCE_RAMROD_MODIFY_QP, + ROCE_RAMROD_QUERY_QP, + ROCE_RAMROD_DESTROY_QP, + ROCE_RAMROD_CREATE_UD_QP, + ROCE_RAMROD_DESTROY_UD_QP, + ROCE_RAMROD_FUNC_UPDATE, + ROCE_RAMROD_SUSPEND_QP, + ROCE_RAMROD_QUERY_SUSPENDED_QP, + 
ROCE_RAMROD_CREATE_SUSPENDED_QP, + ROCE_RAMROD_RESUME_QP, + ROCE_RAMROD_SUSPEND_UD_QP, + ROCE_RAMROD_RESUME_UD_QP, + ROCE_RAMROD_CREATE_SUSPENDED_UD_QP, + ROCE_RAMROD_FLUSH_DPT_QP, + MAX_ROCE_RAMROD_CMD_ID +}; + +/* ROCE RDB array entry type */ +enum roce_resp_qp_rdb_entry_type { + ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0, + ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1, + ROCE_QP_RDB_ENTRY_INVALID = 2, + MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE +}; + +/* RoCE func init ramrod data */ +struct roce_update_func_params { + u8 cnp_vlan_priority; + u8 cnp_dscp; + __le16 flags; +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT 0 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK 0x1 +#define ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT 1 +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_MASK 0x3FFF +#define ROCE_UPDATE_FUNC_PARAMS_RESERVED0_SHIFT 2 + __le32 cnp_send_timeout; +}; + +struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part { + u8 reserved0; + u8 state; + u8 flags0; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7 + u8 flags1; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSDM_FLUSH_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MSEM_FLUSH_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2 
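/* Illustrative aside, not part of the original patch: the DCQCN enable bits
 * of struct roce_update_func_params above are packed with the same MASK/SHIFT
 * convention used throughout this file. A minimal sketch, assuming a
 * caller-owned instance 'upd' and host-order packing before the little-endian
 * store; 'np_en', 'rp_en' and 'timeout' are hypothetical locals used only for
 * this example:
 *
 *	u16 flags = 0;
 *
 *	flags |= (np_en & ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_MASK) <<
 *		 ROCE_UPDATE_FUNC_PARAMS_DCQCN_NP_EN_SHIFT;
 *	flags |= (rp_en & ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_MASK) <<
 *		 ROCE_UPDATE_FUNC_PARAMS_DCQCN_RP_EN_SHIFT;
 *	upd.flags = cpu_to_le16(flags);
 *	upd.cnp_send_timeout = cpu_to_le32(timeout);
 */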
+#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6 + u8 flags7; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 flags8; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7 + u8 flags10; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7 + u8 flags11; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define 
E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3 +#define E4XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 snd_nxt_psn; + __le32 reg4; +}; + +struct mstorm_roce_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct mstorm_roce_req_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 
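/* Illustrative aside, not part of the original patch: the two-bit CF fields
 * and one-bit enable/rule bits of these aggregative-context structures are
 * read back with the usual shift-and-mask idiom. A minimal sketch, assuming a
 * caller-owned 'struct mstorm_roce_conn_ag_ctx ag' (defined above):
 *
 *	u8 cf1 = (ag.flags0 >> MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT) &
 *		 MSTORM_ROCE_CONN_AG_CTX_CF1_MASK;
 *
 * which yields the raw 0-3 value of the CF1 sub-field; generic
 * GET_FIELD/SET_FIELD-style helpers express the same idiom without
 * open-coding it per field.
 */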
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct mstorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +struct tstorm_roce_req_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6 + u8 flags1; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6 + u8 flags3; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1 +#define 
TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 dif_rxmit_cnt; + __le32 snd_nxt_psn; + __le32 snd_max_psn; + __le32 orq_prod; + __le32 reg4; + __le32 dif_acked_cnt; + __le32 dif_cnt; + __le32 reg7; + __le32 reg8; + u8 tx_cqe_error_type; + u8 orq_cache_idx; + __le16 snd_sq_cons_th; + u8 byte4; + u8 byte5; + __le16 snd_sq_cons; + __le16 conn_dpi; + __le16 force_comp_cons; + __le32 dif_rxmit_acked_cnt; + __le32 reg10; +}; + +struct tstorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 state; + u8 flags0; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags2; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define 
TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 psn_and_rxmit_id_echo; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 tx_async_error_type; + u8 byte3; + __le16 rq_cons; + u8 byte4; + u8 byte5; + __le16 rq_prod; + __le16 conn_dpi; + __le16 irq_cons; + __le32 reg9; + __le32 reg10; +}; + +struct ustorm_roce_req_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define 
USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct ustorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define 
USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct xstorm_roce_req_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7 + u8 flags9; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7 + u8 flags13; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0 +#define 
XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 sq_cmp_cons; + __le16 sq_cons; + __le16 sq_prod; + __le16 dif_error_first_sq_cons; + __le16 conn_dpi; + u8 dif_error_sge_index; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 lsn; + __le32 ssn; + __le32 snd_una_psn; + __le32 snd_nxt_psn; + __le32 dif_error_offset; + __le32 orq_cons_th; + __le32 orq_cons; +}; + +struct xstorm_roce_resp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7 + u8 flags2; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6 + u8 flags4; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6 + u8 flags7; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 
5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7 + u8 flags11; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define 
XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 irq_prod_shadow; + __le16 word2; + __le16 irq_cons; + __le16 irq_prod; + __le16 e5_reserved1; + __le16 conn_dpi; + u8 rxmit_opcode; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 rxmit_psn_and_id; + __le32 rxmit_bytes_length; + __le32 psn; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 msn_and_syndrome; +}; + +struct ystorm_roce_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct ystorm_roce_req_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define 
YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +struct ystorm_roce_resp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +/* Roce doorbell data */ +enum roce_flavor { + PLAIN_ROCE, + RROCE_IPV4, + RROCE_IPV6, + MAX_ROCE_FLAVOR +}; + +/* The iwarp storm context of Ystorm */ +struct ystorm_iwarp_conn_st_ctx { + __le32 reserved[4]; +}; + +/* The iwarp storm context of Pstorm */ +struct pstorm_iwarp_conn_st_ctx { + __le32 reserved[36]; +}; + +/* The iwarp storm context of Xstorm */ +struct xstorm_iwarp_conn_st_ctx { + __le32 reserved[48]; +}; + +struct xstorm_iwarp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define 
XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7 + u8 flags2; +#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2 +#define 
XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define 
XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1 +#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3 +#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 sq_comp_cons; + __le16 sq_tx_cons; + __le16 sq_prod; + __le16 word5; + __le16 conn_dpi; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 reg4; + __le32 rewinded_snd_max_or_term_opcode; + __le32 rd_msn; + __le16 irq_prod_via_msdm; + __le16 irq_cons; + __le16 hq_cons_th_or_mpa_data; + __le16 hq_cons; + __le32 atom_msn; + __le32 orq_cons; + __le32 orq_cons_th; + u8 byte7; + u8 wqe_data_pad_bytes; + u8 max_ord; + u8 former_hq_prod; + u8 irq_prod_via_msem; + u8 byte12; + u8 max_pkt_pdu_size_lo; + u8 max_pkt_pdu_size_hi; + u8 byte15; + u8 e5_reserved; + __le16 e5_reserved4; + __le32 reg10; + 
__le32 reg11; + __le32 shared_queue_page_addr_lo; + __le32 shared_queue_page_addr_hi; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_iwarp_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define 
TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 unaligned_nxt_seq; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 orq_cache_idx; + u8 hq_prod; + __le16 sq_tx_cons_th; + u8 orq_prod; + u8 irq_cons; + __le16 sq_tx_cons; + __le16 conn_dpi; + __le16 rq_prod; + __le32 snd_seq; + __le32 last_hq_sequence; +}; + +/* The iwarp storm context of Tstorm */ +struct tstorm_iwarp_conn_st_ctx { + __le32 reserved[60]; +}; + +/* The iwarp storm context of Mstorm */ +struct mstorm_iwarp_conn_st_ctx { + __le32 reserved[32]; +}; + +/* The iwarp storm context of Ustorm */ +struct ustorm_iwarp_conn_st_ctx { + struct regpair reserved[14]; +}; + +/* iwarp connection context */ +struct iwarp_conn_context { + struct ystorm_iwarp_conn_st_ctx ystorm_st_context; + struct regpair ystorm_st_padding[2]; + struct pstorm_iwarp_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_iwarp_conn_st_ctx xstorm_st_context; + struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context; + struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context; + struct timers_context timer_context; + struct ustorm_rdma_conn_ag_ctx ustorm_ag_context; + struct tstorm_iwarp_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_iwarp_conn_st_ctx mstorm_st_context; + struct ustorm_iwarp_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; +}; + +/* iWARP create QP params passed by driver to FW in CreateQP Request Ramrod */ +struct iwarp_create_qp_ramrod_data { + u8 flags; +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 0 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP_SHIFT 1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 4 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG_SHIFT 5 +#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_LOW_LATENCY_QUEUE_EN_SHIFT 6 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_MASK 0x1 +#define IWARP_CREATE_QP_RAMROD_DATA_RESERVED0_SHIFT 7 + u8 reserved1; + __le16 pd; + __le16 sq_num_pages; + __le16 rq_num_pages; + __le32 reserved3[2]; + struct regpair qp_handle_for_cqe; + struct rdma_srq_id srq_id; + __le32 cq_cid_for_sq; + __le32 cq_cid_for_rq; + __le16 dpi; + __le16 physical_q0; + __le16 physical_q1; + u8 reserved2[6]; +}; + +/* iWARP completion 
queue types */ +enum iwarp_eqe_async_opcode { + IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE, + IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED, + IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE, + IWARP_EVENT_TYPE_ASYNC_CID_CLEANED, + IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED, + IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE, + IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW, + IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT, + IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY, + MAX_IWARP_EQE_ASYNC_OPCODE +}; + +struct iwarp_eqe_data_mpa_async_completion { + __le16 ulp_data_len; + u8 rtr_type_sent; + u8 reserved[5]; +}; + +struct iwarp_eqe_data_tcp_async_completion { + __le16 ulp_data_len; + u8 mpa_handshake_mode; + u8 reserved[5]; +}; + +/* iWARP completion queue types */ +enum iwarp_eqe_sync_opcode { + IWARP_EVENT_TYPE_TCP_OFFLOAD = 13, + IWARP_EVENT_TYPE_MPA_OFFLOAD, + IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR, + IWARP_EVENT_TYPE_CREATE_QP, + IWARP_EVENT_TYPE_QUERY_QP, + IWARP_EVENT_TYPE_MODIFY_QP, + IWARP_EVENT_TYPE_DESTROY_QP, + IWARP_EVENT_TYPE_ABORT_TCP_OFFLOAD, + MAX_IWARP_EQE_SYNC_OPCODE +}; + +/* iWARP EQE completion status */ +enum iwarp_fw_return_code { + IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET = 6, + IWARP_CONN_ERROR_TCP_CONNECTION_RST, + IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT, + IWARP_CONN_ERROR_MPA_ERROR_REJECT, + IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER, + IWARP_CONN_ERROR_MPA_RST, + IWARP_CONN_ERROR_MPA_FIN, + IWARP_CONN_ERROR_MPA_RTR_MISMATCH, + IWARP_CONN_ERROR_MPA_INSUF_IRD, + IWARP_CONN_ERROR_MPA_INVALID_PACKET, + IWARP_CONN_ERROR_MPA_LOCAL_ERROR, + IWARP_CONN_ERROR_MPA_TIMEOUT, + IWARP_CONN_ERROR_MPA_TERMINATE, + IWARP_QP_IN_ERROR_GOOD_CLOSE, + IWARP_QP_IN_ERROR_BAD_CLOSE, + IWARP_EXCEPTION_DETECTED_LLP_CLOSED, + IWARP_EXCEPTION_DETECTED_LLP_RESET, + IWARP_EXCEPTION_DETECTED_IRQ_FULL, + IWARP_EXCEPTION_DETECTED_RQ_EMPTY, + IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT, + IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR, + IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW, + IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC, + IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR, + IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR, + IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED, + MAX_IWARP_FW_RETURN_CODE +}; + +/* unaligned opaque data received from LL2 */ +struct iwarp_init_func_params { + u8 ll2_ooo_q_index; + u8 reserved1[7]; +}; + +/* iwarp func init ramrod data */ +struct iwarp_init_func_ramrod_data { + struct rdma_init_func_ramrod_data rdma; + struct tcp_init_params tcp; + struct iwarp_init_func_params iwarp; +}; + +/* iWARP QP - possible states to transition to */ +enum iwarp_modify_qp_new_state_type { + IWARP_MODIFY_QP_STATE_CLOSING = 1, + IWARP_MODIFY_QP_STATE_ERROR = 2, + MAX_IWARP_MODIFY_QP_NEW_STATE_TYPE +}; + +/* iwarp modify qp responder ramrod data */ +struct iwarp_modify_qp_ramrod_data { + __le16 transition_to_state; + __le16 flags; +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_RD_EN_SHIFT 0 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_WR_EN_SHIFT 1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_ATOMIC_EN_SHIFT 2 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN_SHIFT 3 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 4 +#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1 +#define IWARP_MODIFY_QP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 5 +#define 
IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_MASK 0x3FF +#define IWARP_MODIFY_QP_RAMROD_DATA_RESERVED_SHIFT 6 + __le16 physical_q0; + __le16 physical_q1; + __le32 reserved1[10]; +}; + +/* MPA params for Enhanced mode */ +struct mpa_rq_params { + __le32 ird; + __le32 ord; +}; + +/* MPA host Address-Len for private data */ +struct mpa_ulp_buffer { + struct regpair addr; + __le16 len; + __le16 reserved[3]; +}; + +/* iWARP MPA offload params common to Basic and Enhanced modes */ +struct mpa_outgoing_params { + u8 crc_needed; + u8 reject; + u8 reserved[6]; + struct mpa_rq_params out_rq; + struct mpa_ulp_buffer outgoing_ulp_buffer; +}; + +/* iWARP MPA offload params passed by driver to FW in MPA Offload Request + * Ramrod. + */ +struct iwarp_mpa_offload_ramrod_data { + struct mpa_outgoing_params common; + __le32 tcp_cid; + u8 mode; + u8 tcp_connect_side; + u8 rtr_pref; +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_MASK 0x7 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED_SHIFT 0 +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_MASK 0x1F +#define IWARP_MPA_OFFLOAD_RAMROD_DATA_RESERVED1_SHIFT 3 + u8 reserved2; + struct mpa_ulp_buffer incoming_ulp_buffer; + struct regpair async_eqe_output_buf; + struct regpair handle_for_async; + struct regpair shared_queue_addr; + __le32 additional_setup_time; + __le16 rcv_wnd; + u8 stats_counter_id; + u8 reserved3[9]; +}; + +/* iWARP TCP connection offload params passed by driver to FW */ +struct iwarp_offload_params { + struct mpa_ulp_buffer incoming_ulp_buffer; + struct regpair async_eqe_output_buf; + struct regpair handle_for_async; + __le32 additional_setup_time; + __le16 physical_q0; + __le16 physical_q1; + u8 stats_counter_id; + u8 mpa_mode; + u8 src_vport_id; + u8 reserved[5]; +}; + +/* iWARP query QP output params */ +struct iwarp_query_qp_output_params { + __le32 flags; +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0 +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF +#define IWARP_QUERY_QP_OUTPUT_PARAMS_RESERVED0_SHIFT 1 + u8 reserved1[4]; +}; + +/* iWARP query QP ramrod data */ +struct iwarp_query_qp_ramrod_data { + struct regpair output_params_addr; +}; + +/* iWARP Ramrod Command IDs */ +enum iwarp_ramrod_cmd_id { + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, + IWARP_RAMROD_CMD_ID_CREATE_QP, + IWARP_RAMROD_CMD_ID_QUERY_QP, + IWARP_RAMROD_CMD_ID_MODIFY_QP, + IWARP_RAMROD_CMD_ID_DESTROY_QP, + IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD, + MAX_IWARP_RAMROD_CMD_ID +}; + +/* Per PF iWARP retransmit path statistics */ +struct iwarp_rxmit_stats_drv { + struct regpair tx_go_to_slow_start_event_cnt; + struct regpair tx_fast_retransmit_event_cnt; +}; + +/* iWARP and TCP connection offload params passed by driver to FW in iWARP + * offload ramrod. 
+ */ +struct iwarp_tcp_offload_ramrod_data { + struct tcp_offload_params_opt2 tcp; + struct iwarp_offload_params iwarp; +}; + +/* iWARP MPA negotiation types */ +enum mpa_negotiation_mode { + MPA_NEGOTIATION_TYPE_BASIC = 1, + MPA_NEGOTIATION_TYPE_ENHANCED = 2, + MAX_MPA_NEGOTIATION_MODE +}; + +/* iWARP MPA Enhanced mode RTR types */ +enum mpa_rtr_type { + MPA_RTR_TYPE_NONE = 0, + MPA_RTR_TYPE_ZERO_SEND = 1, + MPA_RTR_TYPE_ZERO_WRITE = 2, + MPA_RTR_TYPE_ZERO_SEND_AND_WRITE = 3, + MPA_RTR_TYPE_ZERO_READ = 4, + MPA_RTR_TYPE_ZERO_SEND_AND_READ = 5, + MPA_RTR_TYPE_ZERO_WRITE_AND_READ = 6, + MPA_RTR_TYPE_ZERO_SEND_AND_WRITE_AND_READ = 7, + MAX_MPA_RTR_TYPE +}; + +/* unaligned opaque data received from LL2 */ +struct unaligned_opaque_data { + __le16 first_mpa_offset; + u8 tcp_payload_offset; + u8 flags; +#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE_SHIFT 0 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_MASK 0x1 +#define UNALIGNED_OPAQUE_DATA_CONNECTION_CLOSED_SHIFT 1 +#define UNALIGNED_OPAQUE_DATA_RESERVED_MASK 0x3F +#define UNALIGNED_OPAQUE_DATA_RESERVED_SHIFT 2 + __le32 cid; +}; + +struct mstorm_iwarp_conn_ag_ctx { + u8 reserved; + u8 state; + u8 flags0; +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 rcq_cons; + __le16 rcq_cons_th; + __le32 reg0; + __le32 reg1; +}; + +struct ustorm_iwarp_conn_ag_ctx { + u8 reserved; + u8 byte1; + u8 flags0; +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3 +#define 
USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7 + u8 flags3; +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 cq_cons; + __le32 cq_se_prod; + __le32 cq_prod; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +struct ystorm_iwarp_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +/* The fcoe storm context of Ystorm */ +struct ystorm_fcoe_conn_st_ctx { + u8 
func_mode; + u8 cos; + u8 conf_version; + u8 eth_hdr_size; + __le16 stat_ram_addr; + __le16 mtu; + __le16 max_fc_payload_len; + __le16 tx_max_fc_pay_len; + u8 fcp_cmd_size; + u8 fcp_rsp_size; + __le16 mss; + struct regpair reserved; + __le16 min_frame_size; + u8 protection_info_flags; +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RESERVED1_SHIFT 2 + u8 dst_protection_per_mss; + u8 src_protection_per_mss; + u8 ptu_log_page_size; + u8 flags; +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 0 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define YSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 1 +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x3F +#define YSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 2 + u8 fcp_xfer_size; +}; + +/* FCoE 16-bits vlan structure */ +struct fcoe_vlan_fields { + __le16 fields; +#define FCOE_VLAN_FIELDS_VID_MASK 0xFFF +#define FCOE_VLAN_FIELDS_VID_SHIFT 0 +#define FCOE_VLAN_FIELDS_CLI_MASK 0x1 +#define FCOE_VLAN_FIELDS_CLI_SHIFT 12 +#define FCOE_VLAN_FIELDS_PRI_MASK 0x7 +#define FCOE_VLAN_FIELDS_PRI_SHIFT 13 +}; + +/* FCoE 16-bits vlan union */ +union fcoe_vlan_field_union { + struct fcoe_vlan_fields fields; + __le16 val; +}; + +/* FCoE 16-bits vlan, vif union */ +union fcoe_vlan_vif_field_union { + union fcoe_vlan_field_union vlan; + __le16 vif; +}; + +/* Ethernet context section */ +struct pstorm_fcoe_eth_context_section { + u8 remote_addr_3; + u8 remote_addr_2; + u8 remote_addr_1; + u8 remote_addr_0; + u8 local_addr_1; + u8 local_addr_0; + u8 remote_addr_5; + u8 remote_addr_4; + u8 local_addr_5; + u8 local_addr_4; + u8 local_addr_3; + u8 local_addr_2; + union fcoe_vlan_vif_field_union vif_outer_vlan; + __le16 vif_outer_eth_type; + union fcoe_vlan_vif_field_union inner_vlan; + __le16 inner_eth_type; +}; + +/* The fcoe storm context of Pstorm */ +struct pstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 cos; + u8 conf_version; + u8 rsrv; + __le16 stat_ram_addr; + __le16 mss; + struct regpair abts_cleanup_addr; + struct pstorm_fcoe_eth_context_section eth; + u8 sid_2; + u8 sid_1; + u8 sid_0; + u8 flags; +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_VNTAG_VLAN_SHIFT 0 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SUPPORT_REC_RR_TOV_SHIFT 1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 2 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_OUTER_VLAN_FLAG_SHIFT 3 +#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_MASK 0x1 +#define PSTORM_FCOE_CONN_ST_CTX_SINGLE_VLAN_FLAG_SHIFT 4 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x7 +#define PSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 5 + u8 did_2; + u8 did_1; + u8 did_0; + u8 src_mac_index; + __le16 rec_rr_tov_val; + u8 q_relative_offset; + u8 reserved1; +}; + +/* The fcoe storm context of Xstorm */ +struct xstorm_fcoe_conn_st_ctx { + u8 func_mode; + u8 src_mac_index; + u8 conf_version; + u8 cached_wqes_avail; + __le16 stat_ram_addr; + u8 flags; +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SQ_DEFERRED_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_MASK 0x1 +#define 
XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_INNER_VLAN_FLAG_ORIG_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_MASK 0x3 +#define XSTORM_FCOE_CONN_ST_CTX_LAST_QUEUE_HANDLED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_MASK 0x7 +#define XSTORM_FCOE_CONN_ST_CTX_RSRV_SHIFT 5 + u8 cached_wqes_offset; + u8 reserved2; + u8 eth_hdr_size; + u8 seq_id; + u8 max_conc_seqs; + __le16 num_pages_in_pbl; + __le16 reserved; + struct regpair sq_pbl_addr; + struct regpair sq_curr_page_addr; + struct regpair sq_next_page_addr; + struct regpair xferq_pbl_addr; + struct regpair xferq_curr_page_addr; + struct regpair xferq_next_page_addr; + struct regpair respq_pbl_addr; + struct regpair respq_curr_page_addr; + struct regpair respq_next_page_addr; + __le16 mtu; + __le16 tx_max_fc_pay_len; + __le16 max_fc_payload_len; + __le16 min_frame_size; + __le16 sq_pbl_next_index; + __le16 respq_pbl_next_index; + u8 fcp_cmd_byte_credit; + u8 fcp_rsp_byte_credit; + __le16 protection_info; +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_PROTECTION_PERF_SHIFT 0 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_SUPPORT_PROTECTION_SHIFT 1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_VALID_SHIFT 2 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_MASK 0x1 +#define XSTORM_FCOE_CONN_ST_CTX_FRAME_PROT_ALIGNED_SHIFT 3 +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_MASK 0xF +#define XSTORM_FCOE_CONN_ST_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_MASK 0xFF +#define XSTORM_FCOE_CONN_ST_CTX_DST_PROTECTION_PER_MSS_SHIFT 8 + __le16 xferq_pbl_next_index; + __le16 page_size; + u8 mid_seq; + u8 fcp_xfer_byte_credit; + u8 reserved1[2]; + struct fcoe_wqe cached_wqes[16]; +}; + +struct xstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6 +#define 
XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7 + u8 flags2; +#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6 + u8 flags7; +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define 
XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7 + u8 flags12; +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define 
XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 word1; + __le16 word2; + __le16 sq_cons; + __le16 sq_prod; + __le16 xferq_prod; + __le16 xferq_cons; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 remain_io; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le16 respq_prod; + __le16 respq_cons; + __le16 word9; + __le16 word10; + __le32 reg7; + __le32 reg8; +}; + +/* The fcoe storm context of Ustorm */ +struct ustorm_fcoe_conn_st_ctx { + struct regpair respq_pbl_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + __le16 respq_prod; + u8 reserved[2]; +}; + +struct tstorm_fcoe_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6 + u8 flags1; +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 
0x1 +#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7 + u8 flags4; +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; +}; + +struct ustorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1 
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +/* The fcoe storm context of Tstorm */ +struct tstorm_fcoe_conn_st_ctx { + __le16 stat_ram_addr; + __le16 rx_max_fc_payload_len; + __le16 e_d_tov_val; + u8 flags; +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_INC_SEQ_CNT_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_MASK 0x1 +#define TSTORM_FCOE_CONN_ST_CTX_SUPPORT_CONF_SHIFT 1 +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_DEF_Q_IDX_SHIFT 2 + u8 timers_cleanup_invocation_cnt; + __le32 reserved1[2]; + __le32 dst_mac_address_bytes_0_to_3; + __le16 dst_mac_address_bytes_4_to_5; + __le16 ramrod_echo; + u8 flags1; +#define TSTORM_FCOE_CONN_ST_CTX_MODE_MASK 0x3 +#define TSTORM_FCOE_CONN_ST_CTX_MODE_SHIFT 0 +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_MASK 0x3F +#define TSTORM_FCOE_CONN_ST_CTX_RESERVED_SHIFT 2 + u8 cq_relative_offset; + u8 cmdq_relative_offset; + u8 bdq_resource_id; + u8 reserved0[4]; +}; + +struct mstorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +/* Fast path part of the fcoe storm context of Mstorm */ +struct fcoe_mstorm_fcoe_conn_st_ctx_fp { + __le16 
xfer_prod; + u8 num_cqs; + u8 reserved1; + u8 protection_info; +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_SUPPORT_PROTECTION_SHIFT 0 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_MASK 0x1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_VALID_SHIFT 1 +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_MASK 0x3F +#define FCOE_MSTORM_FCOE_CONN_ST_CTX_FP_RESERVED0_SHIFT 2 + u8 q_relative_offset; + u8 reserved2[2]; +}; + +/* Non fast path part of the fcoe storm context of Mstorm */ +struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp { + __le16 conn_id; + __le16 stat_ram_addr; + __le16 num_pages_in_pbl; + u8 ptu_log_page_size; + u8 log_page_size; + __le16 unsolicited_cq_count; + __le16 cmdq_count; + u8 bdq_resource_id; + u8 reserved0[3]; + struct regpair xferq_pbl_addr; + struct regpair reserved1; + struct regpair reserved2[3]; +}; + +/* The fcoe storm context of Mstorm */ +struct mstorm_fcoe_conn_st_ctx { + struct fcoe_mstorm_fcoe_conn_st_ctx_fp fp; + struct fcoe_mstorm_fcoe_conn_st_ctx_non_fp non_fp; +}; + +/* fcoe connection context */ +struct fcoe_conn_context { + struct ystorm_fcoe_conn_st_ctx ystorm_st_context; + struct pstorm_fcoe_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct xstorm_fcoe_conn_st_ctx xstorm_st_context; + struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context; + struct regpair xstorm_ag_padding[6]; + struct ustorm_fcoe_conn_st_ctx ustorm_st_context; + struct regpair ustorm_st_padding[2]; + struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context; + struct tstorm_fcoe_conn_st_ctx tstorm_st_context; + struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context; + struct mstorm_fcoe_conn_st_ctx mstorm_st_context; +}; + +/* FCoE connection offload params passed by driver to FW in FCoE offload + * ramrod. + */ +struct fcoe_conn_offload_ramrod_params { + struct fcoe_conn_offload_ramrod_data offload_ramrod_data; +}; + +/* FCoE connection terminate params passed by driver to FW in FCoE terminate + * conn ramrod. + */ +struct fcoe_conn_terminate_ramrod_params { + struct fcoe_conn_terminate_ramrod_data terminate_ramrod_data; +}; + +/* FCoE event type */ +enum fcoe_event_type { + FCOE_EVENT_INIT_FUNC, + FCOE_EVENT_DESTROY_FUNC, + FCOE_EVENT_STAT_FUNC, + FCOE_EVENT_OFFLOAD_CONN, + FCOE_EVENT_TERMINATE_CONN, + FCOE_EVENT_ERROR, + MAX_FCOE_EVENT_TYPE +}; + +/* FCoE init params passed by driver to FW in FCoE init ramrod */ +struct fcoe_init_ramrod_params { + struct fcoe_init_func_ramrod_data init_ramrod_data; +}; + +/* FCoE ramrod Command IDs */ +enum fcoe_ramrod_cmd_id { + FCOE_RAMROD_CMD_ID_INIT_FUNC, + FCOE_RAMROD_CMD_ID_DESTROY_FUNC, + FCOE_RAMROD_CMD_ID_STAT_FUNC, + FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, + FCOE_RAMROD_CMD_ID_TERMINATE_CONN, + MAX_FCOE_RAMROD_CMD_ID +}; + +/* FCoE statistics params buffer passed by driver to FW in FCoE statistics + * ramrod. 
+ */ +struct fcoe_stat_ramrod_params { + struct fcoe_stat_ramrod_data stat_ramrod_data; +}; + +struct ystorm_fcoe_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +/* The iscsi storm connection context of Ystorm */ +struct ystorm_iscsi_conn_st_ctx { + __le32 reserved[8]; +}; + +/* Combined iSCSI and TCP storm connection of Pstorm */ +struct pstorm_iscsi_tcp_conn_st_ctx { + __le32 tcp[32]; + __le32 iscsi[4]; +}; + +/* The combined tcp and iscsi storm context of Xstorm */ +struct xstorm_iscsi_tcp_conn_st_ctx { + __le32 reserved_tcp[4]; + __le32 reserved_iscsi[44]; +}; + +struct xstorm_iscsi_conn_ag_ctx { + u8 cdu_validation; + u8 state; + u8 flags0; +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7 + u8 flags1; +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1 
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7 + u8 flags2; +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6 + u8 flags3; +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6 + u8 flags6; +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6 + u8 flags7; +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 
flags9; +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7 + u8 flags11; +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define 
XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1 +#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3 +#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6 + u8 byte2; + __le16 physical_q0; + __le16 physical_q1; + __le16 dummy_dorq_var; + __le16 sq_cons; + __le16 sq_prod; + __le16 word5; + __le16 slow_io_total_data_tx_update; + u8 byte3; + u8 byte4; + u8 byte5; + u8 byte6; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 more_to_send_seq; + __le32 reg4; + __le32 reg5; + __le32 hq_scan_next_relevant_ack; + __le16 r2tq_prod; + __le16 r2tq_cons; + __le16 hq_prod; + __le16 hq_cons; + __le32 remain_seq; + __le32 bytes_to_next_pdu; + __le32 hq_tcp_seq; + u8 byte7; + u8 byte8; + u8 byte9; + u8 byte10; + u8 byte11; + u8 byte12; + u8 byte13; + u8 byte14; + u8 byte15; + u8 e5_reserved; + __le16 word11; + __le32 reg10; + __le32 reg11; + __le32 exp_stat_sn; + __le32 ongoing_fast_rxmit_seq; + __le32 reg14; + __le32 reg15; + __le32 reg16; + __le32 reg17; +}; + +struct tstorm_iscsi_conn_ag_ctx { + u8 reserved0; + u8 state; + u8 flags0; +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4 +#define 
TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7 + u8 flags4; +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0; + __le32 reg1; + __le32 rx_tcp_checksum_err_cnt; + __le32 reg3; + __le32 reg4; + __le32 reg5; + __le32 reg6; + __le32 reg7; + __le32 reg8; + u8 cid_offload_cnt; + u8 byte3; + __le16 word0; +}; + +struct ustorm_iscsi_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define 
USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; + __le32 reg2; + __le32 reg3; + __le16 word2; + __le16 word3; +}; + +/* The iscsi storm connection context of Tstorm */ +struct tstorm_iscsi_conn_st_ctx { + __le32 reserved[44]; +}; + +struct mstorm_iscsi_conn_ag_ctx { + u8 reserved; + u8 state; + u8 flags0; +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define 
MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0; + __le16 word1; + __le32 reg0; + __le32 reg1; +}; + +/* Combined iSCSI and TCP storm connection of Mstorm */ +struct mstorm_iscsi_tcp_conn_st_ctx { + __le32 reserved_tcp[20]; + __le32 reserved_iscsi[12]; +}; + +/* The iscsi storm context of Ustorm */ +struct ustorm_iscsi_conn_st_ctx { + __le32 reserved[52]; +}; + +/* iscsi connection context */ +struct iscsi_conn_context { + struct ystorm_iscsi_conn_st_ctx ystorm_st_context; + struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context; + struct regpair pstorm_st_padding[2]; + struct pb_context xpb2_context; + struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context; + struct regpair xstorm_st_padding[2]; + struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context; + struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context; + struct regpair tstorm_ag_padding[2]; + struct timers_context timer_context; + struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context; + struct pb_context upb_context; + struct tstorm_iscsi_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; + struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context; + struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context; + struct ustorm_iscsi_conn_st_ctx ustorm_st_context; +}; + +/* iSCSI init params passed by driver to FW in iSCSI init ramrod */ +struct iscsi_init_ramrod_params { + struct iscsi_spe_func_init iscsi_init_spe; + struct tcp_init_params tcp_init; +}; + +struct ystorm_iscsi_conn_ag_ctx { + u8 byte0; + u8 byte1; + u8 flags0; +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2; + u8 byte3; + __le16 word0; + __le32 reg0; + __le32 reg1; + __le16 word1; + __le16 word2; + __le16 word3; + __le16 word4; + __le32 reg2; + __le32 reg3; +}; + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c new file mode 100644 index 0000000000..6263f847b6 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -0,0 +1,932 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_reg_addr.h" +#include "qed_sriov.h" + +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10 + +/* Invalid values */ +#define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1)) + +struct qed_ptt { + struct list_head list_entry; + unsigned int idx; + struct pxp_ptt_entry pxp; + u8 hwfn_id; +}; + +struct qed_ptt_pool { + struct list_head free_list; + spinlock_t lock; /* ptt synchronized access */ + struct qed_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; +}; + +int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL); + int i; + + if (!p_pool) + return -ENOMEM; + + INIT_LIST_HEAD(&p_pool->free_list); + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_pool->ptts[i].idx = i; + p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET; + p_pool->ptts[i].pxp.pretend.control = 0; + p_pool->ptts[i].hwfn_id = p_hwfn->my_id; + if (i >= RESERVED_PTT_MAX) + list_add(&p_pool->ptts[i].list_entry, + &p_pool->free_list); + } + + p_hwfn->p_ptt_pool = p_pool; + spin_lock_init(&p_pool->lock); + + return 0; +} + +void qed_ptt_invalidate(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt *p_ptt; + int i; + + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_ptt = &p_hwfn->p_ptt_pool->ptts[i]; + p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET; + } +} + +void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->p_ptt_pool); + p_hwfn->p_ptt_pool = NULL; +} + +struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) +{ + return qed_ptt_acquire_context(p_hwfn, false); +} + +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic) +{ + struct qed_ptt *p_ptt; + unsigned int i, count; + + if (is_atomic) + count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT; + else + count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT; + + /* Take the free PTT from the list */ + for (i = 0; i < count; i++) { + spin_lock_bh(&p_hwfn->p_ptt_pool->lock); + + if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) { + p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list, + struct qed_ptt, list_entry); + list_del(&p_ptt->list_entry); + + spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "allocated ptt %d\n", p_ptt->idx); + return p_ptt; + } + + spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); + + if (is_atomic) + udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY); + else + usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP, + QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2); + } + + DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n"); + return NULL; +} + +void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + spin_lock_bh(&p_hwfn->p_ptt_pool->lock); + list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); + spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); +} + +u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + /* The HW is using DWORDS and we need to translate it to Bytes */ + return le32_to_cpu(p_ptt->pxp.offset) << 2; +} + +static u32 qed_ptt_config_addr(struct 
qed_ptt *p_ptt) +{ + return PXP_PF_WINDOW_ADMIN_PER_PF_START + + p_ptt->idx * sizeof(struct pxp_ptt_entry); +} + +u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt) +{ + return PXP_EXTERNAL_BAR_PF_WINDOW_START + + p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE; +} + +void qed_ptt_set_win(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 new_hw_addr) +{ + u32 prev_hw_addr; + + prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); + + if (new_hw_addr == prev_hw_addr) + return; + + /* Update PTT entery in admin window */ + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Updating PTT entry %d to offset 0x%x\n", + p_ptt->idx, new_hw_addr); + + /* The HW is using DWORDS and the address is in Bytes */ + p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, offset), + le32_to_cpu(p_ptt->pxp.offset)); +} + +static u32 qed_set_ptt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 hw_addr) +{ + u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt); + u32 offset; + + offset = hw_addr - win_hw_addr; + + if (p_ptt->hwfn_id != p_hwfn->my_id) + DP_NOTICE(p_hwfn, + "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n", + p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id); + + /* Verify the address is within the window */ + if (hw_addr < win_hw_addr || + offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) { + qed_ptt_set_win(p_hwfn, p_ptt, hw_addr); + offset = 0; + } + + return qed_ptt_get_bar_addr(p_ptt) + offset; +} + +struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, + enum reserved_ptts ptt_idx) +{ + if (ptt_idx >= RESERVED_PTT_MAX) { + DP_NOTICE(p_hwfn, + "Requested PTT %d is out of range\n", ptt_idx); + return NULL; + } + + return &p_hwfn->p_ptt_pool->ptts[ptt_idx]; +} + +void qed_wr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr, u32 val) +{ + u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); + + REG_WR(p_hwfn, bar_addr, val); + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); +} + +u32 qed_rd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr) +{ + u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr); + u32 val = REG_RD(p_hwfn, bar_addr); + + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); + + return val; +} + +static void qed_memcpy_hw(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *addr, u32 hw_addr, size_t n, bool to_device) +{ + u32 dw_count, *host_addr, hw_offset; + size_t quota, done = 0; + u32 __iomem *reg_addr; + + while (done < n) { + quota = min_t(size_t, n - done, + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); + + if (IS_PF(p_hwfn->cdev)) { + qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); + hw_offset = qed_ptt_get_bar_addr(p_ptt); + } else { + hw_offset = hw_addr + done; + } + + dw_count = quota / 4; + host_addr = (u32 *)((u8 *)addr + done); + reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset); + if (to_device) + while (dw_count--) + DIRECT_REG_WR(reg_addr++, *host_addr++); + else + while (dw_count--) + *host_addr++ = DIRECT_REG_RD(reg_addr++); + + done += quota; + } +} + +void qed_memcpy_from(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n) +{ + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n", + hw_addr, dest, hw_addr, (unsigned long)n); + + qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false); +} + +void qed_memcpy_to(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 hw_addr, void *src, 
size_t n) +{ + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n", + hw_addr, hw_addr, src, (unsigned long)n); + + qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); +} + +void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + + /* Every pretend undos previous pretends, including + * previous port pretend. + */ + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + + p_ptt->pxp.pretend.control = cpu_to_le16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void qed_port_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + p_ptt->pxp.pretend.control = cpu_to_le16(control); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + + p_ptt->pxp.pretend.control = cpu_to_le16(control); + + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid) +{ + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + p_ptt->pxp.pretend.control = cpu_to_le16(control); + p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid); + REG_WR(p_hwfn, + qed_ptt_config_addr(p_ptt) + + offsetof(struct pxp_ptt_entry, pretend), + *(u32 *)&p_ptt->pxp.pretend); +} + +u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid) +{ + u32 concrete_fid = 0; + + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1); + + return concrete_fid; +} + +/* DMAE */ +#define QED_DMAE_FLAGS_IS_SET(params, flag) \ + ((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag)) + +static void qed_dmae_opcode(struct qed_hwfn *p_hwfn, + const u8 is_src_type_grc, + const u8 is_dst_type_grc, + struct qed_dmae_params *p_params) +{ + u8 src_pfid, dst_pfid, port_id; + u16 opcode_b = 0; + u32 opcode = 0; + + /* Whether the source is the PCIe or the GRC. + * 0- The source is the PCIe + * 1- The source is the GRC. + */ + SET_FIELD(opcode, DMAE_CMD_SRC, + (is_src_type_grc ? 
dmae_cmd_src_grc : dmae_cmd_src_pcie)); + src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ? + p_params->src_pfid : p_hwfn->rel_pf_id; + SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid); + + /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */ + SET_FIELD(opcode, DMAE_CMD_DST, + (is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie)); + dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ? + p_params->dst_pfid : p_hwfn->rel_pf_id; + SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid); + + + /* Whether to write a completion word to the completion destination: + * 0-Do not write a completion word + * 1-Write the completion word + */ + SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1); + SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1); + + if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST)) + SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1); + + /* swapping mode 3 - big endian */ + SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY); + + port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ? + p_params->port_id : p_hwfn->port_id; + SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id); + + /* reset source address in next go */ + SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1); + + /* reset dest address in next go */ + SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1); + + /* SRC/DST VFID: all 1's - pf, otherwise VF id */ + if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) { + SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1); + SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid); + } else { + SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF); + } + if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) { + SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1); + SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid); + } else { + SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF); + } + + p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode); + p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b); +} + +u32 qed_dmae_idx_to_go_cmd(u8 idx) +{ + /* All the DMAE 'go' registers form an array in internal memory */ + return DMAE_REG_GO_C0 + (idx << 2); +} + +static int qed_dmae_post_command(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd; + u8 idx_cmd = p_hwfn->dmae_info.channel, i; + int qed_status = 0; + + /* verify address is not NULL */ + if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) || + ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) { + DP_NOTICE(p_hwfn, + "source or destination address 0 idx_cmd=%d\n" + "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, + le32_to_cpu(p_command->opcode), + le16_to_cpu(p_command->opcode_b), + le16_to_cpu(p_command->length_dw), + le32_to_cpu(p_command->src_addr_hi), + le32_to_cpu(p_command->src_addr_lo), + le32_to_cpu(p_command->dst_addr_hi), + le32_to_cpu(p_command->dst_addr_lo)); + + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, + le32_to_cpu(p_command->opcode), + le16_to_cpu(p_command->opcode_b), + le16_to_cpu(p_command->length_dw), + le32_to_cpu(p_command->src_addr_hi), + le32_to_cpu(p_command->src_addr_lo), + le32_to_cpu(p_command->dst_addr_hi), + le32_to_cpu(p_command->dst_addr_lo)); + + /* Copy the command to DMAE - need to do it before every call + * for source/dest address no reset. 
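+ * (The opcode sets DMAE_CMD_SRC_ADDR_RESET and DMAE_CMD_DST_ADDR_RESET,
+ * so the HW drops the programmed addresses after each go and the command
+ * block has to be re-written before every post.)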
+ * The first 9 DWs are the command registers, the 10 DW is the + * GO register, and the rest are result registers + * (which are read only by the client). + */ + for (i = 0; i < DMAE_CMD_SIZE; i++) { + u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ? + *(((u32 *)p_command) + i) : 0; + + qed_wr(p_hwfn, p_ptt, + DMAE_REG_CMD_MEM + + (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) + + (i * sizeof(u32)), data); + } + + qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE); + + return qed_status; +} + +int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn) +{ + dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr; + struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd; + u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer; + u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; + + *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32), p_addr, GFP_KERNEL); + if (!*p_comp) + goto err; + + p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr; + *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct dmae_cmd), + p_addr, GFP_KERNEL); + if (!*p_cmd) + goto err; + + p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr; + *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32) * DMAE_MAX_RW_SIZE, + p_addr, GFP_KERNEL); + if (!*p_buff) + goto err; + + p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + + return 0; +err: + qed_dmae_info_free(p_hwfn); + return -ENOMEM; +} + +void qed_dmae_info_free(struct qed_hwfn *p_hwfn) +{ + dma_addr_t p_phys; + + /* Just make sure no one is in the middle */ + mutex_lock(&p_hwfn->dmae_info.mutex); + + if (p_hwfn->dmae_info.p_completion_word) { + p_phys = p_hwfn->dmae_info.completion_word_phys_addr; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32), + p_hwfn->dmae_info.p_completion_word, p_phys); + p_hwfn->dmae_info.p_completion_word = NULL; + } + + if (p_hwfn->dmae_info.p_dmae_cmd) { + p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct dmae_cmd), + p_hwfn->dmae_info.p_dmae_cmd, p_phys); + p_hwfn->dmae_info.p_dmae_cmd = NULL; + } + + if (p_hwfn->dmae_info.p_intermediate_buffer) { + p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(u32) * DMAE_MAX_RW_SIZE, + p_hwfn->dmae_info.p_intermediate_buffer, + p_phys); + p_hwfn->dmae_info.p_intermediate_buffer = NULL; + } + + mutex_unlock(&p_hwfn->dmae_info.mutex); +} + +static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn) +{ + u32 wait_cnt_limit = 10000, wait_cnt = 0; + int qed_status = 0; + + barrier(); + while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) { + udelay(DMAE_MIN_WAIT_TIME); + if (++wait_cnt > wait_cnt_limit) { + DP_NOTICE(p_hwfn->cdev, + "Timed-out waiting for operation to complete. 
Completion word is 0x%08x expected 0x%08x.\n", + *p_hwfn->dmae_info.p_completion_word, + DMAE_COMPLETION_VAL); + qed_status = -EBUSY; + break; + } + + /* to sync the completion_word since we are not + * using the volatile keyword for p_completion_word + */ + barrier(); + } + + if (qed_status == 0) + *p_hwfn->dmae_info.p_completion_word = 0; + + return qed_status; +} + +static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u64 src_addr, + u64 dst_addr, + u8 src_type, + u8 dst_type, + u32 length_dw) +{ + dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + int qed_status = 0; + + switch (src_type) { + case QED_DMAE_ADDRESS_GRC: + case QED_DMAE_ADDRESS_HOST_PHYS: + cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr)); + cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr)); + break; + /* for virtual source addresses we use the intermediate buffer. */ + case QED_DMAE_ADDRESS_HOST_VIRT: + cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys)); + cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys)); + memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0], + (void *)(uintptr_t)src_addr, + length_dw * sizeof(u32)); + break; + default: + return -EINVAL; + } + + switch (dst_type) { + case QED_DMAE_ADDRESS_GRC: + case QED_DMAE_ADDRESS_HOST_PHYS: + cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr)); + cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr)); + break; + /* for virtual source addresses we use the intermediate buffer. */ + case QED_DMAE_ADDRESS_HOST_VIRT: + cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys)); + cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys)); + break; + default: + return -EINVAL; + } + + cmd->length_dw = cpu_to_le16((u16)length_dw); + + qed_dmae_post_command(p_hwfn, p_ptt); + + qed_status = qed_dmae_operation_wait(p_hwfn); + + if (qed_status) { + DP_NOTICE(p_hwfn, + "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n", + src_addr, dst_addr, length_dw); + return qed_status; + } + + if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT) + memcpy((void *)(uintptr_t)(dst_addr), + &p_hwfn->dmae_info.p_intermediate_buffer[0], + length_dw * sizeof(u32)); + + return 0; +} + +static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u64 src_addr, u64 dst_addr, + u8 src_type, u8 dst_type, + u32 size_in_dwords, + struct qed_dmae_params *p_params) +{ + dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr; + u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + u64 src_addr_split = 0, dst_addr_split = 0; + u16 length_limit = DMAE_MAX_RW_SIZE; + int qed_status = 0; + u32 offset = 0; + + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_HW, + "Recovery is in progress. 
Avoid DMAE transaction [{src: addr 0x%llx, type %d}, {dst: addr 0x%llx, type %d}, size %d].\n", + src_addr, src_type, dst_addr, dst_type, + size_in_dwords); + + /* Let the flow complete w/o any error handling */ + return 0; + } + + qed_dmae_opcode(p_hwfn, + (src_type == QED_DMAE_ADDRESS_GRC), + (dst_type == QED_DMAE_ADDRESS_GRC), + p_params); + + cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys)); + cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys)); + cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL); + + /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */ + cnt_split = size_in_dwords / length_limit; + length_mod = size_in_dwords % length_limit; + + src_addr_split = src_addr; + dst_addr_split = dst_addr; + + for (i = 0; i <= cnt_split; i++) { + offset = length_limit * i; + + if (!QED_DMAE_FLAGS_IS_SET(p_params, RW_REPL_SRC)) { + if (src_type == QED_DMAE_ADDRESS_GRC) + src_addr_split = src_addr + offset; + else + src_addr_split = src_addr + (offset * 4); + } + + if (dst_type == QED_DMAE_ADDRESS_GRC) + dst_addr_split = dst_addr + offset; + else + dst_addr_split = dst_addr + (offset * 4); + + length_cur = (cnt_split == i) ? length_mod : length_limit; + + /* might be zero on last iteration */ + if (!length_cur) + continue; + + qed_status = qed_dmae_execute_sub_operation(p_hwfn, + p_ptt, + src_addr_split, + dst_addr_split, + src_type, + dst_type, + length_cur); + if (qed_status) { + qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL, + "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n", + qed_status, src_addr, + dst_addr, length_cur); + break; + } + } + + return qed_status; +} + +int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u64 source_addr, u32 grc_addr, u32 size_in_dwords, + struct qed_dmae_params *p_params) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + int rc; + + + mutex_lock(&p_hwfn->dmae_info.mutex); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, + grc_addr_in_dw, + QED_DMAE_ADDRESS_HOST_VIRT, + QED_DMAE_ADDRESS_GRC, + size_in_dwords, p_params); + + mutex_unlock(&p_hwfn->dmae_info.mutex); + + return rc; +} + +int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, + struct qed_dmae_params *p_params) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + int rc; + + + mutex_lock(&p_hwfn->dmae_info.mutex); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, + dest_addr, QED_DMAE_ADDRESS_GRC, + QED_DMAE_ADDRESS_HOST_VIRT, + size_in_dwords, p_params); + + mutex_unlock(&p_hwfn->dmae_info.mutex); + + return rc; +} + +int qed_dmae_host2host(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct qed_dmae_params *p_params) +{ + int rc; + + mutex_lock(&(p_hwfn->dmae_info.mutex)); + + rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr, + dest_addr, + QED_DMAE_ADDRESS_HOST_PHYS, + QED_DMAE_ADDRESS_HOST_PHYS, + size_in_dwords, p_params); + + mutex_unlock(&(p_hwfn->dmae_info.mutex)); + + return rc; +} + +void qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_hw_err_type err_type, const char *fmt, ...) 
+{ + char buf[QED_HW_ERR_MAX_STR_SIZE]; + va_list vl; + int len; + + if (fmt) { + va_start(vl, fmt); + len = vsnprintf(buf, QED_HW_ERR_MAX_STR_SIZE, fmt, vl); + va_end(vl); + + if (len > QED_HW_ERR_MAX_STR_SIZE - 1) + len = QED_HW_ERR_MAX_STR_SIZE - 1; + + DP_NOTICE(p_hwfn, "%s", buf); + } + + /* Fan failure cannot be masked by handling of another HW error */ + if (p_hwfn->cdev->recov_in_prog && + err_type != QED_HW_ERR_FAN_FAIL) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_DRV, + "Recovery is in progress. Avoid notifying about HW error %d.\n", + err_type); + return; + } + + qed_hw_error_occurred(p_hwfn, err_type); + + if (fmt) + qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, len); +} + +int qed_dmae_sanity(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, const char *phase) +{ + u32 size = PAGE_SIZE / 2, val; + int rc = 0; + dma_addr_t p_phys; + void *p_virt; + u32 *p_tmp; + + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + 2 * size, &p_phys, GFP_KERNEL); + if (!p_virt) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: failed to allocate memory\n", + phase); + return -ENOMEM; + } + + /* Fill the bottom half of the allocated memory with a known pattern */ + for (p_tmp = (u32 *)p_virt; + p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) { + /* Save the address itself as the value */ + val = (u32)(uintptr_t)p_tmp; + *p_tmp = val; + } + + /* Zero the top half of the allocated memory */ + memset((u8 *)p_virt + size, 0, size); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n", + phase, + (u64)p_phys, + p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size); + + rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size, + size / 4, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n", + phase, rc); + goto out; + } + + /* Verify that the top half of the allocated memory has the pattern */ + for (p_tmp = (u32 *)((u8 *)p_virt + size); + p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) { + /* The corresponding address in the bottom half */ + val = (u32)(uintptr_t)p_tmp - size; + + if (*p_tmp != val) { + DP_NOTICE(p_hwfn, + "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n", + phase, + (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt), + p_tmp, *p_tmp, val); + rc = -EINVAL; + goto out; + } + } + +out: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys); + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h new file mode 100644 index 0000000000..e535983ce2 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -0,0 +1,336 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef _QED_HW_H +#define _QED_HW_H + +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_dev_api.h" + +/* Forward decleration */ +struct qed_ptt; + +enum reserved_ptts { + RESERVED_PTT_EDIAG, + RESERVED_PTT_USER_SPACE, + RESERVED_PTT_MAIN, + RESERVED_PTT_DPC, + RESERVED_PTT_MAX +}; + +enum _dmae_cmd_dst_mask { + DMAE_CMD_DST_MASK_NONE = 0, + DMAE_CMD_DST_MASK_PCIE = 1, + DMAE_CMD_DST_MASK_GRC = 2 +}; + +enum _dmae_cmd_src_mask { + DMAE_CMD_SRC_MASK_PCIE = 0, + DMAE_CMD_SRC_MASK_GRC = 1 +}; + +enum _dmae_cmd_crc_mask { + DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0, + DMAE_CMD_COMP_CRC_EN_MASK_SET = 1 +}; + +/* definitions for DMA constants */ +#define DMAE_GO_VALUE 0x1 + +#define DMAE_COMPLETION_VAL 0xD1AE +#define DMAE_CMD_ENDIANITY 0x2 + +#define DMAE_CMD_SIZE 14 +#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5) +#define DMAE_MIN_WAIT_TIME 0x2 +#define DMAE_MAX_CLIENTS 32 + +/** + * qed_gtt_init(): Initialize GTT windows. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_gtt_init(struct qed_hwfn *p_hwfn); + +/** + * qed_ptt_invalidate(): Forces all ptt entries to be re-configured + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); + +/** + * qed_ptt_pool_alloc(): Allocate and initialize PTT pool. + * + * @p_hwfn: HW device data. + * + * Return: struct _qed_status - success (0), negative - error. + */ +int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_ptt_pool_free(): Free PTT pool. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_ptt_pool_free(struct qed_hwfn *p_hwfn); + +/** + * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt + * + * Return: u32. + */ +u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_ptt_get_bar_addr(): Get PPT's external BAR address. + * + * @p_ptt: P_ptt + * + * Return: u32. + */ +u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt); + +/** + * qed_ptt_set_win(): Set PTT Window's GRC BAR address + * + * @p_hwfn: HW device data. + * @new_hw_addr: New HW address. + * @p_ptt: P_Ptt + * + * Return: Void. + */ +void qed_ptt_set_win(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 new_hw_addr); + +/** + * qed_get_reserved_ptt(): Get a specific reserved PTT. + * + * @p_hwfn: HW device data. + * @ptt_idx: Ptt Index. + * + * Return: struct qed_ptt *. + */ +struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, + enum reserved_ptts ptt_idx); + +/** + * qed_wr(): Write value to BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @val: Val. + * @hw_addr: HW address + * + * Return: Void. + */ +void qed_wr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr, + u32 val); + +/** + * qed_rd(): Read value from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address + * + * Return: Void. + */ +u32 qed_rd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr); + +/** + * qed_memcpy_from(): Copy n bytes from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @dest: Destination. + * @hw_addr: HW address. + * @n: N + * + * Return: Void. + */ +void qed_memcpy_from(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *dest, + u32 hw_addr, + size_t n); + +/** + * qed_memcpy_to(): Copy n bytes to BAR using the given ptt + * + * @p_hwfn: HW device data. 
+ * @p_ptt: P_ptt. + * @hw_addr: HW address. + * @src: Source. + * @n: N + * + * Return: Void. + */ +void qed_memcpy_to(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 hw_addr, + void *src, + size_t n); +/** + * qed_fid_pretend(): pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @fid: fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + * + * Return: Void. + */ +void qed_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 fid); + +/** + * qed_port_pretend(): Pretend to another port when accessing the ptt window + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * + * Return: Void. + */ +void qed_port_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 port_id); + +/** + * qed_port_unpretend(): Cancel any previously set port pretend + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + */ +void qed_port_unpretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_port_fid_pretend(): Pretend to another port and another function + * when accessing the ptt window + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * @fid: fid field of pxp_pretend structure. Can contain either pf / vf. + * + * Return: Void. + */ +void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 port_id, u16 fid); + +/** + * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID + * + * @p_hwfn: HW device data. + * @vfid: VFID. + * + * Return: Void. + */ +u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); + +/** + * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd + * this is declared here since other files will require it. + * + * @idx: Index + * + * Return: Void. + */ +u32 qed_dmae_idx_to_go_cmd(u8 idx); + +/** + * qed_dmae_info_alloc(): Init the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_dmae_info_free(): Free the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_dmae_info_free(struct qed_hwfn *p_hwfn); + +union qed_qm_pq_params { + struct { + u8 q_idx; + } iscsi; + + struct { + u8 tc; + } core; + + struct { + u8 is_vf; + u8 vf_id; + u8 tc; + } eth; + + struct { + u8 dcqcn; + u8 qpid; /* roce relative */ + } roce; +}; + +int qed_init_fw_data(struct qed_dev *cdev, + const u8 *fw_data); + +int qed_dmae_sanity(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, const char *phase); + +#define QED_HW_ERR_MAX_STR_SIZE 256 + +/** + * qed_hw_err_notify(): Notify upper layer driver and management FW + * about a HW error. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @err_type: Err Type. + * @fmt: Debug data buffer to send to the MFW + * @...: buffer format args + * + * Return void. 
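+ *
+ * Illustrative call, modelled on the DMAE failure path in qed_hw.c
+ * (the format string and rc below are placeholders, not driver code):
+ *
+ *   qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_DMAE_FAIL,
+ *                     "DMAE transaction failed, rc = %d\n", rc);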
+ */ +void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_hw_err_type err_type, + const char *fmt, ...); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c new file mode 100644 index 0000000000..407029a36f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -0,0 +1,1932 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#include <linux/types.h> +#include <linux/crc8.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_iro_hsi.h" +#include "qed_reg_addr.h" + +#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG + +static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = { + {400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */ + {528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */ + {608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */ +}; + +static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = { + {240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */ +}; + +/* General constants */ +#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \ + QM_PQ_ELEMENT_SIZE, \ + 0x1000) : 0) +#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \ + 0x100) - 1 : 0) +#define QM_INVALID_PQ_ID 0xffff + +/* Max link speed (in Mbps) */ +#define QM_MAX_LINK_SPEED 100000 + +/* Feature enable */ +#define QM_BYPASS_EN 1 +#define QM_BYTE_CRD_EN 1 + +/* Initial VOQ byte credit */ +#define QM_INITIAL_VOQ_BYTE_CRD 98304 +/* Other PQ constants */ +#define QM_OTHER_PQS_PER_PF 4 + +/* VOQ constants */ +#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2) +#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1) + +/* WFQ constants */ + +/* PF WFQ increment value, 0x9000 = 4*9*1024 */ +#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000) + +/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */ +#define QM_PF_WFQ_UPPER_BOUND 62500000 + +/* PF WFQ max increment value, 0.7 * upper bound */ +#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10) + +/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */ +#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16 + +/* VP WFQ increment value */ +#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL) + +/* VP WFQ min increment value */ +#define QM_VP_WFQ_MIN_INC_VAL 10800 + +/* VP WFQ max increment value, 2^30 */ +#define QM_VP_WFQ_MAX_INC_VAL 0x40000000 + +/* VP WFQ bypass threshold */ +#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100) + +/* VP RL credit task cost */ +#define QM_VP_RL_CRD_TASK_COST 9700 + +/* Bit of VOQ in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_VOQ_SHIFT 0 + +/* Bit of PF in VP WFQ PQ map */ +#define QM_VP_WFQ_PQ_PF_SHIFT 5 + +/* RL constants */ + +/* Period in us */ +#define QM_RL_PERIOD 5 + +/* Period in 25MHz cycles */ +#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) + +/* RL increment value - rate is specified in mbps */ +#define QM_RL_INC_VAL(rate) ({ \ + typeof(rate) __rate = (rate); \ + max_t(u32, \ + (u32)(((__rate ? 
__rate : \ + 100000) * \ + QM_RL_PERIOD * \ + 101) / (8 * 100)), 1); }) + +/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */ +#define QM_PF_RL_UPPER_BOUND 62500000 + +/* Max PF RL increment value is 0.7 * upper bound */ +#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10) + +/* QCN RL Upper bound, speed is in Mpbs */ +#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \ + u32, \ + (u32)(((speed) * \ + QM_RL_PERIOD * 101) / (8 * 100)), \ + QM_VP_RL_CRD_TASK_COST \ + + 1000)) + +/* AFullOprtnstcCrdMask constants */ +#define QM_OPPOR_LINE_VOQ_DEF 1 +#define QM_OPPOR_FW_STOP_DEF 0 +#define QM_OPPOR_PQ_EMPTY_DEF 1 + +/* Command Queue constants */ + +/* Pure LB CmdQ lines (+spare) */ +#define PBF_CMDQ_PURE_LB_LINES 150 + +#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \ + (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \ + (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \ + PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) + +#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \ + (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \ + (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \ + PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) + +/* Returns the VOQ line credit for the specified number of PBF command lines. + * PBF lines are specified in 256b units. + */ +#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \ + ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT) + +/* BTB: blocks constants (block size = 256B) */ + +/* 256B blocks in 9700B packet */ +#define BTB_JUMBO_PKT_BLOCKS 38 + +/* Headroom per-port */ +#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS +#define BTB_PURE_LB_FACTOR 10 + +/* Factored (hence really 0.7) */ +#define BTB_PURE_LB_RATIO 7 + +/* QM stop command constants */ +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 2 +#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 +#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 +#define QM_STOP_CMD_PAUSE_MASK_MASK -1 +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 + +/* QM command macros */ +#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE +#define QM_CMD_SET_FIELD(var, cmd, field, value) \ + SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \ + cmd ## _ ## field, \ + value) + +#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \ + rl_id, ext_voq, wrr) \ + do { \ + u32 __reg = 0; \ + \ + BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \ + memset(&(map), 0, sizeof(map)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \ + !!(rl_valid)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \ + SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \ + (wrr)); \ + \ + STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \ + __reg); \ + (map).reg = cpu_to_le32(__reg); \ + } while (0) + +#define WRITE_PQ_INFO_TO_RAM 1 +#define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \ + (((vp) << 0) | ((pf) << 12) | ((tc) << 16) | ((port) << 20) | \ + ((rl_valid ? 
1 : 0) << 22) | (((rl) & 255) << 24) | \ + (((rl) >> 8) << 9)) + +#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \ + (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \ + XSTORM_PQ_INFO_OFFSET(pq_id)) + +static const char * const s_protocol_types[] = { + "PROTOCOLID_ISCSI", "PROTOCOLID_FCOE", "PROTOCOLID_ROCE", + "PROTOCOLID_CORE", "PROTOCOLID_ETH", "PROTOCOLID_IWARP", + "PROTOCOLID_TOE", "PROTOCOLID_PREROCE", "PROTOCOLID_COMMON", + "PROTOCOLID_TCP", "PROTOCOLID_RDMA", "PROTOCOLID_SCSI", +}; + +static const char *s_ramrod_cmd_ids[][28] = { + { + "ISCSI_RAMROD_CMD_ID_UNUSED", "ISCSI_RAMROD_CMD_ID_INIT_FUNC", + "ISCSI_RAMROD_CMD_ID_DESTROY_FUNC", + "ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN", + "ISCSI_RAMROD_CMD_ID_UPDATE_CONN", + "ISCSI_RAMROD_CMD_ID_TERMINATION_CONN", + "ISCSI_RAMROD_CMD_ID_CLEAR_SQ", "ISCSI_RAMROD_CMD_ID_MAC_UPDATE", + "ISCSI_RAMROD_CMD_ID_CONN_STATS", }, + { "FCOE_RAMROD_CMD_ID_INIT_FUNC", "FCOE_RAMROD_CMD_ID_DESTROY_FUNC", + "FCOE_RAMROD_CMD_ID_STAT_FUNC", + "FCOE_RAMROD_CMD_ID_OFFLOAD_CONN", + "FCOE_RAMROD_CMD_ID_TERMINATE_CONN", }, + { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", + "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", + "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", + "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", + "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", + "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", + "RDMA_RAMROD_STOP_NS_TRACKING", "ROCE_RAMROD_CREATE_QP", + "ROCE_RAMROD_MODIFY_QP", "ROCE_RAMROD_QUERY_QP", + "ROCE_RAMROD_DESTROY_QP", "ROCE_RAMROD_CREATE_UD_QP", + "ROCE_RAMROD_DESTROY_UD_QP", "ROCE_RAMROD_FUNC_UPDATE", + "ROCE_RAMROD_SUSPEND_QP", "ROCE_RAMROD_QUERY_SUSPENDED_QP", + "ROCE_RAMROD_CREATE_SUSPENDED_QP", "ROCE_RAMROD_RESUME_QP", + "ROCE_RAMROD_SUSPEND_UD_QP", "ROCE_RAMROD_RESUME_UD_QP", + "ROCE_RAMROD_CREATE_SUSPENDED_UD_QP", "ROCE_RAMROD_FLUSH_DPT_QP", }, + { "CORE_RAMROD_UNUSED", "CORE_RAMROD_RX_QUEUE_START", + "CORE_RAMROD_TX_QUEUE_START", "CORE_RAMROD_RX_QUEUE_STOP", + "CORE_RAMROD_TX_QUEUE_STOP", + "CORE_RAMROD_RX_QUEUE_FLUSH", + "CORE_RAMROD_TX_QUEUE_UPDATE", "CORE_RAMROD_QUEUE_STATS_QUERY", }, + { "ETH_RAMROD_UNUSED", "ETH_RAMROD_VPORT_START", + "ETH_RAMROD_VPORT_UPDATE", "ETH_RAMROD_VPORT_STOP", + "ETH_RAMROD_RX_QUEUE_START", "ETH_RAMROD_RX_QUEUE_STOP", + "ETH_RAMROD_TX_QUEUE_START", "ETH_RAMROD_TX_QUEUE_STOP", + "ETH_RAMROD_FILTERS_UPDATE", "ETH_RAMROD_RX_QUEUE_UPDATE", + "ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION", + "ETH_RAMROD_RX_ADD_OPENFLOW_FILTER", + "ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER", + "ETH_RAMROD_RX_ADD_UDP_FILTER", + "ETH_RAMROD_RX_DELETE_UDP_FILTER", + "ETH_RAMROD_RX_CREATE_GFT_ACTION", + "ETH_RAMROD_RX_UPDATE_GFT_FILTER", "ETH_RAMROD_TX_QUEUE_UPDATE", + "ETH_RAMROD_RGFS_FILTER_ADD", "ETH_RAMROD_RGFS_FILTER_DEL", + "ETH_RAMROD_TGFS_FILTER_ADD", "ETH_RAMROD_TGFS_FILTER_DEL", + "ETH_RAMROD_GFS_COUNTERS_REPORT_REQUEST", }, + { "RDMA_RAMROD_UNUSED", "RDMA_RAMROD_FUNC_INIT", + "RDMA_RAMROD_FUNC_CLOSE", "RDMA_RAMROD_REGISTER_MR", + "RDMA_RAMROD_DEREGISTER_MR", "RDMA_RAMROD_CREATE_CQ", + "RDMA_RAMROD_RESIZE_CQ", "RDMA_RAMROD_DESTROY_CQ", + "RDMA_RAMROD_CREATE_SRQ", "RDMA_RAMROD_MODIFY_SRQ", + "RDMA_RAMROD_DESTROY_SRQ", "RDMA_RAMROD_START_NS_TRACKING", + "RDMA_RAMROD_STOP_NS_TRACKING", + "IWARP_RAMROD_CMD_ID_TCP_OFFLOAD", + "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD", + "IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR", + "IWARP_RAMROD_CMD_ID_CREATE_QP", "IWARP_RAMROD_CMD_ID_QUERY_QP", + "IWARP_RAMROD_CMD_ID_MODIFY_QP", + "IWARP_RAMROD_CMD_ID_DESTROY_QP", + "IWARP_RAMROD_CMD_ID_ABORT_TCP_OFFLOAD", }, + { NULL }, /*TOE*/ + { 
NULL }, /*PREROCE*/ + { "COMMON_RAMROD_UNUSED", "COMMON_RAMROD_PF_START", + "COMMON_RAMROD_PF_STOP", "COMMON_RAMROD_VF_START", + "COMMON_RAMROD_VF_STOP", "COMMON_RAMROD_PF_UPDATE", + "COMMON_RAMROD_RL_UPDATE", "COMMON_RAMROD_EMPTY", } +}; + +/******************** INTERNAL IMPLEMENTATION *********************/ + +/* Returns the external VOQ number */ +static u8 qed_get_ext_voq(struct qed_hwfn *p_hwfn, + u8 port_id, u8 tc, u8 max_phys_tcs_per_port) +{ + if (tc == PURE_LB_TC) + return NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB + port_id; + else + return port_id * max_phys_tcs_per_port + tc; +} + +/* Prepare PF RL enable/disable runtime init values */ +static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0); + if (pf_rl_en) { + u8 num_ext_voqs = MAX_NUM_VOQS; + u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1; + + /* Enable RLs for all VOQs */ + STORE_RT_REG(p_hwfn, + QM_REG_RLPFVOQENABLE_RT_OFFSET, + (u32)voq_bit_mask); + + /* Write RL period */ + STORE_RT_REG(p_hwfn, + QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, + QM_REG_RLPFPERIODTIMER_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + + /* Set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, + QM_PF_RL_UPPER_BOUND); + } +} + +/* Prepare PF WFQ enable/disable runtime init values */ +static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); + + /* Set credit threshold for QM bypass flow */ + if (pf_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, + QM_PF_WFQ_UPPER_BOUND); +} + +/* Prepare global RL enable/disable runtime init values */ +static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, + global_rl_en ? 1 : 0); + if (global_rl_en) { + /* Write RL period (use timer 0 only) */ + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLPERIOD_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + + /* Set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, + QM_GLOBAL_RL_UPPER_BOUND(10000) - 1); + } +} + +/* Prepare VPORT WFQ enable/disable runtime init values */ +static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, + vport_wfq_en ? 1 : 0); + + /* Set credit threshold for QM bypass flow */ + if (vport_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, + QM_VP_WFQ_BYPASS_THRESH); +} + +/* Prepare runtime init values to allocate PBF command queue lines for + * the specified VOQ. + */ +static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn, + u8 ext_voq, u16 cmdq_lines) +{ + u32 qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); + + OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), + (u32)cmdq_lines); + STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq, + qm_line_crd); + STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq, + qm_line_crd); +} + +/* Prepare runtime init values to allocate PBF command queue lines. 
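+ * Each active port reserves PBF_CMDQ_PURE_LB_LINES for its pure LB VOQ and
+ * divides the remaining command queue lines evenly between its active
+ * physical TCs.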
*/ +static void +qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) +{ + u8 tc, ext_voq, port_id, num_tcs_in_port; + u8 num_ext_voqs = MAX_NUM_VOQS; + + /* Clear PBF lines of all VOQs */ + for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++) + STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0); + + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + u16 phys_lines, phys_lines_per_tc; + + if (!port_params[port_id].active) + continue; + + /* Find number of command queue lines to divide between the + * active physical TCs. + */ + phys_lines = port_params[port_id].num_pbf_cmd_lines; + phys_lines -= PBF_CMDQ_PURE_LB_LINES; + + /* Find #lines per active physical TC */ + num_tcs_in_port = 0; + for (tc = 0; tc < max_phys_tcs_per_port; tc++) + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + num_tcs_in_port++; + phys_lines_per_tc = phys_lines / num_tcs_in_port; + + /* Init registers per active TC */ + for (tc = 0; tc < max_phys_tcs_per_port; tc++) { + ext_voq = qed_get_ext_voq(p_hwfn, + port_id, + tc, max_phys_tcs_per_port); + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + qed_cmdq_lines_voq_rt_init(p_hwfn, + ext_voq, + phys_lines_per_tc); + } + + /* Init registers for pure LB TC */ + ext_voq = qed_get_ext_voq(p_hwfn, + port_id, + PURE_LB_TC, max_phys_tcs_per_port); + qed_cmdq_lines_voq_rt_init(p_hwfn, ext_voq, + PBF_CMDQ_PURE_LB_LINES); + } +} + +/* Prepare runtime init values to allocate guaranteed BTB blocks for the + * specified port. The guaranteed BTB space is divided between the TCs as + * follows (shared space Is currently not used): + * 1. Parameters: + * B - BTB blocks for this port + * C - Number of physical TCs for this port + * 2. Calculation: + * a. 38 blocks (9700B jumbo frame) are allocated for global per port + * headroom. + * b. B = B - 38 (remainder after global headroom allocation). + * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ. + * d. B = B - MAX(38, B/(C+0.7)) (remainder after pure LB allocation). + * e. B/C blocks are allocated for each physical TC. + * Assumptions: + * - MTU is up to 9700 bytes (38 blocks) + * - All TCs are considered symmetrical (same rate and packet size) + * - No optimization for lossy TC (all are considered lossless). Shared space + * is not enabled and allocated for each TC. + */ +static void +qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params port_params[MAX_NUM_PORTS]) +{ + u32 usable_blocks, pure_lb_blocks, phys_blocks; + u8 tc, ext_voq, port_id, num_tcs_in_port; + + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + if (!port_params[port_id].active) + continue; + + /* Subtract headroom blocks */ + usable_blocks = port_params[port_id].num_btb_blocks - + BTB_HEADROOM_BLOCKS; + + /* Find blocks per physical TC. Use factor to avoid floating + * arithmethic. 
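+ * (BTB_PURE_LB_RATIO / BTB_PURE_LB_FACTOR = 7 / 10 expresses the 0.7 term
+ * from the formula above as an integer ratio.)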
+ */ + num_tcs_in_port = 0; + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) + num_tcs_in_port++; + + pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) / + (num_tcs_in_port * BTB_PURE_LB_FACTOR + + BTB_PURE_LB_RATIO); + pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS, + pure_lb_blocks / BTB_PURE_LB_FACTOR); + phys_blocks = (usable_blocks - pure_lb_blocks) / + num_tcs_in_port; + + /* Init physical TCs */ + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + if (((port_params[port_id].active_phys_tcs >> + tc) & 0x1) == 1) { + ext_voq = + qed_get_ext_voq(p_hwfn, + port_id, + tc, + max_phys_tcs_per_port); + STORE_RT_REG(p_hwfn, + PBF_BTB_GUARANTEED_RT_OFFSET + (ext_voq), phys_blocks); + } + } + + /* Init pure LB TC */ + ext_voq = qed_get_ext_voq(p_hwfn, + port_id, + PURE_LB_TC, max_phys_tcs_per_port); + STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq), + pure_lb_blocks); + } +} + +/* Prepare runtime init values for the specified RL. + * Set max link speed (100Gbps) per rate limiter. + * Return -1 on error. + */ +static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn) +{ + u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) | + (u32)QM_RL_CRD_REG_SIGN_BIT; + u32 inc_val; + u16 rl_id; + + /* Go over all global RLs */ + for (rl_id = 0; rl_id < MAX_QM_GLOBAL_RLS; rl_id++) { + inc_val = QM_RL_INC_VAL(QM_MAX_LINK_SPEED); + + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, + upper_bound); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, inc_val); + } + + return 0; +} + +/* Returns the upper bound for the specified Vport RL parameters. + * link_speed is in Mbps. + * Returns 0 in case of error. + */ +static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type, + u32 link_speed) +{ + switch (vport_rl_type) { + case QM_RL_TYPE_NORMAL: + return QM_INITIAL_VOQ_BYTE_CRD; + case QM_RL_TYPE_QCN: + return QM_GLOBAL_RL_UPPER_BOUND(link_speed); + default: + return 0; + } +} + +/* Prepare VPORT RL runtime init values. + * Return -1 on error. + */ +static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn, + u16 start_rl, + u16 num_rls, + u32 link_speed, + struct init_qm_rl_params *rl_params) +{ + u16 i, rl_id; + + if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) { + DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n"); + return -1; + } + + /* Go over all PF VPORTs */ + for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) { + u32 upper_bound, inc_val; + + upper_bound = + qed_get_vport_rl_upper_bound((enum init_qm_rl_type) + rl_params[i].vport_rl_type, + link_speed); + + inc_val = + QM_RL_INC_VAL(rl_params[i].vport_rl ? 
+ rl_params[i].vport_rl : link_speed); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, + "Invalid RL rate - limit configuration\n"); + return -1; + } + + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id, + inc_val); + } + + return 0; +} + +/* Prepare Tx PQ mapping runtime init values for the specified PF */ +static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params, + u32 base_mem_addr_4kb) +{ + u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; + struct init_qm_vport_params *vport_params = p_params->vport_params; + u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE; + u16 num_pqs, first_pq_group, last_pq_group, i, j, pq_id, pq_group; + struct init_qm_pq_params *pq_params = p_params->pq_params; + u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb; + + num_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; + + first_pq_group = p_params->start_pq / QM_PF_QUEUE_GROUP_SIZE; + last_pq_group = (p_params->start_pq + num_pqs - 1) / + QM_PF_QUEUE_GROUP_SIZE; + + pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids); + vport_pq_mem_4kb = QM_PQ_MEM_4KB(p_params->num_vf_cids); + mem_addr_4kb = base_mem_addr_4kb; + + /* Set mapping from PQ group to PF */ + for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) + STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, + (u32)(p_params->pf_id)); + + /* Set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, + QM_PQ_SIZE_256B(p_params->num_pf_cids)); + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, + QM_PQ_SIZE_256B(p_params->num_vf_cids)); + + /* Go over all Tx PQs */ + for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) { + u16 *p_first_tx_pq_id, vport_id_in_pf; + struct qm_rf_pq_map tx_pq_map; + u8 tc_id = pq_params[i].tc_id; + bool is_vf_pq; + u8 ext_voq; + + ext_voq = qed_get_ext_voq(p_hwfn, + pq_params[i].port_id, + tc_id, + p_params->max_phys_tcs_per_port); + is_vf_pq = (i >= p_params->num_pf_pqs); + + /* Update first Tx PQ of VPORT/TC */ + vport_id_in_pf = pq_params[i].vport_id - p_params->start_vport; + p_first_tx_pq_id = + &vport_params[vport_id_in_pf].first_tx_pq_id[tc_id]; + if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) { + u32 map_val = + (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) | + (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT); + + /* Create new VP PQ */ + *p_first_tx_pq_id = pq_id; + + /* Map VP PQ to VOQ and PF */ + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPMAP_RT_OFFSET + + *p_first_tx_pq_id, + map_val); + } + + /* Prepare PQ map entry */ + QM_INIT_TX_PQ_MAP(p_hwfn, + tx_pq_map, + pq_id, + *p_first_tx_pq_id, + pq_params[i].rl_valid, + pq_params[i].rl_id, + ext_voq, pq_params[i].wrr_group); + + /* Set PQ base address */ + STORE_RT_REG(p_hwfn, + QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + + /* Clear PQ pointer table entry (64 bit) */ + if (p_params->is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLTX_RT_OFFSET + + (pq_id * 2) + j, 0); + + /* Write PQ info to RAM */ + if (WRITE_PQ_INFO_TO_RAM != 0) { + u32 pq_info = 0; + + pq_info = PQ_INFO_ELEMENT(*p_first_tx_pq_id, + p_params->pf_id, + tc_id, + pq_params[i].port_id, + pq_params[i].rl_valid, + pq_params[i].rl_id); + qed_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id), + pq_info); + } + + /* If VF PQ, add 
indication to PQ VF mask */ + if (is_vf_pq) { + tx_pq_vf_mask[pq_id / + QM_PF_QUEUE_GROUP_SIZE] |= + BIT((pq_id % QM_PF_QUEUE_GROUP_SIZE)); + mem_addr_4kb += vport_pq_mem_4kb; + } else { + mem_addr_4kb += pq_mem_4kb; + } + } + + /* Store Tx PQ VF mask to size select register */ + for (i = 0; i < num_tx_pq_vf_masks; i++) + if (tx_pq_vf_mask[i]) + STORE_RT_REG(p_hwfn, + QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i, + tx_pq_vf_mask[i]); + + return 0; +} + +/* Prepare Other PQ mapping runtime init values for the specified PF */ +static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn, + u8 pf_id, + bool is_pf_loading, + u32 num_pf_cids, + u32 num_tids, u32 base_mem_addr_4kb) +{ + u32 pq_size, pq_mem_4kb, mem_addr_4kb; + u16 i, j, pq_id, pq_group; + + /* A single other PQ group is used in each PF, where PQ group i is used + * in PF i. + */ + pq_group = pf_id; + pq_size = num_pf_cids + num_tids; + pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); + mem_addr_4kb = base_mem_addr_4kb; + + /* Map PQ group to PF */ + STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, + (u32)(pf_id)); + + /* Set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, + QM_PQ_SIZE_256B(pq_size)); + + for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; + i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + /* Set PQ base address */ + STORE_RT_REG(p_hwfn, + QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + + /* Clear PQ pointer table entry */ + if (is_pf_loading) + for (j = 0; j < 2; j++) + STORE_RT_REG(p_hwfn, + QM_REG_PTRTBLOTHER_RT_OFFSET + + (pq_id * 2) + j, 0); + + mem_addr_4kb += pq_mem_4kb; + } +} + +/* Prepare PF WFQ runtime init values for the specified PF. + * Return -1 on error. + */ +static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_pf_rt_init_params *p_params) +{ + u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs; + struct init_qm_pq_params *pq_params = p_params->pq_params; + u32 inc_val, crd_reg_offset; + u8 ext_voq; + u16 i; + + inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq); + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); + return -1; + } + + for (i = 0; i < num_tx_pqs; i++) { + ext_voq = qed_get_ext_voq(p_hwfn, + pq_params[i].port_id, + pq_params[i].tc_id, + p_params->max_phys_tcs_per_port); + crd_reg_offset = + (p_params->pf_id < MAX_NUM_PFS_BB ? + QM_REG_WFQPFCRD_RT_OFFSET : + QM_REG_WFQPFCRD_MSB_RT_OFFSET) + + ext_voq * MAX_NUM_PFS_BB + + (p_params->pf_id % MAX_NUM_PFS_BB); + OVERWRITE_RT_REG(p_hwfn, + crd_reg_offset, (u32)QM_WFQ_CRD_REG_SIGN_BIT); + } + + STORE_RT_REG(p_hwfn, + QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id, + QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id, + inc_val); + + return 0; +} + +/* Prepare PF RL runtime init values for the specified PF. + * Return -1 on error. + */ +static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) +{ + u32 inc_val = QM_RL_INC_VAL(pf_rl); + + if (inc_val > QM_PF_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); + return -1; + } + + STORE_RT_REG(p_hwfn, + QM_REG_RLPFCRD_RT_OFFSET + pf_id, + (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, + QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); + + return 0; +} + +/* Prepare VPORT WFQ runtime init values for the specified VPORTs. + * Return -1 on error. 
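+ * For each valid VPORT/TC pair, the per-VPORT weight (wfq) is used when
+ * non-zero; otherwise the per-TC weight (tc_wfq[tc]) is taken, as the
+ * loop below does.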
+ */ +static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, + u16 num_vports, + struct init_qm_vport_params *vport_params) +{ + u16 vport_pq_id, wfq, i; + u32 inc_val; + u8 tc; + + /* Go over all PF VPORTs */ + for (i = 0; i < num_vports; i++) { + /* Each VPORT can have several VPORT PQ IDs for various TCs */ + for (tc = 0; tc < NUM_OF_TCS; tc++) { + /* Check if VPORT/TC is valid */ + vport_pq_id = vport_params[i].first_tx_pq_id[tc]; + if (vport_pq_id == QM_INVALID_PQ_ID) + continue; + + /* Find WFQ weight (per VPORT or per VPORT+TC) */ + wfq = vport_params[i].wfq; + wfq = wfq ? wfq : vport_params[i].tc_wfq[tc]; + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (inc_val > QM_VP_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, + "Invalid VPORT WFQ weight configuration\n"); + return -1; + } + + /* Config registers */ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + + vport_pq_id, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET + + vport_pq_id, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); + } + } + + return 0; +} + +static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 reg_val, i; + + for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val; + i++) { + udelay(QM_STOP_CMD_POLL_PERIOD_US); + reg_val = qed_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); + } + + /* Check if timeout while waiting for SDM command ready */ + if (i == QM_STOP_CMD_MAX_POLL_COUNT) { + DP_VERBOSE(p_hwfn, NETIF_MSG_HW, + "Timeout when waiting for QM SDM command ready signal\n"); + return false; + } + + return true; +} + +static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb) +{ + if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) + return false; + + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1); + qed_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0); + + return qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt); +} + +/******************** INTERFACE IMPLEMENTATION *********************/ + +u32 qed_qm_pf_mem_size(u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) +{ + return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + + QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + + QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; +} + +int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn, + struct qed_qm_common_rt_init_params *p_params) +{ + u32 mask = 0; + + /* Init AFullOprtnstcCrdMask */ + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ, + QM_OPPOR_LINE_VOQ_DEF); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, + p_params->pf_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, + p_params->vport_wfq_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, + p_params->pf_rl_en ? 1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL, + p_params->global_rl_en ? 
1 : 0); + SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF); + SET_FIELD(mask, + QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF); + STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); + + /* Enable/disable PF RL */ + qed_enable_pf_rl(p_hwfn, p_params->pf_rl_en); + + /* Enable/disable PF WFQ */ + qed_enable_pf_wfq(p_hwfn, p_params->pf_wfq_en); + + /* Enable/disable global RL */ + qed_enable_global_rl(p_hwfn, p_params->global_rl_en); + + /* Enable/disable VPORT WFQ */ + qed_enable_vport_wfq(p_hwfn, p_params->vport_wfq_en); + + /* Init PBF CMDQ line credit */ + qed_cmdq_lines_rt_init(p_hwfn, + p_params->max_ports_per_engine, + p_params->max_phys_tcs_per_port, + p_params->port_params); + + /* Init BTB blocks in PBF */ + qed_btb_blocks_rt_init(p_hwfn, + p_params->max_ports_per_engine, + p_params->max_phys_tcs_per_port, + p_params->port_params); + + qed_global_rl_rt_init(p_hwfn); + + return 0; +} + +int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_qm_pf_rt_init_params *p_params) +{ + struct init_qm_vport_params *vport_params = p_params->vport_params; + u32 other_mem_size_4kb = QM_PQ_MEM_4KB(p_params->num_pf_cids + + p_params->num_tids) * + QM_OTHER_PQS_PER_PF; + u16 i; + u8 tc; + + /* Clear first Tx PQ ID array for each VPORT */ + for (i = 0; i < p_params->num_vports; i++) + for (tc = 0; tc < NUM_OF_TCS; tc++) + vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; + + /* Map Other PQs (if any) */ + qed_other_pq_map_rt_init(p_hwfn, + p_params->pf_id, + p_params->is_pf_loading, p_params->num_pf_cids, + p_params->num_tids, 0); + + /* Map Tx PQs */ + if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb)) + return -1; + + /* Init PF WFQ */ + if (p_params->pf_wfq) + if (qed_pf_wfq_rt_init(p_hwfn, p_params)) + return -1; + + /* Init PF RL */ + if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) + return -1; + + /* Init VPORT WFQ */ + if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) + return -1; + + /* Set VPORT RL */ + if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl, + p_params->num_rls, p_params->link_speed, + p_params->rl_params)) + return -1; + + return 0; +} + +int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq) +{ + u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq); + + if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n"); + return -1; + } + + qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); + + return 0; +} + +int qed_init_pf_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl) +{ + u32 inc_val = QM_RL_INC_VAL(pf_rl); + + if (inc_val > QM_PF_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid PF rate limit configuration\n"); + return -1; + } + + qed_wr(p_hwfn, + p_ptt, QM_REG_RLPFCRD + pf_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); + + return 0; +} + +int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq) +{ + int result = 0; + u16 vport_pq_id; + u8 tc; + + for (tc = 0; tc < NUM_OF_TCS && !result; tc++) { + vport_pq_id = first_tx_pq_id[tc]; + if (vport_pq_id != QM_INVALID_PQ_ID) + result = qed_init_vport_tc_wfq(p_hwfn, p_ptt, + vport_pq_id, wfq); + } + + return result; +} + +int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 first_tx_pq_id, u16 wfq) +{ + u32 inc_val; + + if (first_tx_pq_id == 
QM_INVALID_PQ_ID) + return -1; + + inc_val = QM_VP_WFQ_INC_VAL(wfq); + if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n"); + return -1; + } + + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4, + (u32)QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4, + inc_val | QM_WFQ_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4, + inc_val); + + return 0; +} + +int qed_init_global_rl(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit, + enum init_qm_rl_type vport_rl_type) +{ + u32 inc_val, upper_bound; + + upper_bound = + (vport_rl_type == + QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) : + QM_INITIAL_VOQ_BYTE_CRD; + inc_val = QM_RL_INC_VAL(rate_limit); + if (inc_val > upper_bound) { + DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n"); + return -1; + } + + qed_wr(p_hwfn, p_ptt, + QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, + p_ptt, + QM_REG_RLGLBLUPPERBOUND + rl_id * 4, + upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT); + qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val); + + return 0; +} + +bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, u16 start_pq, u16 num_pqs) +{ + u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; + u32 pq_mask = 0, last_pq, pq_id; + + last_pq = start_pq + num_pqs - 1; + + /* Set command's PQ type */ + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); + + /* Go over requested PQs */ + for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { + /* Set PQ bit in mask (stop command only) */ + if (!is_release_cmd) + pq_mask |= BIT((pq_id % QM_STOP_PQ_MASK_WIDTH)); + + /* If last PQ or end of PQ mask, write command */ + if ((pq_id == last_pq) || + (pq_id % QM_STOP_PQ_MASK_WIDTH == + (QM_STOP_PQ_MASK_WIDTH - 1))) { + QM_CMD_SET_FIELD(cmd_arr, + QM_STOP_CMD, PAUSE_MASK, pq_mask); + QM_CMD_SET_FIELD(cmd_arr, + QM_STOP_CMD, + GROUP_ID, + pq_id / QM_STOP_PQ_MASK_WIDTH); + if (!qed_send_qm_cmd(p_hwfn, p_ptt, QM_STOP_CMD_ADDR, + cmd_arr[0], cmd_arr[1])) + return false; + pq_mask = 0; + } + } + + return true; +} + +#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ + do { \ + typeof(var) *__p_var = &(var); \ + typeof(offset) __offset = offset; \ + *__p_var = (*__p_var & ~BIT(__offset)) | \ + ((enable) ? BIT(__offset) : 0); \ + } while (0) + +#define PRS_ETH_TUNN_OUTPUT_FORMAT 0xF4DAB910 +#define PRS_ETH_OUTPUT_FORMAT 0xFFFF4910 + +#define ARR_REG_WR(dev, ptt, addr, arr, arr_size) \ + do { \ + u32 i; \ + \ + for (i = 0; i < (arr_size); i++) \ + qed_wr(dev, ptt, \ + ((addr) + (4 * i)), \ + ((u32 *)&(arr))[i]); \ + } while (0) + +/** + * qed_dmae_to_grc() - Internal function for writing from host to + * wide-bus registers (split registers are not supported yet). + * + * @p_hwfn: HW device data. + * @p_ptt: PTT window used for writing the registers. + * @p_data: Pointer to source data. + * @addr: Destination register address. + * @len_in_dwords: Data length in dwords (u32). + * + * Return: Length of the written data in dwords (u32) or -1 on invalid + * input. 
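+ * If the DMAE transaction fails, the data is converted to CPU byte
+ * order and written dword by dword over GRC as a fallback.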
+ */ +static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + __le32 *p_data, u32 addr, u32 len_in_dwords) +{ + struct qed_dmae_params params = { 0 }; + u32 *data_cpu; + int rc; + + if (!p_data) + return -1; + + /* Set DMAE params */ + SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 1); + + /* Execute DMAE command */ + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)(p_data), + addr, len_in_dwords, ¶ms); + + /* If not read using DMAE, read using GRC */ + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_DEBUG, + "Failed writing to chip using DMAE, using GRC instead\n"); + + /* Swap to CPU byteorder and write to registers using GRC */ + data_cpu = (__force u32 *)p_data; + le32_to_cpu_array(data_cpu, len_in_dwords); + + ARR_REG_WR(p_hwfn, p_ptt, addr, data_cpu, len_in_dwords); + cpu_to_le32_array(data_cpu, len_in_dwords); + } + + return len_in_dwords; +} + +void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port) +{ + /* Update PRS register */ + qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); + + /* Update NIG register */ + qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port); + + /* Update PBF register */ + qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); +} + +void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool vxlan_enable) +{ + u32 reg_val; + u8 shift; + + /* Update PRS register */ + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable); + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) { + reg_val = + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } + + /* Update NIG register */ + reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT; + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable); + qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + + /* Update DORQ register */ + qed_wr(p_hwfn, + p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, vxlan_enable ? 1 : 0); +} + +void qed_set_gre_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_gre_enable, bool ip_gre_enable) +{ + u32 reg_val; + u8 shift; + + /* Update PRS register */ + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE, + eth_gre_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE, + ip_gre_enable); + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) { + reg_val = + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); + + /* Update output only if tunnel blocks not included. 
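+ * (i.e. the register still holds the non-tunnel PRS_ETH_OUTPUT_FORMAT
+ * value).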
*/ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } + + /* Update NIG register */ + reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT; + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable); + shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT; + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable); + qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + + /* Update DORQ registers */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, eth_gre_enable ? 1 : 0); + qed_wr(p_hwfn, + p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, ip_gre_enable ? 1 : 0); +} + +void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 dest_port) +{ + /* Update PRS register */ + qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); + + /* Update NIG register */ + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); + + /* Update PBF register */ + qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); +} + +void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool eth_geneve_enable, bool ip_geneve_enable) +{ + u32 reg_val; + + /* Update PRS register */ + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE, + eth_geneve_enable); + SET_FIELD(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE, + ip_geneve_enable); + qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) { + reg_val = + qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0); + + /* Update output only if tunnel blocks not included. */ + if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT) + qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + (u32)PRS_ETH_TUNN_OUTPUT_FORMAT); + } + + /* Update NIG register */ + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, + eth_geneve_enable ? 1 : 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0); + + /* EDPM with geneve tunnel not supported in BB */ + if (QED_IS_BB_B0(p_hwfn->cdev)) + return; + + /* Update DORQ registers */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2, + eth_geneve_enable ? 1 : 0); + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2, + ip_geneve_enable ? 
1 : 0); +} + +#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3 +#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910 + +void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool enable) +{ + u32 reg_val, cfg_mask; + + /* read PRS config register */ + reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_MSG_INFO); + + /* set VXLAN_NO_L2_ENABLE mask */ + cfg_mask = BIT(PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET); + + if (enable) { + /* set VXLAN_NO_L2_ENABLE flag */ + reg_val |= cfg_mask; + + /* update PRS FIC register */ + qed_wr(p_hwfn, + p_ptt, + PRS_REG_OUTPUT_FORMAT_4_0, + (u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT); + } else { + /* clear VXLAN_NO_L2_ENABLE flag */ + reg_val &= ~cfg_mask; + } + + /* write PRS config register */ + qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, reg_val); +} + +#define T_ETH_PACKET_ACTION_GFT_EVENTID 23 +#define PARSER_ETH_CONN_GFT_ACTION_CM_HDR 272 +#define T_ETH_PACKET_MATCH_RFS_EVENTID 25 +#define PARSER_ETH_CONN_CM_HDR 0 +#define CAM_LINE_SIZE sizeof(u32) +#define RAM_LINE_SIZE sizeof(u64) +#define REG_SIZE sizeof(u32) + +void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id) +{ + struct regpair ram_line = { 0 }; + + /* Disable gft search for PF */ + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0); + + /* Clean ram & cam for next gft session */ + + /* Zero camline */ + qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0); + + /* Zero ramline */ + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, + sizeof(ram_line) / REG_SIZE); +} + +void qed_gft_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 pf_id, + bool tcp, + bool udp, + bool ipv4, bool ipv6, enum gft_profile_type profile_type) +{ + struct regpair ram_line; + u32 search_non_ip_as_gft; + u32 reg_val, cam_line; + u32 lo = 0, hi = 0; + + if (!ipv6 && !ipv4) + DP_NOTICE(p_hwfn, + "gft_config: must accept at least on of - ipv4 or ipv6'\n"); + if (!tcp && !udp) + DP_NOTICE(p_hwfn, + "gft_config: must accept at least on of - udp or tcp\n"); + if (profile_type >= MAX_GFT_PROFILE_TYPE) + DP_NOTICE(p_hwfn, "gft_config: unsupported gft_profile_type\n"); + + /* Set RFS event ID to be awakened i Tstorm By Prs */ + reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID << + PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT; + reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT; + qed_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val); + + /* Do not load context only cid in PRS on match. */ + qed_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0); + + /* Do not use tenant ID exist bit for gft search */ + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0); + + /* Set Cam */ + cam_line = 0; + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1); + + /* Filters are per PF!! 
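+ * The CAM line is matched on the PF ID, so each PF programs its own
+ * filter profile.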
*/ + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_PF_ID_MASK, + GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK); + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id); + + if (!(tcp && udp)) { + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK); + if (tcp) + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, + GFT_PROFILE_TCP_PROTOCOL); + else + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE, + GFT_PROFILE_UDP_PROTOCOL); + } + + if (!(ipv4 && ipv6)) { + SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1); + if (ipv4) + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_IP_VERSION, + GFT_PROFILE_IPV4); + else + SET_FIELD(cam_line, + GFT_CAM_LINE_MAPPED_IP_VERSION, + GFT_PROFILE_IPV6); + } + + /* Write characteristics to cam */ + qed_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, + cam_line); + cam_line = + qed_rd(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id); + + /* Write line to RAM - compare to filter 4 tuple */ + + /* Search no IP as GFT */ + search_non_ip_as_gft = 0; + + /* Tunnel type */ + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1); + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1); + + if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) { + SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_SRC_PORT, 1); + SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) { + SET_FIELD(hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + SET_FIELD(lo, GFT_RAM_LINE_DST_PORT, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_ADDR) { + SET_FIELD(hi, GFT_RAM_LINE_DST_IP, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_IP_SRC_ADDR) { + SET_FIELD(hi, GFT_RAM_LINE_SRC_IP, 1); + SET_FIELD(lo, GFT_RAM_LINE_ETHERTYPE, 1); + } else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) { + SET_FIELD(lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1); + + /* Allow tunneled traffic without inner IP */ + search_non_ip_as_gft = 1; + } + + ram_line.lo = cpu_to_le32(lo); + ram_line.hi = cpu_to_le32(hi); + + qed_wr(p_hwfn, + p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft); + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id, + sizeof(ram_line) / REG_SIZE); + + /* Set default profile so that no filter match will happen */ + ram_line.lo = cpu_to_le32(0xffffffff); + ram_line.hi = cpu_to_le32(0x3ff); + qed_dmae_to_grc(p_hwfn, p_ptt, &ram_line.lo, + PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * + PRS_GFT_CAM_LINES_NO_MATCH, + sizeof(ram_line) / REG_SIZE); + + /* Enable gft search */ + qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1); +} + +DECLARE_CRC8_TABLE(cdu_crc8_table); + +/* Calculate and return CDU validation byte per connection type/region/cid */ +static u8 qed_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid) +{ + const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG; + u8 crc, validation_byte = 0; + static u8 crc8_table_valid; /* automatically initialized to 0 */ + u32 validation_string = 0; + __be32 data_to_crc; + + if (!crc8_table_valid) { + crc8_populate_msb(cdu_crc8_table, 0x07); + crc8_table_valid = 1; + } + + /* The CRC is calculated on the String-to-compress: + * [31:8] = {CID[31:20],CID[11:0]} + * [7:4] = Region + * [3:0] = Type + */ + if ((validation_cfg >> 
CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1) + validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8); + + if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1) + validation_string |= ((region & 0xF) << 4); + + if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1) + validation_string |= (conn_type & 0xF); + + /* Convert to big-endian and calculate CRC8 */ + data_to_crc = cpu_to_be32(validation_string); + crc = crc8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc), + CRC8_INIT_VALUE); + + /* The validation byte [7:0] is composed: + * for type A validation + * [7] = active configuration bit + * [6:0] = crc[6:0] + * + * for type B validation + * [7] = active configuration bit + * [6:3] = connection_type[3:0] + * [2:0] = crc[2:0] + */ + validation_byte |= + ((validation_cfg >> + CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7; + + if ((validation_cfg >> + CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1) + validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7); + else + validation_byte |= crc & 0x7F; + + return validation_byte; +} + +/* Calcualte and set validation bytes for session context */ +void qed_calc_session_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 cid) +{ + u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; + + p_ctx = (u8 * const)p_ctx_mem; + x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; + t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; + u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; + + memset(p_ctx, 0, ctx_size); + + *x_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 3, cid); + *t_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 4, cid); + *u_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 5, cid); +} + +/* Calcualte and set validation bytes for task context */ +void qed_calc_task_ctx_validation(void *p_ctx_mem, + u16 ctx_size, u8 ctx_type, u32 tid) +{ + u8 *p_ctx, *region1_val_ptr; + + p_ctx = (u8 * const)p_ctx_mem; + region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; + + memset(p_ctx, 0, ctx_size); + + *region1_val_ptr = qed_calc_cdu_validation_byte(ctx_type, 1, tid); +} + +/* Memset session context to 0 while preserving validation bytes */ +void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) +{ + u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx; + u8 x_val, t_val, u_val; + + p_ctx = (u8 * const)p_ctx_mem; + x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]]; + t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]]; + u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]]; + + x_val = *x_val_ptr; + t_val = *t_val_ptr; + u_val = *u_val_ptr; + + memset(p_ctx, 0, ctx_size); + + *x_val_ptr = x_val; + *t_val_ptr = t_val; + *u_val_ptr = u_val; +} + +/* Memset task context to 0 while preserving validation bytes */ +void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type) +{ + u8 *p_ctx, *region1_val_ptr; + u8 region1_val; + + p_ctx = (u8 * const)p_ctx_mem; + region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]]; + + region1_val = *region1_val_ptr; + + memset(p_ctx, 0, ctx_size); + + *region1_val_ptr = region1_val; +} + +/* Enable and configure context validation */ +void qed_enable_context_validation(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 ctx_validation; + + /* Enable validation for connection region 3: CCFC_CTX_VALID0[31:24] */ + ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24; + qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation); + + /* Enable validation for connection region 5: CCFC_CTX_VALID1[15:8] */ + ctx_validation = 
CDU_VALIDATION_DEFAULT_CFG << 8; + qed_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation); + + /* Enable validation for connection region 1: TCFC_CTX_VALID0[15:8] */ + ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8; + qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation); +} + +const char *qed_get_protocol_type_str(u32 protocol_type) +{ + if (protocol_type >= ARRAY_SIZE(s_protocol_types)) + return "Invalid protocol type"; + + return s_protocol_types[protocol_type]; +} + +const char *qed_get_ramrod_cmd_id_str(u32 protocol_type, u32 ramrod_cmd_id) +{ + const char *ramrod_cmd_id_str; + + if (protocol_type >= ARRAY_SIZE(s_ramrod_cmd_ids)) + return "Invalid protocol type"; + + if (ramrod_cmd_id >= ARRAY_SIZE(s_ramrod_cmd_ids[0])) + return "Invalid Ramrod command ID"; + + ramrod_cmd_id_str = s_ramrod_cmd_ids[protocol_type][ramrod_cmd_id]; + + if (!ramrod_cmd_id_str) + return "Invalid Ramrod command ID"; + + return ramrod_cmd_id_str; +} + +static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) +{ + switch (storm_id) { + case 0: + return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 1: + return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 2: + return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 3: + return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 4: + return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + case 5: + return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id); + + default: + return 0; + } +} + +void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u8 assert_level[NUM_STORMS]) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id); + + qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]); + } +} + +#define PHYS_ADDR_DWORDS DIV_ROUND_UP(sizeof(dma_addr_t), 4) +#define OVERLAY_HDR_SIZE_DWORDS (sizeof(struct fw_overlay_buf_hdr) / 4) + +static u32 qed_get_overlay_addr_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id) +{ + switch (storm_id) { + case 0: + return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + TSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 1: + return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + MSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 2: + return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + USTORM_OVERLAY_BUF_ADDR_OFFSET; + case 3: + return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + XSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 4: + return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + YSTORM_OVERLAY_BUF_ADDR_OFFSET; + case 5: + return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + + PSTORM_OVERLAY_BUF_ADDR_OFFSET; + + default: + return 0; + } +} + +struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, + const u32 * const + fw_overlay_in_buf, + u32 buf_size_in_bytes) +{ + u32 buf_size = buf_size_in_bytes / sizeof(u32), buf_offset = 0; + struct phys_mem_desc *allocated_mem; + + if (!buf_size) + return NULL; + + allocated_mem = kcalloc(NUM_STORMS, sizeof(struct phys_mem_desc), + GFP_KERNEL); + if (!allocated_mem) + return NULL; + + /* For each Storm, set physical address in RAM */ + while (buf_offset < buf_size) { + struct phys_mem_desc *storm_mem_desc; + struct 
fw_overlay_buf_hdr *hdr; + u32 storm_buf_size; + u8 storm_id; + + hdr = + (struct fw_overlay_buf_hdr *)&fw_overlay_in_buf[buf_offset]; + storm_buf_size = GET_FIELD(hdr->data, + FW_OVERLAY_BUF_HDR_BUF_SIZE); + storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID); + if (storm_id >= NUM_STORMS) + break; + storm_mem_desc = allocated_mem + storm_id; + storm_mem_desc->size = storm_buf_size * sizeof(u32); + + /* Allocate physical memory for Storm's overlays buffer */ + storm_mem_desc->virt_addr = + dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + storm_mem_desc->size, + &storm_mem_desc->phys_addr, GFP_KERNEL); + if (!storm_mem_desc->virt_addr) + break; + + /* Skip overlays buffer header */ + buf_offset += OVERLAY_HDR_SIZE_DWORDS; + + /* Copy Storm's overlays buffer to allocated memory */ + memcpy(storm_mem_desc->virt_addr, + &fw_overlay_in_buf[buf_offset], storm_mem_desc->size); + + /* Advance to next Storm */ + buf_offset += storm_buf_size; + } + + /* If memory allocation has failed, free all allocated memory */ + if (buf_offset < buf_size) { + qed_fw_overlay_mem_free(p_hwfn, &allocated_mem); + return NULL; + } + + return allocated_mem; +} + +void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct phys_mem_desc *fw_overlay_mem) +{ + u8 storm_id; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + struct phys_mem_desc *storm_mem_desc = + (struct phys_mem_desc *)fw_overlay_mem + storm_id; + u32 ram_addr, i; + + /* Skip Storms with no FW overlays */ + if (!storm_mem_desc->virt_addr) + continue; + + /* Calculate overlay RAM GRC address of current PF */ + ram_addr = qed_get_overlay_addr_ram_addr(p_hwfn, storm_id) + + sizeof(dma_addr_t) * p_hwfn->rel_pf_id; + + /* Write Storm's overlay physical address to RAM */ + for (i = 0; i < PHYS_ADDR_DWORDS; i++, ram_addr += sizeof(u32)) + qed_wr(p_hwfn, p_ptt, ram_addr, + ((u32 *)&storm_mem_desc->phys_addr)[i]); + } +} + +void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, + struct phys_mem_desc **fw_overlay_mem) +{ + u8 storm_id; + + if (!fw_overlay_mem || !(*fw_overlay_mem)) + return; + + for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) { + struct phys_mem_desc *storm_mem_desc = + (struct phys_mem_desc *)*fw_overlay_mem + storm_id; + + /* Free Storm's physical memory */ + if (storm_mem_desc->virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + storm_mem_desc->size, + storm_mem_desc->virt_addr, + storm_mem_desc->phys_addr); + } + + /* Free allocated virtual memory */ + kfree(*fw_overlay_mem); + *fw_overlay_mem = NULL; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c new file mode 100644 index 0000000000..b3bf9899c1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_iro_hsi.h" +#include "qed_reg_addr.h" +#include "qed_sriov.h" + +#define QED_INIT_MAX_POLL_COUNT 100 +#define QED_INIT_POLL_PERIOD_US 500 + +static u32 pxp_global_win[] = { + 0, + 0, + 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */ + 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */ + 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */ + 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */ + 0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */ + 0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */ + 0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */ + 0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */ + 0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */ + 0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */ + 0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */ + 0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */ + 0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */ + 0, + 0, + 0, + 0, +}; + +/* IRO Array */ +static const u32 iro_arr[] = { + 0x00000000, 0x00000000, 0x00080000, + 0x00004478, 0x00000008, 0x00080000, + 0x00003288, 0x00000088, 0x00880000, + 0x000058a8, 0x00000020, 0x00200000, + 0x00003188, 0x00000008, 0x00080000, + 0x00000b00, 0x00000008, 0x00040000, + 0x00000a80, 0x00000008, 0x00040000, + 0x00000000, 0x00000008, 0x00020000, + 0x00000080, 0x00000008, 0x00040000, + 0x00000084, 0x00000008, 0x00020000, + 0x00005798, 0x00000004, 0x00040000, + 0x00004e50, 0x00000000, 0x00780000, + 0x00003e40, 0x00000000, 0x00780000, + 0x00004500, 0x00000000, 0x00780000, + 0x00003210, 0x00000000, 0x00780000, + 0x00003b50, 0x00000000, 0x00780000, + 0x00007f58, 0x00000000, 0x00780000, + 0x00005fd8, 0x00000000, 0x00080000, + 0x00007100, 0x00000000, 0x00080000, + 0x0000af20, 0x00000000, 0x00080000, + 0x00004398, 0x00000000, 0x00080000, + 0x0000a5a0, 0x00000000, 0x00080000, + 0x0000bde8, 0x00000000, 0x00080000, + 0x00000020, 0x00000004, 0x00040000, + 0x00005688, 0x00000010, 0x00100000, + 0x0000c210, 0x00000030, 0x00300000, + 0x0000b108, 0x00000038, 0x00380000, + 0x00003d20, 0x00000080, 0x00400000, + 0x0000bf60, 0x00000000, 0x00040000, + 0x00004560, 0x00040080, 0x00040000, + 0x000001f8, 0x00000004, 0x00040000, + 0x00003d60, 0x00000080, 0x00200000, + 0x00008960, 0x00000040, 0x00300000, + 0x0000e840, 0x00000060, 0x00600000, + 0x00004698, 0x00000080, 0x00380000, + 0x000107b8, 0x000000c0, 0x00c00000, + 0x000001f8, 0x00000002, 0x00020000, + 0x0000a260, 0x00000000, 0x01080000, + 0x0000a368, 0x00000008, 0x00080000, + 0x000001c0, 0x00000008, 0x00080000, + 0x000001f8, 0x00000008, 0x00080000, + 0x00000ac0, 0x00000008, 0x00080000, + 0x00002578, 0x00000008, 0x00080000, + 0x000024f8, 0x00000008, 0x00080000, + 0x00000280, 0x00000008, 0x00080000, + 0x00000680, 0x00080018, 0x00080000, + 0x00000b78, 0x00080018, 0x00020000, + 0x0000c600, 0x00000058, 0x003c0000, + 0x00012038, 0x00000020, 0x00100000, + 0x00011b00, 0x00000048, 0x00180000, + 0x00009650, 0x00000050, 0x00200000, + 0x00008b10, 0x00000040, 0x00280000, + 0x000116c0, 0x00000018, 0x00100000, + 0x0000c808, 0x00000048, 0x00380000, + 0x00011790, 0x00000020, 0x00200000, + 0x000046d0, 0x00000080, 0x00100000, + 0x00003618, 0x00000010, 0x00100000, + 0x0000a9e8, 0x00000008, 0x00010000, + 0x000097a0, 0x00000008, 0x00010000, + 0x00011a10, 0x00000008, 0x00010000, + 
0x0000e9f8, 0x00000008, 0x00010000, + 0x00012648, 0x00000008, 0x00010000, + 0x000121c8, 0x00000008, 0x00010000, + 0x0000af08, 0x00000030, 0x00100000, + 0x0000d748, 0x00000028, 0x00280000, + 0x00009e68, 0x00000018, 0x00180000, + 0x00009fe8, 0x00000008, 0x00080000, + 0x00013ea8, 0x00000008, 0x00080000, + 0x00012f18, 0x00000018, 0x00180000, + 0x0000dfe8, 0x00500288, 0x00100000, + 0x000131a0, 0x00000138, 0x00280000, +}; + +void qed_init_iro_array(struct qed_dev *cdev) +{ + cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET; +} + +void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) +{ + if (rt_offset >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing %u in rt_data at index %u!\n", + val, rt_offset); + return; + } + + p_hwfn->rt_data.init_val[rt_offset] = val; + p_hwfn->rt_data.b_valid[rt_offset] = true; +} + +void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, + u32 rt_offset, u32 *p_val, size_t size) +{ + size_t i; + + if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) { + DP_ERR(p_hwfn, + "Avoid storing values in rt_data at indices %u-%u!\n", + rt_offset, + (u32)(rt_offset + size - 1)); + return; + } + + for (i = 0; i < size / sizeof(u32); i++) { + p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; + p_hwfn->rt_data.b_valid[rt_offset + i] = true; + } +} + +static int qed_init_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, u16 rt_offset, u16 size, bool b_must_dmae) +{ + u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; + bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; + u16 i, j, segment; + int rc = 0; + + /* Since not all RT entries are initialized, go over the RT and + * for each segment of initialized values use DMA. + */ + for (i = 0; i < size; i++) { + if (!p_valid[i]) + continue; + + /* In case there isn't any wide-bus configuration here, + * simply write the data instead of using dmae. 
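+ * Each valid entry is then written as a single GRC dword and marked
+ * as consumed.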
+ */ + if (!b_must_dmae) { + qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]); + p_valid[i] = false; + continue; + } + + /* Start of a new segment */ + for (segment = 1; i + segment < size; segment++) + if (!p_valid[i + segment]) + break; + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(p_init_val + i), + addr + (i << 2), segment, NULL); + if (rc) + return rc; + + /* invalidate after writing */ + for (j = i; j < (u32)(i + segment); j++) + p_valid[j] = false; + + /* Jump over the entire segment, including invalid entry */ + i += segment; + } + + return rc; +} + +int qed_init_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_rt_data *rt_data = &p_hwfn->rt_data; + + if (IS_VF(p_hwfn->cdev)) + return 0; + + rt_data->b_valid = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(bool), + GFP_KERNEL); + if (!rt_data->b_valid) + return -ENOMEM; + + rt_data->init_val = kcalloc(RUNTIME_ARRAY_SIZE, sizeof(u32), + GFP_KERNEL); + if (!rt_data->init_val) { + kfree(rt_data->b_valid); + rt_data->b_valid = NULL; + return -ENOMEM; + } + + return 0; +} + +void qed_init_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->rt_data.init_val); + p_hwfn->rt_data.init_val = NULL; + kfree(p_hwfn->rt_data.b_valid); + p_hwfn->rt_data.b_valid = NULL; +} + +static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u32 dmae_data_offset, + u32 size, + const u32 *buf, + bool b_must_dmae, + bool b_can_dmae) +{ + int rc = 0; + + /* Perform DMAE only for lengthy enough sections or for wide-bus */ + if (!b_can_dmae || (!b_must_dmae && (size < 16))) { + const u32 *data = buf + dmae_data_offset; + u32 i; + + for (i = 0; i < size; i++) + qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]); + } else { + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(buf + dmae_data_offset), + addr, size, NULL); + } + + return rc; +} + +static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, u32 fill_count) +{ + static u32 zero_buffer[DMAE_MAX_RW_SIZE]; + struct qed_dmae_params params = {}; + + memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE); + + /* invoke the DMAE virtual/physical buffer API with + * 1. DMAE init channel + * 2. addr, + * 3. p_hwfb->temp_data, + * 4. 
fill_count + */ + SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1); + return qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(&zero_buffer[0]), + addr, fill_count, ¶ms); +} + +static void qed_init_fill(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, u32 fill, u32 fill_count) +{ + u32 i; + + for (i = 0; i < fill_count; i++, addr += sizeof(u32)) + qed_wr(p_hwfn, p_ptt, addr, fill); +} + +static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_write_op *cmd, + bool b_must_dmae, bool b_can_dmae) +{ + u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset); + u32 data = le32_to_cpu(cmd->data); + u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + + u32 offset, output_len, input_len, max_size; + struct qed_dev *cdev = p_hwfn->cdev; + union init_array_hdr *hdr; + const u32 *array_data; + int rc = 0; + u32 size; + + array_data = cdev->fw_data->arr_data; + + hdr = (union init_array_hdr *)(array_data + dmae_array_offset); + data = le32_to_cpu(hdr->raw.data); + switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) { + case INIT_ARR_ZIPPED: + offset = dmae_array_offset + 1; + input_len = GET_FIELD(data, + INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE); + max_size = MAX_ZIPPED_SIZE * 4; + memset(p_hwfn->unzip_buf, 0, max_size); + + output_len = qed_unzip_data(p_hwfn, input_len, + (u8 *)&array_data[offset], + max_size, (u8 *)p_hwfn->unzip_buf); + if (output_len) { + rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0, + output_len, + p_hwfn->unzip_buf, + b_must_dmae, b_can_dmae); + } else { + DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n"); + rc = -EINVAL; + } + break; + case INIT_ARR_PATTERN: + { + u32 repeats = GET_FIELD(data, + INIT_ARRAY_PATTERN_HDR_REPETITIONS); + u32 i; + + size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE); + + for (i = 0; i < repeats; i++, addr += size << 2) { + rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, + dmae_array_offset + 1, + size, array_data, + b_must_dmae, b_can_dmae); + if (rc) + break; + } + break; + } + case INIT_ARR_STANDARD: + size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE); + rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, + dmae_array_offset + 1, + size, array_data, + b_must_dmae, b_can_dmae); + break; + } + + return rc; +} + +/* init_ops write command */ +static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_write_op *p_cmd, bool b_can_dmae) +{ + u32 data = le32_to_cpu(p_cmd->data); + bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS); + u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + union init_write_args *arg = &p_cmd->args; + int rc = 0; + + /* Sanitize */ + if (b_must_dmae && !b_can_dmae) { + DP_NOTICE(p_hwfn, + "Need to write to %08x for Wide-bus but DMAE isn't allowed\n", + addr); + return -EINVAL; + } + + switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) { + case INIT_SRC_INLINE: + data = le32_to_cpu(p_cmd->args.inline_val); + qed_wr(p_hwfn, p_ptt, addr, data); + break; + case INIT_SRC_ZEROS: + data = le32_to_cpu(p_cmd->args.zeros_count); + if (b_must_dmae || (b_can_dmae && (data >= 64))) + rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data); + else + qed_init_fill(p_hwfn, p_ptt, addr, 0, data); + break; + case INIT_SRC_ARRAY: + rc = qed_init_cmd_array(p_hwfn, p_ptt, p_cmd, + b_must_dmae, b_can_dmae); + break; + case INIT_SRC_RUNTIME: + qed_init_rt(p_hwfn, p_ptt, addr, + le16_to_cpu(arg->runtime.offset), + le16_to_cpu(arg->runtime.size), + b_must_dmae); + break; + } + + return rc; +} + +static inline bool comp_eq(u32 val, u32 expected_val) +{ 
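+ /* Exact-match comparison, used for the INIT_POLL_EQ poll type */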
+ return val == expected_val; +} + +static inline bool comp_and(u32 val, u32 expected_val) +{ + return (val & expected_val) == expected_val; +} + +static inline bool comp_or(u32 val, u32 expected_val) +{ + return (val | expected_val) > 0; +} + +/* init_ops read/poll commands */ +static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct init_read_op *cmd) +{ + bool (*comp_check)(u32 val, u32 expected_val); + u32 delay = QED_INIT_POLL_PERIOD_US, val; + u32 data, addr, poll; + int i; + + data = le32_to_cpu(cmd->op_data); + addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); + + val = qed_rd(p_hwfn, p_ptt, addr); + + if (poll == INIT_POLL_NONE) + return; + + switch (poll) { + case INIT_POLL_EQ: + comp_check = comp_eq; + break; + case INIT_POLL_OR: + comp_check = comp_or; + break; + case INIT_POLL_AND: + comp_check = comp_and; + break; + default: + DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", + cmd->op_data); + return; + } + + data = le32_to_cpu(cmd->expected_val); + for (i = 0; + i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data); + i++) { + udelay(delay); + val = qed_rd(p_hwfn, p_ptt, addr); + } + + if (i == QED_INIT_MAX_POLL_COUNT) { + DP_ERR(p_hwfn, + "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n", + addr, le32_to_cpu(cmd->expected_val), + val, le32_to_cpu(cmd->op_data)); + } +} + +/* init_ops callbacks entry point */ +static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct init_callback_op *p_cmd) +{ + int rc; + + switch (p_cmd->callback_id) { + case DMAE_READY_CB: + rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase"); + break; + default: + DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n", + p_cmd->callback_id); + return -EINVAL; + } + + return rc; +} + +static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn, + u16 *p_offset, int modes) +{ + struct qed_dev *cdev = p_hwfn->cdev; + const u8 *modes_tree_buf; + u8 arg1, arg2, tree_val; + + modes_tree_buf = cdev->fw_data->modes_tree_buf; + tree_val = modes_tree_buf[(*p_offset)++]; + switch (tree_val) { + case INIT_MODE_OP_NOT: + return qed_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1; + case INIT_MODE_OP_OR: + arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + return arg1 | arg2; + case INIT_MODE_OP_AND: + arg1 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = qed_init_cmd_mode_match(p_hwfn, p_offset, modes); + return arg1 & arg2; + default: + tree_val -= MAX_INIT_MODE_OPS; + return (modes & BIT(tree_val)) ? 
1 : 0; + } +} + +static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn, + struct init_if_mode_op *p_cmd, int modes) +{ + u16 offset = le16_to_cpu(p_cmd->modes_buf_offset); + + if (qed_init_cmd_mode_match(p_hwfn, &offset, modes)) + return 0; + else + return GET_FIELD(le32_to_cpu(p_cmd->op_data), + INIT_IF_MODE_OP_CMD_OFFSET); +} + +static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd, + u32 phase, u32 phase_id) +{ + u32 data = le32_to_cpu(p_cmd->phase_data); + u32 op_data = le32_to_cpu(p_cmd->op_data); + + if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase && + (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID || + GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id))) + return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET); + else + return 0; +} + +int qed_init_run(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int phase, int phase_id, int modes) +{ + bool b_dmae = (phase != PHASE_ENGINE); + struct qed_dev *cdev = p_hwfn->cdev; + u32 cmd_num, num_init_ops; + union init_op *init_ops; + int rc = 0; + + num_init_ops = cdev->fw_data->init_ops_size; + init_ops = cdev->fw_data->init_ops; + + p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC); + if (!p_hwfn->unzip_buf) + return -ENOMEM; + + for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) { + union init_op *cmd = &init_ops[cmd_num]; + u32 data = le32_to_cpu(cmd->raw.op_data); + + switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) { + case INIT_OP_WRITE: + rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write, + b_dmae); + break; + case INIT_OP_READ: + qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read); + break; + case INIT_OP_IF_MODE: + cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode, + modes); + break; + case INIT_OP_IF_PHASE: + cmd_num += qed_init_cmd_phase(&cmd->if_phase, + phase, phase_id); + break; + case INIT_OP_DELAY: + /* qed_init_run is always invoked from + * sleep-able context + */ + udelay(le32_to_cpu(cmd->delay.delay)); + break; + + case INIT_OP_CALLBACK: + rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + if (phase == PHASE_ENGINE && + cmd->callback.callback_id == DMAE_READY_CB) + b_dmae = true; + break; + } + + if (rc) + break; + } + + kfree(p_hwfn->unzip_buf); + p_hwfn->unzip_buf = NULL; + return rc; +} + +void qed_gtt_init(struct qed_hwfn *p_hwfn) +{ + u32 gtt_base; + u32 i; + + /* Set the global windows */ + gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START; + + for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++) + if (pxp_global_win[i]) + REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE, + pxp_global_win[i]); +} + +int qed_init_fw_data(struct qed_dev *cdev, const u8 *data) +{ + struct qed_fw_data *fw = cdev->fw_data; + struct bin_buffer_hdr *buf_hdr; + u32 offset, len; + + if (!data) { + DP_NOTICE(cdev, "Invalid fw data\n"); + return -EINVAL; + } + + /* First Dword contains metadata and should be skipped */ + buf_hdr = (struct bin_buffer_hdr *)data; + + offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset; + fw->fw_ver_info = (struct fw_ver_info *)(data + offset); + + offset = buf_hdr[BIN_BUF_INIT_CMD].offset; + fw->init_ops = (union init_op *)(data + offset); + + offset = buf_hdr[BIN_BUF_INIT_VAL].offset; + fw->arr_data = (u32 *)(data + offset); + + offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset; + fw->modes_tree_buf = (u8 *)(data + offset); + len = buf_hdr[BIN_BUF_INIT_CMD].length; + fw->init_ops_size = len / sizeof(struct init_raw_op); + + offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset; + fw->fw_overlays = (u32 *)(data + offset); + len = 
buf_hdr[BIN_BUF_INIT_OVERLAYS].length; + fw->fw_overlays_len = len; + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h new file mode 100644 index 0000000000..12e5c4e370 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_INIT_OPS_H +#define _QED_INIT_OPS_H + +#include <linux/types.h> +#include <linux/slab.h> +#include "qed.h" + +/** + * qed_init_iro_array(): init iro_arr. + * + * @cdev: Qed dev pointer. + * + * Return: Void. + */ +void qed_init_iro_array(struct qed_dev *cdev); + +/** + * qed_init_run(): Run the init-sequence. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @phase: Phase. + * @phase_id: Phase ID. + * @modes: Mode. + * + * Return: _qed_status_t + */ +int qed_init_run(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + int phase, + int phase_id, + int modes); + +/** + * qed_init_alloc(): Allocate RT array, Store 'values' ptrs. + * + * @p_hwfn: HW device data. + * + * Return: _qed_status_t. + */ +int qed_init_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_init_free(): Init HW function deallocate. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_init_free(struct qed_hwfn *p_hwfn); + +/** + * qed_init_store_rt_reg(): Store a configuration value in the RT array. + * + * @p_hwfn: HW device data. + * @rt_offset: RT offset. + * @val: Val. + * + * Return: Void. + */ +void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, + u32 rt_offset, + u32 val); + +#define STORE_RT_REG(hwfn, offset, val) \ + qed_init_store_rt_reg(hwfn, offset, val) + +#define OVERWRITE_RT_REG(hwfn, offset, val) \ + qed_init_store_rt_reg(hwfn, offset, val) + +void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, + u32 rt_offset, + u32 *val, + size_t size); + +#define STORE_RT_REG_AGG(hwfn, offset, val) \ + qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val)) + +/** + * qed_gtt_init(): Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @p_hwfn: HW device data. + * + * Return Void. + */ +void qed_gtt_init(struct qed_hwfn *p_hwfn); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c new file mode 100644 index 0000000000..2661c483c6 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -0,0 +1,2423 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +struct qed_pi_info { + qed_int_comp_cb_t comp_cb; + void *cookie; +}; + +struct qed_sb_sp_info { + struct qed_sb_info sb_info; + + /* per protocol index data */ + struct qed_pi_info pi_info_arr[PIS_PER_SB]; +}; + +enum qed_attention_type { + QED_ATTN_TYPE_ATTN, + QED_ATTN_TYPE_PARITY, +}; + +#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) + +struct aeu_invert_reg_bit { + char bit_name[30]; + +#define ATTENTION_PARITY (1 << 0) + +#define ATTENTION_LENGTH_MASK (0x00000ff0) +#define ATTENTION_LENGTH_SHIFT (4) +#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ + ATTENTION_LENGTH_SHIFT) +#define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT) +#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) +#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ + ATTENTION_PARITY) + +/* Multiple bits start with this offset */ +#define ATTENTION_OFFSET_MASK (0x000ff000) +#define ATTENTION_OFFSET_SHIFT (12) + +#define ATTENTION_BB_MASK (0x00700000) +#define ATTENTION_BB_SHIFT (20) +#define ATTENTION_BB(value) (value << ATTENTION_BB_SHIFT) +#define ATTENTION_BB_DIFFERENT BIT(23) + +#define ATTENTION_CLEAR_ENABLE BIT(28) + unsigned int flags; + + /* Callback to call if attention will be triggered */ + int (*cb)(struct qed_hwfn *p_hwfn); + + enum block_id block_index; +}; + +struct aeu_invert_reg { + struct aeu_invert_reg_bit bits[32]; +}; + +#define MAX_ATTN_GRPS (8) +#define NUM_ATTN_REGS (9) + +/* Specific HW attention callbacks */ +static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); + + /* This might occur on certain instances; Log it once then mask it */ + DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", + tmp); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, + 0xffffffff); + + return 0; +} + +#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) +#define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) +#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) +#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) +#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) +#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) +#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) +#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) +#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) +#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) +#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) +#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) +#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) +static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_VALID); + + if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { + u32 addr, data, length; + + addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_ADDRESS); + data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_DATA); + length = qed_rd(p_hwfn, 
p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_LENGTH); + + DP_INFO(p_hwfn->cdev, + "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", + addr, length, + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_VF_VALID), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_CLIENT), + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_BYTE_EN), + data); + } + + return 0; +} + +#define QED_GRC_ATTENTION_VALID_BIT (1 << 0) +#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) +#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) +#define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) +#define QED_GRC_ATTENTION_MASTER_MASK (0xf) +#define QED_GRC_ATTENTION_MASTER_SHIFT (24) +#define QED_GRC_ATTENTION_PF_MASK (0xf) +#define QED_GRC_ATTENTION_PF_SHIFT (0) +#define QED_GRC_ATTENTION_VF_MASK (0xff) +#define QED_GRC_ATTENTION_VF_SHIFT (4) +#define QED_GRC_ATTENTION_PRIV_MASK (0x3) +#define QED_GRC_ATTENTION_PRIV_SHIFT (14) +#define QED_GRC_ATTENTION_PRIV_VF (0) +static const char *attn_master_to_str(u8 master) +{ + switch (master) { + case 1: return "PXP"; + case 2: return "MCP"; + case 3: return "MSDM"; + case 4: return "PSDM"; + case 5: return "YSDM"; + case 6: return "USDM"; + case 7: return "TSDM"; + case 8: return "XSDM"; + case 9: return "DBU"; + case 10: return "DMAE"; + default: + return "Unknown"; + } +} + +static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp, tmp2; + + /* We've already cleared the timeout interrupt register, so we learn + * of interrupts via the validity register + */ + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); + if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) + goto out; + + /* Read the GRC timeout information */ + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); + tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); + + DP_INFO(p_hwfn->cdev, + "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", + tmp2, tmp, + (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", + GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, + attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), + GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), + (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == + QED_GRC_ATTENTION_PRIV_VF) ? 
"VF" : "(Irrelevant)", + GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); + +out: + /* Regardles of anything else, clean the validity bit */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); + return 0; +} + +#define PGLUE_ATTENTION_VALID (1 << 29) +#define PGLUE_ATTENTION_RD_VALID (1 << 26) +#define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) +#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) +#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) +#define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) +#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) +#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) +#define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) +#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) +#define PGLUE_ATTENTION_ICPL_VALID (1 << 23) +#define PGLUE_ATTENTION_ZLR_VALID (1 << 25) +#define PGLUE_ATTENTION_ILT_VALID (1 << 23) + +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init) +{ + char msg[256]; + u32 tmp; + + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2); + if (tmp & PGLUE_ATTENTION_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_63_32); + details = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS); + + snprintf(msg, sizeof(msg), + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID), + tmp, + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME), + !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN)); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); + } + + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2); + if (tmp & PGLUE_ATTENTION_RD_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_63_32); + details = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS); + + DP_NOTICE(p_hwfn, + "Illegal read by chip from [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 : 0); + } + + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) { + snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp); + + if (hw_init) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg); + else + DP_NOTICE(p_hwfn, "%s\n", msg); + } + + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + if (tmp & PGLUE_ATTENTION_ZLR_VALID) { + u32 addr_hi, addr_lo; + + addr_lo = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); + + DP_NOTICE(p_hwfn, "ZLR error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); + } + + tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + if (tmp & PGLUE_ATTENTION_ILT_VALID) { + u32 addr_hi, addr_lo, details; + + addr_lo = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); + details = qed_rd(p_hwfn, p_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS); + + DP_NOTICE(p_hwfn, + "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); + } + + /* Clear the indications */ + qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, BIT(2)); + + return 0; +} + +static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn) +{ + return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false); +} + +static int qed_fw_assertion(struct qed_hwfn *p_hwfn) +{ + qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT, + "FW assertion!\n"); + + /* Clear assert indications */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0); + + return -EINVAL; +} + +static int qed_general_attention_35(struct qed_hwfn *p_hwfn) +{ + DP_INFO(p_hwfn, "General attention 35!\n"); + + return 0; +} + +#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) +#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) + +#define QED_DB_REC_COUNT 1000 +#define QED_DB_REC_INTERVAL 100 + +static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 count = QED_DB_REC_COUNT; + u32 usage = 1; + + /* Flush any pending (e)dpms as they may never arrive */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); + + /* wait for usage to zero or count to run out. This is necessary since + * EDPM doorbell transactions can take multiple 64b cycles, and as such + * can "split" over the pci. Possibly, the doorbell drop can happen with + * half an EDPM in the queue and other half dropped. Another EDPM + * doorbell to the same address (from doorbell recovery mechanism or + * from the doorbelling entity) could have first half dropped and second + * half interpreted as continuation of the first. To prevent such + * malformed doorbells from reaching the device, flush the queue before + * releasing the overflow sticky indication. + */ + while (count-- && usage) { + usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); + udelay(QED_DB_REC_INTERVAL); + } + + /* should have been depleted by now */ + if (usage) { + DP_NOTICE(p_hwfn->cdev, + "DB recovery: doorbell usage failed to zero after %d usec. 
usage was %x\n", + QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); + return -EBUSY; + } + + return 0; +} + +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 attn_ovfl, cur_ovfl; + int rc; + + attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT, + &p_hwfn->db_recovery_info.overflow); + cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!cur_ovfl && !attn_ovfl) + return 0; + + DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n", + attn_ovfl, cur_ovfl); + + if (cur_ovfl && !p_hwfn->db_bar_no_edpm) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + return rc; + } + + /* Release overflow sticky indication (stop silently dropping everything) */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); + + /* Repeat all last doorbells (doorbell drop recovery) */ + qed_db_recovery_execute(p_hwfn); + + return 0; +} + +static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn) +{ + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + u32 overflow; + int rc; + + overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + if (!overflow) + goto out; + + /* Run PF doorbell recovery in next periodic handler */ + set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow); + + if (!p_hwfn->db_bar_no_edpm) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + goto out; + } + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); +out: + /* Schedule the handler even if overflow was not detected */ + qed_periodic_db_rec_start(p_hwfn); +} + +static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) +{ + u32 int_sts, first_drop_reason, details, address, all_drops_reason; + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); + if (int_sts == 0xdeadbeaf) { + DP_NOTICE(p_hwfn->cdev, + "DORQ is being reset, skipping int_sts handler\n"); + + return 0; + } + + /* int_sts may be zero since all PFs were interrupted for doorbell + * overflow but another one already handled it. Can abort here. If + * This PF also requires overflow recovery we will be interrupted again. + * The masked almost full indication may also be set. Ignoring. + */ + if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) + return 0; + + DP_NOTICE(p_hwfn->cdev, "DORQ attention. 
int_sts was %x\n", int_sts); + + /* check if db_drop or overflow happened */ + if (int_sts & (DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { + /* Obtain data about db drop/overflow */ + first_drop_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_REASON) & + QED_DORQ_ATTENTION_REASON_MASK; + details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); + address = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS); + all_drops_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_REASON); + + /* Log info */ + DP_NOTICE(p_hwfn->cdev, + "Doorbell drop occurred\n" + "Address\t\t0x%08x\t(second BAR address)\n" + "FID\t\t0x%04x\t\t(Opaque FID)\n" + "Size\t\t0x%04x\t\t(in bytes)\n" + "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" + "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", + address, + GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), + GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, + first_drop_reason, all_drops_reason); + + /* Clear the doorbell drop details and prepare for next drop */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); + + /* Mark interrupt as handled (note: even if drop was due to a different + * reason than overflow we mark as handled) + */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_INT_STS_WR, + DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); + + /* If there are no indications other than drop indications, success */ + if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | + DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) + return 0; + } + + /* Some other indication was present - non recoverable */ + DP_INFO(p_hwfn, "DORQ fatal attention\n"); + + return -EINVAL; +} + +static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->cdev->recov_in_prog) + return 0; + + p_hwfn->db_recovery_info.dorq_attn = true; + qed_dorq_attn_overflow(p_hwfn); + + return qed_dorq_attn_int_sts(p_hwfn); +} + +static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->db_recovery_info.dorq_attn) + goto out; + + /* Call DORQ callback if the attention was missed */ + qed_dorq_attn_cb(p_hwfn); +out: + p_hwfn->db_recovery_info.dorq_attn = false; +} + +/* Instead of major changes to the data-structure, we have some 'special' + * identifiers for sources that changed meaning between adapters.
+ */ +enum aeu_invert_reg_special_type { + AEU_INVERT_REG_SPECIAL_CNIG_0, + AEU_INVERT_REG_SPECIAL_CNIG_1, + AEU_INVERT_REG_SPECIAL_CNIG_2, + AEU_INVERT_REG_SPECIAL_CNIG_3, + AEU_INVERT_REG_SPECIAL_MAX, +}; + +static struct aeu_invert_reg_bit +aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = { + {"CNIG port 0", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, + {"CNIG port 1", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, + {"CNIG port 2", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, + {"CNIG port 3", ATTENTION_SINGLE, NULL, BLOCK_CNIG}, +}; + +/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ +static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { + { + { /* After Invert 1 */ + {"GPIO0 function%d", + (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 2 */ + {"PGLUE config_space", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PGLUE misc_flr", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PGLUE B RBC", ATTENTION_PAR_INT, + qed_pglueb_rbc_attn_cb, BLOCK_PGLUE_B}, + {"PGLUE misc_mctp", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | + (1 << ATTENTION_OFFSET_SHIFT), + NULL, MAX_BLOCK_ID}, + {"PCIE glue/PXP VPD %d", + (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, + } + }, + + { + { /* After Invert 3 */ + {"General Attention %d", + (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 4 */ + {"General Attention 32", ATTENTION_SINGLE | + ATTENTION_CLEAR_ENABLE, qed_fw_assertion, + MAX_BLOCK_ID}, + {"General Attention %d", + (2 << ATTENTION_LENGTH_SHIFT) | + (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, + {"General Attention 35", ATTENTION_SINGLE | + ATTENTION_CLEAR_ENABLE, qed_general_attention_35, + MAX_BLOCK_ID}, + {"NWS Parity", + ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0), + NULL, BLOCK_NWS}, + {"NWS Interrupt", + ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1), + NULL, BLOCK_NWS}, + {"NWM Parity", + ATTENTION_PAR | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2), + NULL, BLOCK_NWM}, + {"NWM Interrupt", + ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT | + ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3), + NULL, BLOCK_NWM}, + {"MCP CPU", ATTENTION_SINGLE, + qed_mcp_attn_cb, MAX_BLOCK_ID}, + {"MCP Watchdog timer", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"AVS stop status ready", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, + {"MSTAT per-path", ATTENTION_PAR_INT, + NULL, MAX_BLOCK_ID}, + {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), + NULL, MAX_BLOCK_ID}, + {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, + {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, + {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, + {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, + {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, + } + }, + + { + { /* After Invert 5 */ + {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, + {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, + {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, + {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, + {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, + {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, + {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, + {"MCM", ATTENTION_PAR_INT, 
NULL, BLOCK_MCM}, + {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, + {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, + {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, + {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, + {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, + {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, + {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, + {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, + } + }, + + { + { /* After Invert 6 */ + {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, + {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, + {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, + {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, + {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, + {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, + {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, + {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, + {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, + {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, + {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, + {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, + {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, + {"DORQ", ATTENTION_PAR_INT, + qed_dorq_attn_cb, BLOCK_DORQ}, + {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, + {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, + } + }, + + { + { /* After Invert 7 */ + {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, + {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, + {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, + {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, + {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, + {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, + {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, + {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, + {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, + {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, + {"TDIF", ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, + {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, + {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, + {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, + {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, + {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, + {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, + } + }, + + { + { /* After Invert 8 */ + {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWRQ2}, + {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, + {"PSWWR (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWWR2}, + {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, + {"PSWRD (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWRD2}, + {"PSWHST", ATTENTION_PAR_INT, + qed_pswhst_attn_cb, BLOCK_PSWHST}, + {"PSWHST (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWHST2}, + {"GRC", ATTENTION_PAR_INT, + qed_grc_attn_cb, BLOCK_GRC}, + {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, + {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, + {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, + {"PCIE glue/PXP Exp. 
ROM", ATTENTION_SINGLE, + NULL, BLOCK_PGLCS}, + {"PERST_B assertion", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PERST_B deassertion", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), + NULL, MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 9 */ + {"MCP Latched memory", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad cache", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"MCP Latched ump_tx", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, + {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), + NULL, MAX_BLOCK_ID}, + } + }, +}; + +static struct aeu_invert_reg_bit * +qed_int_aeu_translate(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_bit) +{ + if (!QED_IS_BB(p_hwfn->cdev)) + return p_bit; + + if (!(p_bit->flags & ATTENTION_BB_DIFFERENT)) + return p_bit; + + return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >> + ATTENTION_BB_SHIFT]; +} + +static bool qed_int_is_parity_flag(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_bit) +{ + return !!(qed_int_aeu_translate(p_hwfn, p_bit)->flags & + ATTENTION_PARITY); +} + +#define ATTN_STATE_BITS (0xfff) +#define ATTN_BITS_MASKABLE (0x3ff) +struct qed_sb_attn_info { + /* Virtual & Physical address of the SB */ + struct atten_status_block *sb_attn; + dma_addr_t sb_phys; + + /* Last seen running index */ + u16 index; + + /* A mask of the AEU bits resulting in a parity error */ + u32 parity_mask[NUM_ATTN_REGS]; + + /* A pointer to the attention description structure */ + struct aeu_invert_reg *p_aeu_desc; + + /* Previously asserted attentions, which are still unasserted */ + u16 known_attn; + + /* Cleanup address for the link's general hw attention */ + u32 mfw_attn_addr; +}; + +static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, + struct qed_sb_attn_info *p_sb_desc) +{ + u16 rc = 0, index; + + index = le16_to_cpu(p_sb_desc->sb_attn->sb_index); + if (p_sb_desc->index != index) { + p_sb_desc->index = index; + rc = QED_SB_ATT_IDX; + } + + return rc; +} + +/** + * qed_int_assertion() - Handle asserted attention bits. + * + * @p_hwfn: HW device data. + * @asserted_bits: Newly asserted bits. + * + * Return: Zero value. 
+ */ +static int qed_int_assertion(struct qed_hwfn *p_hwfn, u16 asserted_bits) +{ + struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 igu_mask; + + /* Mask the source of the attention in the IGU */ + igu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", + igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); + igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "inner known ATTN state: 0x%04x --> 0x%04x\n", + sb_attn_sw->known_attn, + sb_attn_sw->known_attn | asserted_bits); + sb_attn_sw->known_attn |= asserted_bits; + + /* Handle MCP events */ + if (asserted_bits & 0x100) { + qed_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); + /* Clean the MCP attention */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + sb_attn_sw->mfw_attn_addr, 0); + } + + DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_SET_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + (u32)asserted_bits); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "set cmd IGU: 0x%04x\n", + asserted_bits); + + return 0; +} + +static void qed_int_attn_print(struct qed_hwfn *p_hwfn, + enum block_id id, + enum dbg_attn_type type, bool b_clear) +{ + struct dbg_attn_block_result attn_results; + enum dbg_status status; + + memset(&attn_results, 0, sizeof(attn_results)); + + status = qed_dbg_read_attn(p_hwfn, p_hwfn->p_dpc_ptt, id, type, + b_clear, &attn_results); + if (status != DBG_STATUS_OK) + DP_NOTICE(p_hwfn, + "Failed to parse attention information [status: %s]\n", + qed_dbg_get_status_str(status)); + else + qed_dbg_parse_attn(p_hwfn, &attn_results); +} + +/** + * qed_int_deassertion_aeu_bit() - Handles the effects of a single + * cause of the attention. + * + * @p_hwfn: HW device data. + * @p_aeu: Descriptor of an AEU bit which caused the attention. + * @aeu_en_reg: Register offset of the AEU enable reg. which configured + * this bit to this group. + * @p_bit_name: AEU bit description for logging purposes. + * @bitmask: Index of this bit in the aeu_en_reg. + * + * Return: Zero on success, negative errno otherwise. 
+ */ +static int +qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u32 aeu_en_reg, + const char *p_bit_name, u32 bitmask) +{ + bool b_fatal = false; + int rc = -EINVAL; + u32 val; + + DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", + p_bit_name, bitmask); + + /* Call callback before clearing the interrupt status */ + if (p_aeu->cb) { + DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", + p_bit_name); + rc = p_aeu->cb(p_hwfn); + } + + if (rc) + b_fatal = true; + + /* Print HW block interrupt registers */ + if (p_aeu->block_index != MAX_BLOCK_ID) + qed_int_attn_print(p_hwfn, p_aeu->block_index, + ATTN_TYPE_INTERRUPT, !b_fatal); + + /* Reach assertion if attention is fatal */ + if (b_fatal) + qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_HW_ATTN, + "`%s': Fatal attention\n", + p_bit_name); + else /* If the attention is benign, no need to prevent it */ + goto out; + + /* Prevent this Attention from being asserted in the future */ + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); + DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", + p_bit_name); + + /* Re-enable FW assertion (Gen 32) interrupts */ + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE4_IGU_OUT_0); + val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32; + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE4_IGU_OUT_0, val); + +out: + return rc; +} + +/** + * qed_int_deassertion_parity() - Handle a single parity AEU source. + * + * @p_hwfn: HW device data. + * @p_aeu: Descriptor of an AEU bit which caused the parity. + * @aeu_en_reg: Address of the AEU enable register. + * @bit_index: Index (0-31) of an AEU bit. + */ +static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u32 aeu_en_reg, u8 bit_index) +{ + u32 block_id = p_aeu->block_index, mask, val; + + DP_NOTICE(p_hwfn->cdev, + "%s parity attention is set [address 0x%08x, bit %d]\n", + p_aeu->bit_name, aeu_en_reg, bit_index); + + if (block_id != MAX_BLOCK_ID) { + qed_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false); + + /* In BB, there's a single parity bit for several blocks */ + if (block_id == BLOCK_BTB) { + qed_int_attn_print(p_hwfn, BLOCK_OPTE, + ATTN_TYPE_PARITY, false); + qed_int_attn_print(p_hwfn, BLOCK_MCP, + ATTN_TYPE_PARITY, false); + } + } + + /* Prevent this parity error from being re-asserted */ + mask = ~BIT(bit_index); + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask); + DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n", + p_aeu->bit_name); +} + +/** + * qed_int_deassertion() - Handle deassertion of previously asserted + * attentions. + * + * @p_hwfn: HW device data. + * @deasserted_bits: newly deasserted bits. + * + * Return: Zero value.
+ */ +static int qed_int_deassertion(struct qed_hwfn *p_hwfn, + u16 deasserted_bits) +{ + struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en; + u8 i, j, k, bit_idx; + int rc = 0; + + /* Read the attention registers in the AEU */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_AFTER_INVERT_1_IGU + + i * 0x4); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Deasserted bits [%d]: %08x\n", + i, aeu_inv_arr[i]); + } + + /* Find parity attentions first */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; + u32 parities; + + aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32); + en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + + /* Skip register in which no parity bit is currently set */ + parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; + if (!parities) + continue; + + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { + struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; + + if (qed_int_is_parity_flag(p_hwfn, p_bit) && + !!(parities & BIT(bit_idx))) + qed_int_deassertion_parity(p_hwfn, p_bit, + aeu_en, bit_idx); + + bit_idx += ATTENTION_LENGTH(p_bit->flags); + } + } + + /* Find non-parity cause for attention and act */ + for (k = 0; k < MAX_ATTN_GRPS; k++) { + struct aeu_invert_reg_bit *p_aeu; + + /* Handle only groups whose attention is currently deasserted */ + if (!(deasserted_bits & (1 << k))) + continue; + + for (i = 0; i < NUM_ATTN_REGS; i++) { + u32 bits; + + aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32) + + k * sizeof(u32) * NUM_ATTN_REGS; + + en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + bits = aeu_inv_arr[i] & en; + + /* Skip if no bit from this group is currently set */ + if (!bits) + continue; + + /* Find all set bits from current register which belong + * to current group, making them responsible for the + * previous assertion. + */ + for (j = 0, bit_idx = 0; bit_idx < 32 && j < 32; j++) { + long unsigned int bitmask; + u8 bit, bit_len; + + p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; + p_aeu = qed_int_aeu_translate(p_hwfn, p_aeu); + + bit = bit_idx; + bit_len = ATTENTION_LENGTH(p_aeu->flags); + if (qed_int_is_parity_flag(p_hwfn, p_aeu)) { + /* Skip Parity */ + bit++; + bit_len--; + } + + bitmask = bits & (((1 << bit_len) - 1) << bit); + bitmask >>= bit; + + if (bitmask) { + u32 flags = p_aeu->flags; + char bit_name[30]; + u8 num; + + num = (u8)find_first_bit(&bitmask, + bit_len); + + /* Some bits represent more than a + * single interrupt. Correctly print + * their name. + */ + if (ATTENTION_LENGTH(flags) > 2 || + ((flags & ATTENTION_PAR_INT) && + ATTENTION_LENGTH(flags) > 1)) + snprintf(bit_name, 30, + p_aeu->bit_name, num); + else + strscpy(bit_name, + p_aeu->bit_name, 30); + + /* We now need to pass bitmask in its + * correct position. 
+ */ + bitmask <<= bit; + + /* Handle source of the attention */ + qed_int_deassertion_aeu_bit(p_hwfn, + p_aeu, + aeu_en, + bit_name, + bitmask); + } + + bit_idx += ATTENTION_LENGTH(p_aeu->flags); + } + } + } + + /* Handle missed DORQ attention */ + qed_dorq_attn_handler(p_hwfn); + + /* Clear IGU indication for the deasserted bits */ + DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_CLR_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + ~((u32)deasserted_bits)); + + /* Unmask deasserted attentions in IGU */ + aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE); + aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); + + /* Clear deassertion from inner state */ + sb_attn_sw->known_attn &= ~deasserted_bits; + + return rc; +} + +static int qed_int_attentions(struct qed_hwfn *p_hwfn) +{ + struct qed_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; + struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; + u32 attn_bits = 0, attn_acks = 0; + u16 asserted_bits, deasserted_bits; + __le16 index; + int rc = 0; + + /* Read current attention bits/acks - safeguard against attentions + * by guaranteeing work on a synchronized timeframe + */ + do { + index = p_sb_attn->sb_index; + /* finish reading index before the loop condition */ + dma_rmb(); + attn_bits = le32_to_cpu(p_sb_attn->atten_bits); + attn_acks = le32_to_cpu(p_sb_attn->atten_ack); + } while (index != p_sb_attn->sb_index); + p_sb_attn->sb_index = index; + + /* Attention / Deassertion are meaningful (and in correct state) + * only when they differ and consistent with known state - deassertion + * when previous attention & current ack, and assertion when current + * attention with no previous attention + */ + asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & + ~p_sb_attn_sw->known_attn; + deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & + p_sb_attn_sw->known_attn; + + if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) { + DP_INFO(p_hwfn, + "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n", + index, attn_bits, attn_acks, asserted_bits, + deasserted_bits, p_sb_attn_sw->known_attn); + } else if (asserted_bits == 0x100) { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication via attention\n"); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "MFW indication [deassertion]\n"); + } + + if (asserted_bits) { + rc = qed_int_assertion(p_hwfn, asserted_bits); + if (rc) + return rc; + } + + if (deasserted_bits) + rc = qed_int_deassertion(p_hwfn, deasserted_bits); + + return rc; +} + +static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn, + void __iomem *igu_addr, u32 ack_cons) +{ + u32 igu_ack; + + igu_ack = ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_ATTN << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(igu_addr, igu_ack); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW.
+ */ + barrier(); +} + +void qed_int_sp_dpc(struct tasklet_struct *t) +{ + struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc); + struct qed_pi_info *pi_info = NULL; + struct qed_sb_attn_info *sb_attn; + struct qed_sb_info *sb_info; + int arr_size; + u16 rc = 0; + + if (!p_hwfn->p_sp_sb) { + DP_ERR(p_hwfn->cdev, "DPC called - no p_sp_sb\n"); + return; + } + + sb_info = &p_hwfn->p_sp_sb->sb_info; + arr_size = ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); + if (!sb_info) { + DP_ERR(p_hwfn->cdev, + "Status block is NULL - cannot ack interrupts\n"); + return; + } + + if (!p_hwfn->p_sb_attn) { + DP_ERR(p_hwfn->cdev, "DPC called - no p_sb_attn"); + return; + } + sb_attn = p_hwfn->p_sb_attn; + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "DPC Called! (hwfn %p %d)\n", + p_hwfn, p_hwfn->my_id); + + /* Disable ack for def status block. Required both for msix + + * inta in non-mask mode, in inta does no harm. + */ + qed_sb_ack(sb_info, IGU_INT_DISABLE, 0); + + /* Gather Interrupts/Attentions information */ + if (!sb_info->sb_virt) { + DP_ERR(p_hwfn->cdev, + "Interrupt Status block is NULL - cannot check for new interrupts!\n"); + } else { + u32 tmp_index = sb_info->sb_ack; + + rc = qed_sb_update_sb_idx(sb_info); + DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, + "Interrupt indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_info->sb_ack); + } + + if (!sb_attn || !sb_attn->sb_attn) { + DP_ERR(p_hwfn->cdev, + "Attentions Status block is NULL - cannot check for new attentions!\n"); + } else { + u16 tmp_index = sb_attn->index; + + rc |= qed_attn_update_idx(p_hwfn, sb_attn); + DP_VERBOSE(p_hwfn->cdev, NETIF_MSG_INTR, + "Attention indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_attn->index); + } + + /* Check if we expect interrupts at this time. if not just ack them */ + if (!(rc & QED_SB_EVENT_MASK)) { + qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + + /* Check the validity of the DPC ptt. If not ack interrupts and fail */ + if (!p_hwfn->p_dpc_ptt) { + DP_NOTICE(p_hwfn->cdev, "Failed to allocate PTT\n"); + qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + + if (rc & QED_SB_ATT_IDX) + qed_int_attentions(p_hwfn); + + if (rc & QED_SB_IDX) { + int pi; + + /* Look for a free index */ + for (pi = 0; pi < arr_size; pi++) { + pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; + if (pi_info->comp_cb) + pi_info->comp_cb(p_hwfn, pi_info->cookie); + } + } + + if (sb_attn && (rc & QED_SB_ATT_IDX)) + /* This should be done before the interrupts are enabled, + * since otherwise a new attention will be generated. 
+ */ + qed_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); + + qed_sb_ack(sb_info, IGU_INT_ENABLE, 1); +} + +static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) +{ + struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; + + if (!p_sb) + return; + + if (p_sb->sb_attn) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + p_sb->sb_attn, p_sb->sb_phys); + kfree(p_sb); + p_hwfn->p_sb_attn = NULL; +} + +static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + + memset(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); + + sb_info->index = 0; + sb_info->known_attn = 0; + + /* Configure Attention Status Block in IGU */ + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, + lower_32_bits(p_hwfn->p_sb_attn->sb_phys)); + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, + upper_32_bits(p_hwfn->p_sb_attn->sb_phys)); +} + +static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + void *sb_virt_addr, dma_addr_t sb_phy_addr) +{ + struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + int i, j, k; + + sb_info->sb_attn = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + /* Set the pointer to the AEU descriptors */ + sb_info->p_aeu_desc = aeu_descs; + + /* Calculate Parity Masks */ + memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); + for (i = 0; i < NUM_ATTN_REGS; i++) { + /* j is array index, k is bit index */ + for (j = 0, k = 0; k < 32 && j < 32; j++) { + struct aeu_invert_reg_bit *p_aeu; + + p_aeu = &aeu_descs[i].bits[j]; + if (qed_int_is_parity_flag(p_hwfn, p_aeu)) + sb_info->parity_mask[i] |= 1 << k; + + k += ATTENTION_LENGTH(p_aeu->flags); + } + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Attn Mask [Reg %d]: 0x%08x\n", + i, sb_info->parity_mask[i]); + } + + /* Set the address of cleanup for the mcp attention */ + sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + + MISC_REG_AEU_GENERAL_ATTN_0; + + qed_int_sb_attn_setup(p_hwfn, p_ptt); +} + +static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_sb_attn_info *p_sb; + dma_addr_t p_phys = 0; + void *p_virt; + + /* SB struct */ + p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); + if (!p_sb) + return -ENOMEM; + + /* SB ring */ + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + &p_phys, GFP_KERNEL); + + if (!p_virt) { + kfree(p_sb); + return -ENOMEM; + } + + /* Attention setup */ + p_hwfn->p_sb_attn = p_sb; + qed_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); + + return 0; +} + +/* coalescing timeout = timeset << (timer_res + 1) */ +#define QED_CAU_DEF_RX_USECS 24 +#define QED_CAU_DEF_TX_USECS 48 + +void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, + u8 pf_id, u16 vf_number, u8 vf_valid) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 cau_state, params = 0, data = 0; + u8 timer_res; + + memset(p_sb_entry, 0, sizeof(*p_sb_entry)); + + SET_FIELD(params, CAU_SB_ENTRY_PF_NUMBER, pf_id); + SET_FIELD(params, CAU_SB_ENTRY_VF_NUMBER, vf_number); + SET_FIELD(params, CAU_SB_ENTRY_VF_VALID, vf_valid); + SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); + SET_FIELD(params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); + + cau_state = CAU_HC_DISABLE_STATE; + + if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { + cau_state = CAU_HC_ENABLE_STATE; + if (!cdev->rx_coalesce_usecs) + cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; + if (!cdev->tx_coalesce_usecs) + 
cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; + } + + /* Coalesce = (timeset << timer-res), timeset is 7bit wide */ + if (cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + if (cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + p_sb_entry->params = cpu_to_le32(params); + + SET_FIELD(data, CAU_SB_ENTRY_STATE0, cau_state); + SET_FIELD(data, CAU_SB_ENTRY_STATE1, cau_state); + p_sb_entry->data = cpu_to_le32(data); +} + +static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, + u32 pi_index, + enum qed_coalescing_fsm coalescing_fsm, + u8 timeset) +{ + u32 sb_offset, pi_offset; + u32 prod = 0; + + if (IS_VF(p_hwfn->cdev)) + return; + + SET_FIELD(prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + if (coalescing_fsm == QED_COAL_RX_STATE_MACHINE) + SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 0); + else + SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1); + + sb_offset = igu_sb_id * PIS_PER_SB; + pi_offset = sb_offset + pi_index; + + if (p_hwfn->hw_init_done) + qed_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), prod); + else + STORE_RT_REG(p_hwfn, CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + prod); +} + +void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t sb_phys, + u16 igu_sb_id, u16 vf_number, u8 vf_valid) +{ + struct cau_sb_entry sb_entry; + + qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, + vf_number, vf_valid); + + if (p_hwfn->hw_init_done) { + /* Wide-bus, initialize via DMAE */ + u64 phys_addr = (u64)sb_phys; + + qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, + CAU_REG_SB_ADDR_MEMORY + + igu_sb_id * sizeof(u64), 2, NULL); + qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_sb_id * sizeof(u64), 2, NULL); + } else { + /* Initialize Status Block Address */ + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + + igu_sb_id * 2, + sb_phys); + + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_VAR_MEMORY_RT_OFFSET + + igu_sb_id * 2, + sb_entry); + } + + /* Configure pi coalescing if set */ + if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { + u8 num_tc = p_hwfn->hw_info.num_hw_tc; + u8 timeset, timer_res; + u8 i; + + /* timeset = (coalesce >> timer-res), timeset is 7bit wide */ + if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res); + qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, + QED_COAL_RX_STATE_MACHINE, timeset); + + if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F) + timer_res = 0; + else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF) + timer_res = 1; + else + timer_res = 2; + timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res); + for (i = 0; i < num_tc; i++) { + qed_int_cau_conf_pi(p_hwfn, p_ptt, + igu_sb_id, TX_PI(i), + QED_COAL_TX_STATE_MACHINE, + timeset); + } + } +} + +void qed_int_sb_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_sb_info *sb_info) +{ + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); + + if (IS_PF(p_hwfn->cdev)) + qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, + 
sb_info->igu_sb_id, 0, 0); +} + +struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf) +{ + struct qed_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !(p_block->status & QED_IGU_STATUS_FREE)) + continue; + + if (!!(p_block->status & QED_IGU_STATUS_PF) == b_is_pf) + return p_block; + } + + return NULL; +} + +static u16 qed_get_pf_igu_sb_id(struct qed_hwfn *p_hwfn, u16 vector_id) +{ + struct qed_igu_block *p_block; + u16 igu_id; + + for (igu_id = 0; igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); + igu_id++) { + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !p_block->is_pf || + p_block->vector_number != vector_id) + continue; + + return igu_id; + } + + return QED_SB_INVALID_IDX; +} + +u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + u16 igu_sb_id; + + /* Assuming continuous set of IGU SBs dedicated for given PF */ + if (sb_id == QED_SP_SB_ID) + igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + else if (IS_PF(p_hwfn->cdev)) + igu_sb_id = qed_get_pf_igu_sb_id(p_hwfn, sb_id + 1); + else + igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id); + + if (sb_id == QED_SP_SB_ID) + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); + else + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); + + return igu_sb_id; +} + +int qed_int_sb_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_sb_info *sb_info, + void *sb_virt_addr, dma_addr_t sb_phy_addr, u16 sb_id) +{ + sb_info->sb_virt = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + sb_info->igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); + + if (sb_id != QED_SP_SB_ID) { + if (IS_PF(p_hwfn->cdev)) { + struct qed_igu_info *p_info; + struct qed_igu_block *p_block; + + p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + p_block->sb_info = sb_info; + p_block->status &= ~QED_IGU_STATUS_FREE; + p_info->usage.free_cnt--; + } else { + qed_vf_set_sb_info(p_hwfn, sb_id, sb_info); + } + } + + sb_info->cdev = p_hwfn->cdev; + + /* The igu address will hold the absolute address that needs to be + * written to for a specific status block + */ + if (IS_PF(p_hwfn->cdev)) { + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + (sb_info->igu_sb_id << 3); + } else { + sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_IGU + + ((IGU_CMD_INT_ACK_BASE + + sb_info->igu_sb_id) << 3); + } + + sb_info->flags |= QED_SB_INFO_INIT; + + qed_int_sb_setup(p_hwfn, p_ptt, sb_info); + + return 0; +} + +int qed_int_sb_release(struct qed_hwfn *p_hwfn, + struct qed_sb_info *sb_info, u16 sb_id) +{ + struct qed_igu_block *p_block; + struct qed_igu_info *p_info; + + if (!sb_info) + return 0; + + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); + + if (IS_VF(p_hwfn->cdev)) { + qed_vf_set_sb_info(p_hwfn, sb_id, NULL); + return 0; + } + + p_info = p_hwfn->hw_info.p_igu_info; + p_block = &p_info->entry[sb_info->igu_sb_id]; + + /* Vector 0 is reserved to Default SB */ + if (!p_block->vector_number) { + DP_ERR(p_hwfn, "Do Not free sp sb using this function"); + return -EINVAL; + } + + /* Lose reference to client's SB info, and fix counters */ + p_block->sb_info = NULL; + 
p_block->status |= QED_IGU_STATUS_FREE; + p_info->usage.free_cnt++; + + return 0; +} + +static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) +{ + struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; + + if (!p_sb) + return; + + if (p_sb->sb_info.sb_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + SB_ALIGNED_SIZE(p_hwfn), + p_sb->sb_info.sb_virt, + p_sb->sb_info.sb_phys); + kfree(p_sb); + p_hwfn->p_sp_sb = NULL; +} + +static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_sb_sp_info *p_sb; + dma_addr_t p_phys = 0; + void *p_virt; + + /* SB struct */ + p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); + if (!p_sb) + return -ENOMEM; + + /* SB ring */ + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + SB_ALIGNED_SIZE(p_hwfn), + &p_phys, GFP_KERNEL); + if (!p_virt) { + kfree(p_sb); + return -ENOMEM; + } + + /* Status Block setup */ + p_hwfn->p_sp_sb = p_sb; + qed_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, p_virt, + p_phys, QED_SP_SB_ID); + + memset(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); + + return 0; +} + +int qed_int_register_cb(struct qed_hwfn *p_hwfn, + qed_int_comp_cb_t comp_cb, + void *cookie, u8 *sb_idx, __le16 **p_fw_cons) +{ + struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + int rc = -ENOMEM; + u8 pi; + + /* Look for a free index */ + for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { + if (p_sp_sb->pi_info_arr[pi].comp_cb) + continue; + + p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; + p_sp_sb->pi_info_arr[pi].cookie = cookie; + *sb_idx = pi; + *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; + rc = 0; + break; + } + + return rc; +} + +int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) +{ + struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + + if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) + return -ENOMEM; + + p_sp_sb->pi_info_arr[pi].comp_cb = NULL; + p_sp_sb->pi_info_arr[pi].cookie = NULL; + + return 0; +} + +u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) +{ + return p_hwfn->p_sp_sb->sb_info.igu_sb_id; +} + +void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_int_mode int_mode) +{ + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN; + + p_hwfn->cdev->int_mode = int_mode; + switch (p_hwfn->cdev->int_mode) { + case QED_INT_MODE_INTA: + igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case QED_INT_MODE_MSI: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case QED_INT_MODE_MSIX: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + break; + case QED_INT_MODE_POLL: + break; + } + + qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); +} + +static void qed_int_igu_enable_attn(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + + /* Configure AEU signal change to produce attentions */ + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); + qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); + qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); + + /* Unmask AEU signals toward IGU */ + qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); +} + +int +qed_int_igu_enable(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, enum qed_int_mode int_mode) +{ + int rc = 0; + + qed_int_igu_enable_attn(p_hwfn, p_ptt); + + if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { + rc = qed_slowpath_irq_req(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n"); + return -EINVAL; + } + 
p_hwfn->b_int_requested = true; + } + /* Enable interrupt Generation */ + qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode); + p_hwfn->b_int_enabled = 1; + + return rc; +} + +void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + p_hwfn->b_int_enabled = 0; + + if (IS_VF(p_hwfn->cdev)) + return; + + qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); +} + +#define IGU_CLEANUP_SLEEP_LENGTH (1000) +static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, + bool cleanup_set, u16 opaque_fid) +{ + u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; + u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id; + u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; + + /* Set the data field */ + SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0); + SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, 0); + SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); + + /* Set the control register */ + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); + + qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); + + barrier(); + + qed_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); + + /* calculate where to read the status bit from */ + sb_bit = 1 << (igu_sb_id % 32); + sb_bit_addr = igu_sb_id / 32 * sizeof(u32); + + sb_bit_addr += IGU_REG_CLEANUP_STATUS_0; + + /* Now wait for the command to complete */ + do { + val = qed_rd(p_hwfn, p_ptt, sb_bit_addr); + + if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) + break; + + usleep_range(5000, 10000); + } while (--sleep_cnt); + + if (!sleep_cnt) + DP_NOTICE(p_hwfn, + "Timeout waiting for clear status 0x%08x [for sb %d]\n", + val, igu_sb_id); +} + +void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, u16 opaque, bool b_set) +{ + struct qed_igu_block *p_block; + int pi, i; + + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n", + igu_sb_id, + p_block->function_id, + p_block->is_pf, p_block->vector_number); + + /* Set */ + if (b_set) + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque); + + /* Clear */ + qed_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque); + + /* Wait for the IGU SB to cleanup */ + for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) { + u32 val; + + val = qed_rd(p_hwfn, p_ptt, + IGU_REG_WRITE_DONE_PENDING + + ((igu_sb_id / 32) * 4)); + if (val & BIT((igu_sb_id % 32))) + usleep_range(10, 20); + else + break; + } + if (i == IGU_CLEANUP_SLEEP_LENGTH) + DP_NOTICE(p_hwfn, + "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n", + igu_sb_id); + + /* Clear the CAU for the SB */ + for (pi = 0; pi < 12; pi++) + qed_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0); +} + +void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_set, bool b_slowpath) +{ + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + struct qed_igu_block *p_block; + u16 igu_sb_id = 0; + u32 val = 0; + + val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); + val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; + val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; + qed_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); + + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + p_block = 
&p_info->entry[igu_sb_id]; + + if (!(p_block->status & QED_IGU_STATUS_VALID) || + !p_block->is_pf || + (p_block->status & QED_IGU_STATUS_DSB)) + continue; + + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id, + p_hwfn->hw_info.opaque_fid, + b_set); + } + + if (b_slowpath) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + p_info->igu_dsb_id, + p_hwfn->hw_info.opaque_fid, + b_set); +} + +int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + struct qed_igu_block *p_block; + int pf_sbs, vf_sbs; + u16 igu_sb_id; + u32 val, rval; + + if (!RESC_NUM(p_hwfn, QED_SB)) { + p_info->b_allow_pf_vf_change = false; + } else { + /* Use the numbers the MFW have provided - + * don't forget MFW accounts for the default SB as well. + */ + p_info->b_allow_pf_vf_change = true; + + if (p_info->usage.cnt != RESC_NUM(p_hwfn, QED_SB) - 1) { + DP_INFO(p_hwfn, + "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n", + RESC_NUM(p_hwfn, QED_SB) - 1, + p_info->usage.cnt); + p_info->usage.cnt = RESC_NUM(p_hwfn, QED_SB) - 1; + } + + if (IS_PF_SRIOV(p_hwfn)) { + u16 vfs = p_hwfn->cdev->p_iov_info->total_vfs; + + if (vfs != p_info->usage.iov_cnt) + DP_VERBOSE(p_hwfn, + NETIF_MSG_INTR, + "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n", + p_info->usage.iov_cnt, vfs); + + /* At this point we know how many SBs we have totally + * in IGU + number of PF SBs. So we can validate that + * we'd have sufficient for VF. + */ + if (vfs > p_info->usage.free_cnt + + p_info->usage.free_cnt_iov - p_info->usage.cnt) { + DP_NOTICE(p_hwfn, + "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n", + p_info->usage.free_cnt + + p_info->usage.free_cnt_iov, + p_info->usage.cnt, vfs); + return -EINVAL; + } + + /* Currently cap the number of VFs SBs by the + * number of VFs. + */ + p_info->usage.iov_cnt = vfs; + } + } + + /* Mark all SBs as free, now in the right PF/VFs division */ + p_info->usage.free_cnt = p_info->usage.cnt; + p_info->usage.free_cnt_iov = p_info->usage.iov_cnt; + p_info->usage.orig = p_info->usage.cnt; + p_info->usage.iov_orig = p_info->usage.iov_cnt; + + /* We now proceed to re-configure the IGU cam to reflect the initial + * configuration. We can start with the Default SB. 
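+ * The PF SBs are laid out immediately after it, then the SBs reserved
+ * for this PF's VFs; any remaining valid CAM lines are cleared.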
+ */ + pf_sbs = p_info->usage.cnt; + vf_sbs = p_info->usage.iov_cnt; + + for (igu_sb_id = p_info->igu_dsb_id; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + p_block = &p_info->entry[igu_sb_id]; + val = 0; + + if (!(p_block->status & QED_IGU_STATUS_VALID)) + continue; + + if (p_block->status & QED_IGU_STATUS_DSB) { + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = 0; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_PF | + QED_IGU_STATUS_DSB; + } else if (pf_sbs) { + pf_sbs--; + p_block->function_id = p_hwfn->rel_pf_id; + p_block->is_pf = 1; + p_block->vector_number = p_info->usage.cnt - pf_sbs; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_PF | + QED_IGU_STATUS_FREE; + } else if (vf_sbs) { + p_block->function_id = + p_hwfn->cdev->p_iov_info->first_vf_in_pf + + p_info->usage.iov_cnt - vf_sbs; + p_block->is_pf = 0; + p_block->vector_number = 0; + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + vf_sbs--; + } else { + p_block->function_id = 0; + p_block->is_pf = 0; + p_block->vector_number = 0; + } + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, + p_block->function_id); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf); + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, + p_block->vector_number); + + /* VF entries would be enabled when VF is initializaed */ + SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf); + + rval = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); + + if (rval != val) { + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * igu_sb_id, val); + + DP_VERBOSE(p_hwfn, + NETIF_MSG_INTR, + "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n", + igu_sb_id, + p_block->function_id, + p_block->is_pf, + p_block->vector_number, rval, val); + } + } + + return 0; +} + +static void qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 igu_sb_id) +{ + u32 val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id); + struct qed_igu_block *p_block; + + p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id]; + + /* Fill the block information */ + p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); + p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); + p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); + p_block->igu_sb_id = igu_sb_id; +} + +int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_igu_info *p_igu_info; + struct qed_igu_block *p_block; + u32 min_vf = 0, max_vf = 0; + u16 igu_sb_id; + + p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); + if (!p_hwfn->hw_info.p_igu_info) + return -ENOMEM; + + p_igu_info = p_hwfn->hw_info.p_igu_info; + + /* Distinguish between existent and non-existent default SB */ + p_igu_info->igu_dsb_id = QED_SB_INVALID_IDX; + + /* Find the range of VF ids whose SB belong to this PF */ + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + min_vf = p_iov->first_vf_in_pf; + max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs; + } + + for (igu_sb_id = 0; + igu_sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev); igu_sb_id++) { + /* Read current entry; Notice it might not belong to this PF */ + qed_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id); + p_block = &p_igu_info->entry[igu_sb_id]; + + if ((p_block->is_pf) && + (p_block->function_id == p_hwfn->rel_pf_id)) { + p_block->status = 
QED_IGU_STATUS_PF | + QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) + p_igu_info->usage.cnt++; + } else if (!(p_block->is_pf) && + (p_block->function_id >= min_vf) && + (p_block->function_id < max_vf)) { + /* Available for VFs of this PF */ + p_block->status = QED_IGU_STATUS_VALID | + QED_IGU_STATUS_FREE; + + if (p_igu_info->igu_dsb_id != QED_SB_INVALID_IDX) + p_igu_info->usage.iov_cnt++; + } + + /* Mark the First entry belonging to the PF or its VFs + * as the default SB [we'll reset IGU prior to first usage]. + */ + if ((p_block->status & QED_IGU_STATUS_VALID) && + (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX)) { + p_igu_info->igu_dsb_id = igu_sb_id; + p_block->status |= QED_IGU_STATUS_DSB; + } + + /* limit number of prints by having each PF print only its + * entries with the exception of PF0 which would print + * everything. + */ + if ((p_block->status & QED_IGU_STATUS_VALID) || + (p_hwfn->abs_pf_id == 0)) { + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n", + igu_sb_id, p_block->function_id, + p_block->is_pf, p_block->vector_number); + } + } + + if (p_igu_info->igu_dsb_id == QED_SB_INVALID_IDX) { + DP_NOTICE(p_hwfn, + "IGU CAM returned invalid values igu_dsb_id=0x%x\n", + p_igu_info->igu_dsb_id); + return -EINVAL; + } + + /* All non default SB are considered free at this point */ + p_igu_info->usage.free_cnt = p_igu_info->usage.cnt; + p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt; + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n", + p_igu_info->igu_dsb_id, + p_igu_info->usage.cnt, p_igu_info->usage.iov_cnt); + + return 0; +} + +/** + * qed_int_igu_init_rt() - Initialize IGU runtime registers. + * + * @p_hwfn: HW device data. 
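+ *
+ * Stores the function-enable bit of IGU_REG_PF_CONFIGURATION in the
+ * runtime array so that it is programmed into the chip as part of the
+ * init sequence.
+ *
+ * Return: Void.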
+ */ +void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn) +{ + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; + + STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); +} + +u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn) +{ + u32 lsb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - + IGU_CMD_INT_ACK_BASE; + u32 msb_igu_cmd_addr = IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - + IGU_CMD_INT_ACK_BASE; + u32 intr_status_hi = 0, intr_status_lo = 0; + u64 intr_status = 0; + + intr_status_lo = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + lsb_igu_cmd_addr * 8); + intr_status_hi = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + msb_igu_cmd_addr * 8); + intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; + + return intr_status; +} + +static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) +{ + tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc); + p_hwfn->b_sp_dpc_enabled = true; +} + +int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + int rc = 0; + + rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt); + if (rc) + return rc; + + rc = qed_int_sb_attn_alloc(p_hwfn, p_ptt); + + return rc; +} + +void qed_int_free(struct qed_hwfn *p_hwfn) +{ + qed_int_sp_sb_free(p_hwfn); + qed_int_sb_attn_free(p_hwfn); +} + +void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); + qed_int_sb_attn_setup(p_hwfn, p_ptt); + qed_int_sp_dpc_setup(p_hwfn); +} + +void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + struct qed_sb_cnt_info *p_sb_cnt_info) +{ + struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; + + if (!info || !p_sb_cnt_info) + return; + + memcpy(p_sb_cnt_info, &info->usage, sizeof(*p_sb_cnt_info)); +} + +void qed_int_disable_post_isr_release(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) + cdev->hwfns[i].b_int_requested = false; +} + +void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable) +{ + cdev->attn_clr_en = clr_enable; +} + +int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx) +{ + struct cau_sb_entry sb_entry; + u32 params; + int rc; + + if (!p_hwfn->hw_init_done) { + DP_ERR(p_hwfn, "hardware not initialized yet\n"); + return -EINVAL; + } + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, NULL); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + params = le32_to_cpu(sb_entry.params); + + if (tx) + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES1, timer_res); + else + SET_FIELD(params, CAU_SB_ENTRY_TIMER_RES0, timer_res); + + sb_entry.params = cpu_to_le32(params); + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + sb_id * sizeof(u64), 2, NULL); + if (rc) { + DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc); + return rc; + } + + return rc; +} + +int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info) +{ + u16 sbid = p_sb->igu_sb_id; + u32 i; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (sbid >= NUM_OF_SBS(p_hwfn->cdev)) + return -EINVAL; + + p_info->igu_prod = qed_rd(p_hwfn, p_ptt, IGU_REG_PRODUCER_MEMORY + sbid * 4); + p_info->igu_cons = qed_rd(p_hwfn, p_ptt, IGU_REG_CONSUMER_MEM + sbid * 4); + + for (i = 0; i < PIS_PER_SB; i++) + p_info->pi[i] = (u16)qed_rd(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + sbid * 4 * PIS_PER_SB + i * 4); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h 
b/drivers/net/ethernet/qlogic/qed/qed_int.h new file mode 100644 index 0000000000..7e5127f617 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -0,0 +1,459 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_INT_H +#define _QED_INT_H + +#include <linux/types.h> +#include <linux/slab.h> +#include "qed.h" + +/* Fields of IGU PF CONFIGURATION REGISTER */ +#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */ +#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ +#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */ +#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ +#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ +#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ +/* Fields of IGU VF CONFIGURATION REGISTER */ +#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */ +#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ +#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ +#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */ +#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */ + +/* Igu control commands + */ +enum igu_ctrl_cmd { + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD +}; + +/* Control register for the IGU command register + */ +struct igu_ctrl_reg { + u32 ctrl_data; +#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */ +#define IGU_CTRL_REG_FID_SHIFT 0 +#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */ +#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16 +#define IGU_CTRL_REG_RESERVED_MASK 0x1 +#define IGU_CTRL_REG_RESERVED_SHIFT 28 +#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */ +#define IGU_CTRL_REG_TYPE_SHIFT 31 +}; + +enum qed_coalescing_fsm { + QED_COAL_RX_STATE_MACHINE, + QED_COAL_TX_STATE_MACHINE +}; + +/** + * qed_int_igu_enable_int(): Enable device interrupts. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode to use. + * + * Return: Void. + */ +void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_int_mode int_mode); + +/** + * qed_int_igu_disable_int(): Disable device interrupts. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + */ +void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc + * register from igu. + * + * @p_hwfn: HW device data. + * + * Return: u64. + */ +u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn); + +#define QED_SP_SB_ID 0xffff +/** + * qed_int_sb_init(): Initializes the sb_info structure. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: points to an uninitialized (but allocated) sb_info structure + * @sb_virt_addr: SB Virtual address. + * @sb_phy_addr: SB Physial address. + * @sb_id: the sb_id to be used (zero based in driver) + * should use QED_SP_SB_ID for SP Status block + * + * Return: int. + * + * Once the structure is initialized it can be passed to sb related functions. + */ +int qed_int_sb_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + u16 sb_id); +/** + * qed_int_sb_setup(): Setup the sb. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: Initialized sb_info structure. + * + * Return: Void. 
+ */ +void qed_int_sb_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_sb_info *sb_info); + +/** + * qed_int_sb_release(): Releases the sb_info structure. + * + * @p_hwfn: HW device data. + * @sb_info: Points to an allocated sb_info structure. + * @sb_id: The sb_id to be used (zero based in driver) + * should never be equal to QED_SP_SB_ID + * (SP Status block). + * + * Return: int. + * + * Once the structure is released, it's memory can be freed. + */ +int qed_int_sb_release(struct qed_hwfn *p_hwfn, + struct qed_sb_info *sb_info, + u16 sb_id); + +/** + * qed_int_sp_dpc(): To be called when an interrupt is received on the + * default status block. + * + * @t: Tasklet. + * + * Return: Void. + * + */ +void qed_int_sp_dpc(struct tasklet_struct *t); + +/** + * qed_int_get_num_sbs(): Get the number of status blocks configured + * for this funciton in the igu. + * + * @p_hwfn: HW device data. + * @p_sb_cnt_info: Pointer to SB count info. + * + * Return: Void. + */ +void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + struct qed_sb_cnt_info *p_sb_cnt_info); + +/** + * qed_int_disable_post_isr_release(): Performs the cleanup post ISR + * release. The API need to be called after releasing all slowpath IRQs + * of the device. + * + * @cdev: Qed dev pointer. + * + * Return: Void. + */ +void qed_int_disable_post_isr_release(struct qed_dev *cdev); + +/** + * qed_int_attn_clr_enable: Sets whether the general behavior is + * preventing attentions from being reasserted, or following the + * attributes of the specific attention. + * + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable + * + * Return: Void. + * + */ +void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); + +/** + * qed_int_get_sb_dbg: Read debug information regarding a given SB + * + * @p_hwfn: hw function pointer + * @p_ptt: ptt resource + * @p_sb: pointer to status block for which we want to get info + * @p_info: pointer to struct to fill with information regarding SB + * + * Return: 0 with status block info filled on success, otherwise return error + */ +int qed_int_get_sb_dbg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_sb_info *p_sb, struct qed_sb_info_dbg *p_info); + +/** + * qed_db_rec_handler(): Doorbell Recovery handler. + * Run doorbell recovery in case of PF overflow (and flush DORQ if + * needed). + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. + */ +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +#define QED_CAU_DEF_RX_TIMER_RES 0 +#define QED_CAU_DEF_TX_TIMER_RES 0 + +#define QED_SB_ATT_IDX 0x0001 +#define QED_SB_EVENT_MASK 0x0003 + +#define SB_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) + +#define QED_SB_INVALID_IDX 0xffff + +struct qed_igu_block { + u8 status; +#define QED_IGU_STATUS_FREE 0x01 +#define QED_IGU_STATUS_VALID 0x02 +#define QED_IGU_STATUS_PF 0x04 +#define QED_IGU_STATUS_DSB 0x08 + + u8 vector_number; + u8 function_id; + u8 is_pf; + + /* Index inside IGU [meant for back reference] */ + u16 igu_sb_id; + + struct qed_sb_info *sb_info; +}; + +struct qed_igu_info { + struct qed_igu_block entry[MAX_TOT_SB_PER_PATH]; + u16 igu_dsb_id; + + struct qed_sb_cnt_info usage; + + bool b_allow_pf_vf_change; +}; + +/** + * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources + * provided by MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. 
+ */ +int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into + * an IGU sb-id + * + * @p_hwfn: HW device data. + * @sb_id: user provided sb_id. + * + * Return: An index inside IGU CAM where the SB resides. + */ +u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + +/** + * qed_get_igu_free_sb(): Return a pointer to an unused valid SB + * + * @p_hwfn: HW device data. + * @b_is_pf: True iff we want a SB belonging to a PF. + * + * Return: Point to an igu_block, NULL if none is available. + */ +struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, + bool b_is_pf); + +void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_set, + bool b_slowpath); + +void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn); + +/** + * qed_int_igu_read_cam(): Reads the IGU CAM. + * This function needs to be called during hardware + * prepare. It reads the info from igu cam to know which + * status block is the default / base status block etc. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. + */ +int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn, + void *cookie); +/** + * qed_int_register_cb(): Register callback func for slowhwfn statusblock. + * + * @p_hwfn: HW device data. + * @comp_cb: Function to be called when there is an + * interrupt on the sp sb + * @cookie: Passed to the callback function + * @sb_idx: (OUT) parameter which gives the chosen index + * for this protocol. + * @p_fw_cons: Pointer to the actual address of the + * consumer for this protocol. + * + * Return: Int. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. + */ +int qed_int_register_cb(struct qed_hwfn *p_hwfn, + qed_int_comp_cb_t comp_cb, + void *cookie, + u8 *sb_idx, + __le16 **p_fw_cons); + +/** + * qed_int_unregister_cb(): Unregisters callback function from sp sb. + * + * @p_hwfn: HW device data. + * @pi: Producer Index. + * + * Return: Int. + * + * Partner of qed_int_register_cb -> should be called + * when no longer required. + */ +int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, + u8 pi); + +/** + * qed_int_get_sp_sb_id(): Get the slowhwfn sb id. + * + * @p_hwfn: HW device data. + * + * Return: u16. + */ +u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); + +/** + * qed_int_igu_init_pure_rt_single(): Status block cleanup. + * Should be called for each status + * block that will be used -> both PF / VF. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @igu_sb_id: IGU status block id. + * @opaque: Opaque fid of the sb owner. + * @b_set: Set(1) / Clear(0). + * + * Return: Void. + */ +void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 igu_sb_id, + u16 opaque, + bool b_set); + +/** + * qed_int_cau_conf_sb(): Configure cau for a given status block. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_phys: SB Physical. + * @igu_sb_id: IGU status block id. + * @vf_number: VF number + * @vf_valid: VF valid or not. + * + * Return: Void. + */ +void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + dma_addr_t sb_phys, + u16 igu_sb_id, + u16 vf_number, + u8 vf_valid); + +/** + * qed_int_alloc(): QED interrupt alloc. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. 
+ */ +int qed_int_alloc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_int_free(): QED interrupt free. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_int_free(struct qed_hwfn *p_hwfn); + +/** + * qed_int_setup(): QED interrupt setup. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + */ +void qed_int_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_int_igu_enable(): Enable Interrupt & Attention for hw function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrut mode + * + * Return: Int. + */ +int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + enum qed_int_mode int_mode); + +/** + * qed_init_cau_sb_entry(): Initialize CAU status block entry. + * + * @p_hwfn: HW device data. + * @p_sb_entry: Pointer SB entry. + * @pf_id: PF number + * @vf_number: VF number + * @vf_valid: VF valid or not. + * + * Return: Void. + */ +void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, + u8 pf_id, + u16 vf_number, + u8 vf_valid); + +int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u8 timer_res, u16 sb_id, bool tx); + +#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev)) + +int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + bool hw_init); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h new file mode 100644 index 0000000000..3ccdd3b1d8 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h @@ -0,0 +1,500 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_IRO_HSI_H +#define _QED_IRO_HSI_H + +#include <linux/types.h> + +enum { + IRO_YSTORM_FLOW_CONTROL_MODE_GTT, + IRO_PSTORM_PKT_DUPLICATION_CFG, + IRO_TSTORM_PORT_STAT, + IRO_TSTORM_LL2_PORT_STAT, + IRO_TSTORM_PKT_DUPLICATION_CFG, + IRO_USTORM_VF_PF_CHANNEL_READY_GTT, + IRO_USTORM_FLR_FINAL_ACK_GTT, + IRO_USTORM_EQE_CONS_GTT, + IRO_USTORM_ETH_QUEUE_ZONE_GTT, + IRO_USTORM_COMMON_QUEUE_CONS_GTT, + IRO_XSTORM_PQ_INFO, + IRO_XSTORM_INTEG_TEST_DATA, + IRO_YSTORM_INTEG_TEST_DATA, + IRO_PSTORM_INTEG_TEST_DATA, + IRO_TSTORM_INTEG_TEST_DATA, + IRO_MSTORM_INTEG_TEST_DATA, + IRO_USTORM_INTEG_TEST_DATA, + IRO_XSTORM_OVERLAY_BUF_ADDR, + IRO_YSTORM_OVERLAY_BUF_ADDR, + IRO_PSTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_OVERLAY_BUF_ADDR, + IRO_MSTORM_OVERLAY_BUF_ADDR, + IRO_USTORM_OVERLAY_BUF_ADDR, + IRO_TSTORM_LL2_RX_PRODS_GTT, + IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_USTORM_PER_QUEUE_STAT, + IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT, + IRO_MSTORM_QUEUE_STAT, + IRO_MSTORM_TPA_TIMEOUT_US, + IRO_MSTORM_ETH_VF_PRODS, + IRO_MSTORM_ETH_PF_PRODS_GTT, + IRO_MSTORM_ETH_PF_STAT, + IRO_USTORM_QUEUE_STAT, + IRO_USTORM_ETH_PF_STAT, + IRO_PSTORM_QUEUE_STAT, + IRO_PSTORM_ETH_PF_STAT, + IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT, + IRO_TSTORM_ETH_PRS_INPUT, + IRO_ETH_RX_RATE_LIMIT, + IRO_TSTORM_ETH_RSS_UPDATE_GTT, + IRO_XSTORM_ETH_QUEUE_ZONE_GTT, + IRO_YSTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_CQ_PROD, + IRO_USTORM_TOE_GRQ_PROD, + IRO_TSTORM_SCSI_CMDQ_CONS_GTT, + IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT, + IRO_TSTORM_ISCSI_RX_STATS, + IRO_MSTORM_ISCSI_RX_STATS, + IRO_USTORM_ISCSI_RX_STATS, + IRO_XSTORM_ISCSI_TX_STATS, + IRO_YSTORM_ISCSI_TX_STATS, + IRO_PSTORM_ISCSI_TX_STATS, + IRO_TSTORM_FCOE_RX_STATS, + IRO_PSTORM_FCOE_TX_STATS, + IRO_PSTORM_RDMA_QUEUE_STAT, + 
IRO_TSTORM_RDMA_QUEUE_STAT, + IRO_XSTORM_RDMA_ASSERT_LEVEL, + IRO_YSTORM_RDMA_ASSERT_LEVEL, + IRO_PSTORM_RDMA_ASSERT_LEVEL, + IRO_TSTORM_RDMA_ASSERT_LEVEL, + IRO_MSTORM_RDMA_ASSERT_LEVEL, + IRO_USTORM_RDMA_ASSERT_LEVEL, + IRO_XSTORM_IWARP_RXMIT_STATS, + IRO_TSTORM_ROCE_EVENTS_STAT, + IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS, + IRO_YSTORM_ROCE_ERROR_STATS, + IRO_PSTORM_ROCE_DCQCN_SENT_STATS, + IRO_USTORM_ROCE_CQE_STATS, +}; + +/* Pstorm LiteL2 queue statistics */ + +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \ + + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size) + +/* Tstorm LightL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size) + +/* Ustorm LiteL2 queue statistics */ +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \ + + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \ + (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size) + +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ + (IRO[IRO_ETH_RX_RATE_LIMIT].base \ + + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size) + +/* Mstorm ETH PF queues producers */ +#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \ + (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \ + + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1)) +#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size) + +/* Mstorm pf statistics */ +#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1)) +#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size) + +/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone + * size mode. 
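+ * The offset is two-dimensional: 'm1' is the per-VF stride and 'm2' the
+ * per-queue stride within that VF.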
+ */ +#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \ + (IRO[IRO_MSTORM_ETH_VF_PRODS].base \ + + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \ + + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2)) +#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size) + +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size) + +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size) + +/* Mstorm overlay buffer host address */ +#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base) +#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size) + +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_MSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1)) +#define MSTORM_QUEUE_STAT_SIZ (IRO[IRO_MSTORM_QUEUE_STAT].size) + +/* Mstorm error level for assert */ +#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1)) +#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size) + +/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* TPA agregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size) + +/* Control frame's EthType configuration for TX control frame security */ +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \ + + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1)) +#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \ + (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size) + +/* Pstorm pf statistics */ +#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1)) +#define PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size) + +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_FCOE_TX_STATS].base \ + + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size) + +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size) + +/* Pstorm iSCSI TX stats */ +#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size) + +/* Pstorm overlay buffer host address */ +#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base) +#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size) + +/* Pstorm LL2 
packet duplication configuration. Use pstorm_pkt_dup_cfg + * data type. + */ +#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1)) +#define PSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size) + +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_PSTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size) + +/* Pstorm error level for assert */ +#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1)) +#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size) + +/* Pstorm RDMA queue statistics */ +#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1)) +#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size) + +/* DCQCN Sent Statistics */ +#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \ + + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1)) +#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \ + (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size) + +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size) + +/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0. + * Use eth_tstorm_rss_update_data for update. + */ +#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \ + + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1)) +#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE\ + (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size) + +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_FCOE_RX_STATS].base \ + + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size) + +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size) + +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size) + +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_LL2_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size) + +/* Tstorm producers */ +#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \ + (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \ + + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1)) +#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size) + +/* Tstorm overlay buffer host address */ +#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base) + +#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size) + +/* Tstorm LL2 packet duplication configuration. + * Use tstorm_pkt_dup_cfg data type. 
+ */ +#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \ + + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1)) +#define TSTORM_PKT_DUPLICATION_CFG_SIZE \ + (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size) + +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) \ + (IRO[IRO_TSTORM_PORT_STAT].base \ + + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size) + +/* Tstorm error level for assert */ +#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1)) +#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size) + +/* Tstorm RDMA queue statistics */ +#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \ + (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \ + + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1)) +#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size) + +/* Tstorm RoCE Event Statistics */ +#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \ + (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \ + + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1)) +#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size) + +/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID, + * BDqueue-id. + */ +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \ + + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \ + + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size) + +/* Tstorm cmdq-cons of given command queue-id */ +#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \ + + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1)) +#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \ + (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size) + +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1)) +#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \ + (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size) + +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_EQE_CONS_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1)) +#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size) + +/* Ustorm pf statistics */ +#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_ETH_PF_STAT].base \ + + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1)) +#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size) + +/* Ustorm eth queue zone */ +#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \ + (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \ + (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \ + + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1)) +#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size) + +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size) + 
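+/* Each *_OFFSET macro in this file resolves a storm RAM address from the
+ * global IRO array: 'base' is the start of the region and 'm1'/'m2' scale
+ * the caller-supplied indices, e.g. USTORM_EQE_CONS_GTT_OFFSET(pf_id)
+ * expands to IRO[IRO_USTORM_EQE_CONS_GTT].base +
+ * (pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1.
+ */
+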
+/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_USTORM_ISCSI_RX_STATS].base \ + + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size) + +/* Ustorm overlay buffer host address */ +#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base) +#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size) + +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[IRO_USTORM_QUEUE_STAT].base \ + + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size) + +/* Ustorm error level for assert */ +#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1)) +#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size) + +/* RoCE CQEs Statistics */ +#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_USTORM_ROCE_CQE_STATS].base \ + + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1)) +#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size) + +/* Ustorm cqe producer */ +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_USTORM_TOE_CQ_PROD].base \ + + ((rss_id) * IRO[IRO_USTORM_TOE_CQ_PROD].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size) + +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[IRO_USTORM_TOE_GRQ_PROD].base \ + + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size) + +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \ + + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1)) +#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \ + (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size) + +/* Xstorm queue zone */ +#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \ + (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \ + + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1)) +#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size) + +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size) + +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size) + +/* Xstorm iWARP rxmit stats */ +#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \ + + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1)) +#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size) + +/* Xstorm overlay buffer host address */ +#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base) +#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size) + +/* Xstorm common PQ info */ +#define XSTORM_PQ_INFO_OFFSET(pq_id) \ + (IRO[IRO_XSTORM_PQ_INFO].base \ + + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1)) +#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size) + +/* Xstorm error level for assert */ +#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * 
IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1)) +#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size) + +/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base) +#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \ + (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size) + +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size) + +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \ + (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \ + + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size) + +/* Ystorm overlay buffer host address */ +#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base) +#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size) + +/* Ystorm error level for assert */ +#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \ + (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \ + + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1)) +#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size) + +/* DCQCN Received Statistics */ +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1)) +#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \ + (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size) + +/* RoCE Error Statistics */ +#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \ + (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \ + + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1)) +#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size) + +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[IRO_YSTORM_TOE_CQ_PROD].base \ + + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size) + +/* Per-chip offsets in iro_arr in dwords */ +#define E4_IRO_ARR_OFFSET 0 +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c new file mode 100644 index 0000000000..980e7289b4 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -0,0 +1,1415 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/workqueue.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_iscsi_if.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_iro_hsi.h" +#include "qed_iscsi.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_reg_addr.h" + +struct qed_iscsi_conn { + struct list_head list_entry; + bool free_on_delete; + + u16 conn_id; + u32 icid; + u32 fw_cid; + + u8 layer_code; + u8 offl_flags; + u8 connect_mode; + u32 initial_ack; + dma_addr_t sq_pbl_addr; + struct qed_chain r2tq; + struct qed_chain xhq; + struct qed_chain uhq; + + struct tcp_upload_params *tcp_upload_params_virt_addr; + dma_addr_t tcp_upload_params_phys_addr; + struct scsi_terminate_extra_params *queue_cnts_virt_addr; + dma_addr_t queue_cnts_phys_addr; + dma_addr_t syn_phy_addr; + + u16 syn_ip_payload_length; + u8 local_mac[6]; + u8 remote_mac[6]; + u16 vlan_id; + u16 tcp_flags; + u8 ip_version; + u32 remote_ip[4]; + u32 local_ip[4]; + u8 ka_max_probe_cnt; + u8 dup_ack_theshold; + u32 rcv_next; + u32 snd_una; + u32 snd_next; + u32 snd_max; + u32 snd_wnd; + u32 rcv_wnd; + u32 snd_wl1; + u32 cwnd; + u32 ss_thresh; + u16 srtt; + u16 rtt_var; + u32 ts_recent; + u32 ts_recent_age; + u32 total_rt; + u32 ka_timeout_delta; + u32 rt_timeout_delta; + u8 dup_ack_cnt; + u8 snd_wnd_probe_cnt; + u8 ka_probe_cnt; + u8 rt_cnt; + u32 flow_label; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u32 initial_rcv_wnd; + u8 ttl; + u8 tos_or_tc; + u16 remote_port; + u16 local_port; + u16 mss; + u8 snd_wnd_scale; + u8 rcv_wnd_scale; + u16 da_timeout_value; + u8 ack_frequency; + + u8 update_flag; + u8 default_cq; + u32 max_seq_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + u32 first_seq_length; + u32 exp_stat_sn; + u32 stat_sn; + u16 physical_q0; + u16 physical_q1; + u8 abortive_dsconnect; +}; + +static int qed_iscsi_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + __le16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + if (p_hwfn->p_iscsi_info->event_cb) { + struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info; + + return p_iscsi->event_cb(p_iscsi->event_context, + fw_event_code, data); + } else { + DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n"); + return -EINVAL; + } +} + +static int +qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr, + void *event_context, iscsi_event_cb_t async_event_cb) +{ + struct iscsi_init_ramrod_params *p_ramrod = NULL; + struct scsi_init_func_queues *p_queue = NULL; + struct qed_iscsi_pf_params *p_params = NULL; + struct iscsi_spe_func_init *p_init = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + u32 dval; + u16 val; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = 
qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_init; + p_init = &p_ramrod->iscsi_init_spe; + p_params = &p_hwfn->pf_params.iscsi_pf_params; + p_queue = &p_init->q_params; + + /* Sanity */ + if (p_params->num_queues > p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]) { + DP_ERR(p_hwfn, + "Cannot satisfy CQ amount. Queues requested %d, CQs available %d. Aborting function start\n", + p_params->num_queues, + p_hwfn->hw_info.feat_num[QED_ISCSI_CQ]); + qed_sp_destroy_request(p_hwfn, p_ent); + return -EINVAL; + } + + val = p_params->half_way_close_timeout; + p_init->half_way_close_timeout = cpu_to_le16(val); + p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; + p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; + p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; + p_init->ll2_rx_queue_id = + p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + + p_params->ll2_ooo_queue_id; + + p_init->func_params.log_page_size = p_params->log_page_size; + val = p_params->num_tasks; + p_init->func_params.num_tasks = cpu_to_le16(val); + p_init->debug_mode.flags = p_params->debug_mode; + + DMA_REGPAIR_LE(p_queue->glbl_q_params_addr, + p_params->glbl_q_params_addr); + + val = p_params->cq_num_entries; + p_queue->cq_num_entries = cpu_to_le16(val); + val = p_params->cmdq_num_entries; + p_queue->cmdq_num_entries = cpu_to_le16(val); + p_queue->num_queues = p_params->num_queues; + dval = (u8)p_hwfn->hw_info.resc_start[QED_CMDQS_CQS]; + p_queue->queue_relative_offset = (u8)dval; + p_queue->cq_sb_pi = p_params->gl_rq_pi; + p_queue->cmdq_sb_pi = p_params->gl_cmd_pi; + + for (i = 0; i < p_params->num_queues; i++) { + val = qed_get_igu_sb_id(p_hwfn, i); + p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); + } + + p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ); + + DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_RQ], + p_params->bdq_pbl_base_addr[BDQ_ID_RQ]); + p_queue->bdq_pbl_num_entries[BDQ_ID_RQ] = + p_params->bdq_pbl_num_entries[BDQ_ID_RQ]; + val = p_params->bdq_xoff_threshold[BDQ_ID_RQ]; + p_queue->bdq_xoff_threshold[BDQ_ID_RQ] = cpu_to_le16(val); + val = p_params->bdq_xon_threshold[BDQ_ID_RQ]; + p_queue->bdq_xon_threshold[BDQ_ID_RQ] = cpu_to_le16(val); + + DMA_REGPAIR_LE(p_queue->bdq_pbl_base_address[BDQ_ID_IMM_DATA], + p_params->bdq_pbl_base_addr[BDQ_ID_IMM_DATA]); + p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA] = + p_params->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]; + val = p_params->bdq_xoff_threshold[BDQ_ID_IMM_DATA]; + p_queue->bdq_xoff_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); + val = p_params->bdq_xon_threshold[BDQ_ID_IMM_DATA]; + p_queue->bdq_xon_threshold[BDQ_ID_IMM_DATA] = cpu_to_le16(val); + val = p_params->rq_buffer_size; + p_queue->rq_buffer_size = cpu_to_le16(val); + if (p_params->is_target) { + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + if (p_queue->bdq_pbl_num_entries[BDQ_ID_IMM_DATA]) + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID, 1); + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 1); + } else { + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_RQ_VALID, 1); + } + p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer); + val = p_params->tx_sws_timer; + p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val); + p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt; + + p_hwfn->p_iscsi_info->event_context = event_context; + p_hwfn->p_iscsi_info->event_cb = 
async_event_cb; + + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP, + qed_iscsi_async_event); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_offload *p_ramrod = NULL; + struct tcp_offload_params_opt2 *p_tcp2 = NULL; + struct tcp_offload_params *p_tcp = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + dma_addr_t r2tq_pbl_addr; + dma_addr_t xhq_pbl_addr; + dma_addr_t uhq_pbl_addr; + u16 physical_q; + __le16 tmp; + int rc = 0; + u32 dval; + u16 wval; + u16 *p; + u8 i; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_offload; + + /* Transmission PQ is the first of the PF */ + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_conn->physical_q0 = physical_q; + p_ramrod->iscsi.physical_q0 = cpu_to_le16(physical_q); + + /* iSCSI Pure-ACK PQ */ + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_conn->physical_q1 = physical_q; + p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q); + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + + DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr); + + r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq); + DMA_REGPAIR_LE(p_ramrod->iscsi.r2tq_pbl_addr, r2tq_pbl_addr); + + xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq); + DMA_REGPAIR_LE(p_ramrod->iscsi.xhq_pbl_addr, xhq_pbl_addr); + + uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq); + DMA_REGPAIR_LE(p_ramrod->iscsi.uhq_pbl_addr, uhq_pbl_addr); + + p_ramrod->iscsi.initial_ack = cpu_to_le32(p_conn->initial_ack); + p_ramrod->iscsi.flags = p_conn->offl_flags; + p_ramrod->iscsi.default_cq = p_conn->default_cq; + p_ramrod->iscsi.stat_sn = cpu_to_le32(p_conn->stat_sn); + + if (!GET_FIELD(p_ramrod->iscsi.flags, + ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B)) { + p_tcp = &p_ramrod->tcp; + + p = (u16 *)p_conn->local_mac; + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp->local_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp->local_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp->local_mac_addr_lo = tmp; + + p = (u16 *)p_conn->remote_mac; + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp->remote_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp->remote_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp->remote_mac_addr_lo = tmp; + + p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); + + p_tcp->flags = cpu_to_le16(p_conn->tcp_flags); + p_tcp->ip_version = p_conn->ip_version; + for (i = 0; i < 4; i++) { + dval = p_conn->remote_ip[i]; + p_tcp->remote_ip[i] = cpu_to_le32(dval); + dval = p_conn->local_ip[i]; + p_tcp->local_ip[i] = cpu_to_le32(dval); + } + p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt; + p_tcp->dup_ack_theshold = p_conn->dup_ack_theshold; + + p_tcp->rcv_next = cpu_to_le32(p_conn->rcv_next); + p_tcp->snd_una = cpu_to_le32(p_conn->snd_una); + p_tcp->snd_next = cpu_to_le32(p_conn->snd_next); + p_tcp->snd_max = cpu_to_le32(p_conn->snd_max); + p_tcp->snd_wnd = 
cpu_to_le32(p_conn->snd_wnd); + p_tcp->rcv_wnd = cpu_to_le32(p_conn->rcv_wnd); + p_tcp->snd_wl1 = cpu_to_le32(p_conn->snd_wl1); + p_tcp->cwnd = cpu_to_le32(p_conn->cwnd); + p_tcp->ss_thresh = cpu_to_le32(p_conn->ss_thresh); + p_tcp->srtt = cpu_to_le16(p_conn->srtt); + p_tcp->rtt_var = cpu_to_le16(p_conn->rtt_var); + p_tcp->ts_recent = cpu_to_le32(p_conn->ts_recent); + p_tcp->ts_recent_age = cpu_to_le32(p_conn->ts_recent_age); + p_tcp->total_rt = cpu_to_le32(p_conn->total_rt); + dval = p_conn->ka_timeout_delta; + p_tcp->ka_timeout_delta = cpu_to_le32(dval); + dval = p_conn->rt_timeout_delta; + p_tcp->rt_timeout_delta = cpu_to_le32(dval); + p_tcp->dup_ack_cnt = p_conn->dup_ack_cnt; + p_tcp->snd_wnd_probe_cnt = p_conn->snd_wnd_probe_cnt; + p_tcp->ka_probe_cnt = p_conn->ka_probe_cnt; + p_tcp->rt_cnt = p_conn->rt_cnt; + p_tcp->flow_label = cpu_to_le32(p_conn->flow_label); + p_tcp->ka_timeout = cpu_to_le32(p_conn->ka_timeout); + p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval); + p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time); + dval = p_conn->initial_rcv_wnd; + p_tcp->initial_rcv_wnd = cpu_to_le32(dval); + p_tcp->ttl = p_conn->ttl; + p_tcp->tos_or_tc = p_conn->tos_or_tc; + p_tcp->remote_port = cpu_to_le16(p_conn->remote_port); + p_tcp->local_port = cpu_to_le16(p_conn->local_port); + p_tcp->mss = cpu_to_le16(p_conn->mss); + p_tcp->snd_wnd_scale = p_conn->snd_wnd_scale; + p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; + wval = p_conn->da_timeout_value; + p_tcp->da_timeout_value = cpu_to_le16(wval); + p_tcp->ack_frequency = p_conn->ack_frequency; + p_tcp->connect_mode = p_conn->connect_mode; + } else { + p_tcp2 = + &((struct iscsi_spe_conn_offload_option2 *)p_ramrod)->tcp; + + p = (u16 *)p_conn->local_mac; + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp2->local_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp2->local_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp2->local_mac_addr_lo = tmp; + + p = (u16 *)p_conn->remote_mac; + tmp = cpu_to_le16(get_unaligned_be16(p)); + p_tcp2->remote_mac_addr_hi = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 1)); + p_tcp2->remote_mac_addr_mid = tmp; + tmp = cpu_to_le16(get_unaligned_be16(p + 2)); + p_tcp2->remote_mac_addr_lo = tmp; + + p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id); + p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags); + + p_tcp2->ip_version = p_conn->ip_version; + for (i = 0; i < 4; i++) { + dval = p_conn->remote_ip[i]; + p_tcp2->remote_ip[i] = cpu_to_le32(dval); + dval = p_conn->local_ip[i]; + p_tcp2->local_ip[i] = cpu_to_le32(dval); + } + + p_tcp2->flow_label = cpu_to_le32(p_conn->flow_label); + p_tcp2->ttl = p_conn->ttl; + p_tcp2->tos_or_tc = p_conn->tos_or_tc; + p_tcp2->remote_port = cpu_to_le16(p_conn->remote_port); + p_tcp2->local_port = cpu_to_le16(p_conn->local_port); + p_tcp2->mss = cpu_to_le16(p_conn->mss); + p_tcp2->rcv_wnd_scale = p_conn->rcv_wnd_scale; + p_tcp2->connect_mode = p_conn->connect_mode; + wval = p_conn->syn_ip_payload_length; + p_tcp2->syn_ip_payload_length = cpu_to_le16(wval); + p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr); + p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr); + p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd); + p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt; + p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout); + p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time); + p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval); + } + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int 
qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_conn_update_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + u32 dval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_UPDATE_CONN, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_update; + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->flags = p_conn->update_flag; + p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); + dval = p_conn->max_recv_pdu_length; + p_ramrod->max_recv_pdu_length = cpu_to_le32(dval); + dval = p_conn->max_send_pdu_length; + p_ramrod->max_send_pdu_length = cpu_to_le32(dval); + dval = p_conn->first_seq_length; + p_ramrod->first_seq_length = cpu_to_le32(dval); + p_ramrod->exp_stat_sn = cpu_to_le32(p_conn->exp_stat_sn); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_mac_update *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u8 ucval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_MAC_UPDATE, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update; + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + ucval = p_conn->remote_mac[1]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval; + ucval = p_conn->remote_mac[0]; + ((u8 *)(&p_ramrod->remote_mac_addr_hi))[1] = ucval; + ucval = p_conn->remote_mac[3]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[0] = ucval; + ucval = p_conn->remote_mac[2]; + ((u8 *)(&p_ramrod->remote_mac_addr_mid))[1] = ucval; + ucval = p_conn->remote_mac[5]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[0] = ucval; + ucval = p_conn->remote_mac[4]; + ((u8 *)(&p_ramrod->remote_mac_addr_lo))[1] = ucval; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct iscsi_spe_conn_termination *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_TERMINATION_CONN, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iscsi_conn_terminate; + + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->abortive = p_conn->abortive_dsconnect; + + DMA_REGPAIR_LE(p_ramrod->query_params_addr, + 
p_conn->tcp_upload_params_phys_addr); + DMA_REGPAIR_LE(p_ramrod->queue_cnts_addr, p_conn->queue_cnts_phys_addr); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_CLEAR_SQ, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ISCSI_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP); + return rc; +} + +static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + if (RESC_NUM(p_hwfn, QED_BDQ)) { + return (u8 __iomem *)p_hwfn->regview + + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); + } else { + DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); + return NULL; + } +} + +static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn, + u8 bdq_id) +{ + if (RESC_NUM(p_hwfn, QED_BDQ)) { + return (u8 __iomem *)p_hwfn->regview + + GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_SCSI_BDQ_EXT_PROD, + RESC_START(p_hwfn, QED_BDQ), bdq_id); + } else { + DP_NOTICE(p_hwfn, "BDQ is not allocated!\n"); + return NULL; + } +} + +static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn) +{ + if (!p_conn->queue_cnts_virt_addr) + goto nomem; + memset(p_conn->queue_cnts_virt_addr, 0, + sizeof(*p_conn->queue_cnts_virt_addr)); + + if (!p_conn->tcp_upload_params_virt_addr) + goto nomem; + memset(p_conn->tcp_upload_params_virt_addr, 0, + sizeof(*p_conn->tcp_upload_params_virt_addr)); + + if (!p_conn->r2tq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->r2tq); + + if (!p_conn->uhq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->uhq); + + if (!p_conn->xhq.p_virt_addr) + goto nomem; + qed_chain_pbl_zero_mem(&p_conn->xhq); + + return 0; +nomem: + return -ENOMEM; +} + +static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn **p_out_conn) +{ + struct scsi_terminate_extra_params *p_q_cnts = NULL; + struct qed_iscsi_pf_params *p_params = NULL; + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + }; + struct tcp_upload_params *p_tcp = 
NULL; + struct qed_iscsi_conn *p_conn = NULL; + int rc = 0; + + /* Try finding a free connection that can be used */ + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + if (!list_empty(&p_hwfn->p_iscsi_info->free_list)) + p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, + struct qed_iscsi_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + *p_out_conn = p_conn; + return 0; + } + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + + /* Need to allocate a new connection */ + p_params = &p_hwfn->pf_params.iscsi_pf_params; + + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + p_q_cnts = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_q_cnts), + &p_conn->queue_cnts_phys_addr, + GFP_KERNEL); + if (!p_q_cnts) + goto nomem_queue_cnts_param; + p_conn->queue_cnts_virt_addr = p_q_cnts; + + p_tcp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_tcp), + &p_conn->tcp_upload_params_phys_addr, + GFP_KERNEL); + if (!p_tcp) + goto nomem_upload_param; + p_conn->tcp_upload_params_virt_addr = p_tcp; + + params.num_elems = p_params->num_r2tq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_wqe); + params.elem_size = sizeof(struct iscsi_wqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, ¶ms); + if (rc) + goto nomem_r2tq; + + params.num_elems = p_params->num_uhq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); + params.elem_size = sizeof(struct iscsi_uhqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, ¶ms); + if (rc) + goto nomem_uhq; + + params.elem_size = sizeof(struct iscsi_xhqe); + + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, ¶ms); + if (rc) + goto nomem; + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + return 0; + +nomem: + qed_chain_free(p_hwfn->cdev, &p_conn->uhq); +nomem_uhq: + qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); +nomem_r2tq: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct tcp_upload_params), + p_conn->tcp_upload_params_virt_addr, + p_conn->tcp_upload_params_phys_addr); +nomem_upload_param: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct scsi_terminate_extra_params), + p_conn->queue_cnts_virt_addr, + p_conn->queue_cnts_phys_addr); +nomem_queue_cnts_param: + kfree(p_conn); + + return -ENOMEM; +} + +static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_in_conn, + struct qed_iscsi_conn **p_out_conn) +{ + struct qed_iscsi_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + if (rc) + return rc; + + /* Use input connection or allocate a new one */ + if (p_in_conn) + p_conn = p_in_conn; + else + rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn); + + if (!rc) + rc = qed_iscsi_setup_connection(p_conn); + + if (rc) { + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); + return rc; + } + + p_conn->icid = icid; + p_conn->conn_id = (u16)icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + + *p_out_conn = p_conn; + + return rc; +} + +static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_iscsi_info->lock); + list_add_tail(&p_conn->list_entry, &p_hwfn->p_iscsi_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + 
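+ /* Note: only the CID is handed back to the context manager here; the
+  * connection object itself, with its chains and DMA buffers, is parked
+  * on the free_list for reuse and is not freed until qed_iscsi_free().
+  */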
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock); +} + +static void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn, + struct qed_iscsi_conn *p_conn) +{ + qed_chain_free(p_hwfn->cdev, &p_conn->xhq); + qed_chain_free(p_hwfn->cdev, &p_conn->uhq); + qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct tcp_upload_params), + p_conn->tcp_upload_params_virt_addr, + p_conn->tcp_upload_params_phys_addr); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct scsi_terminate_extra_params), + p_conn->queue_cnts_virt_addr, + p_conn->queue_cnts_phys_addr); + kfree(p_conn); +} + +int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_iscsi_info *p_iscsi_info; + + p_iscsi_info = kzalloc(sizeof(*p_iscsi_info), GFP_KERNEL); + if (!p_iscsi_info) + return -ENOMEM; + + INIT_LIST_HEAD(&p_iscsi_info->free_list); + + p_hwfn->p_iscsi_info = p_iscsi_info; + return 0; +} + +void qed_iscsi_setup(struct qed_hwfn *p_hwfn) +{ + spin_lock_init(&p_hwfn->p_iscsi_info->lock); +} + +void qed_iscsi_free(struct qed_hwfn *p_hwfn) +{ + struct qed_iscsi_conn *p_conn = NULL; + + if (!p_hwfn->p_iscsi_info) + return; + + while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) { + p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list, + struct qed_iscsi_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + qed_iscsi_free_connection(p_hwfn, p_conn); + } + } + + kfree(p_hwfn->p_iscsi_info); + p_hwfn->p_iscsi_info = NULL; +} + +static void _qed_iscsi_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct tstorm_iscsi_stats_drv tstats; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); + + p_stats->iscsi_rx_bytes_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_bytes_cnt); + p_stats->iscsi_rx_packet_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_packet_cnt); + p_stats->iscsi_rx_new_ooo_isle_events_cnt = + HILO_64_REGPAIR(tstats.iscsi_rx_new_ooo_isle_events_cnt); + p_stats->iscsi_cmdq_threshold_cnt = + le32_to_cpu(tstats.iscsi_cmdq_threshold_cnt); + p_stats->iscsi_rq_threshold_cnt = + le32_to_cpu(tstats.iscsi_rq_threshold_cnt); + p_stats->iscsi_immq_threshold_cnt = + le32_to_cpu(tstats.iscsi_immq_threshold_cnt); +} + +static void _qed_iscsi_get_mstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct mstorm_iscsi_stats_drv mstats; + u32 mstats_addr; + + memset(&mstats, 0, sizeof(mstats)); + mstats_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, sizeof(mstats)); + + p_stats->iscsi_rx_dropped_pdus_task_not_valid = + HILO_64_REGPAIR(mstats.iscsi_rx_dropped_pdus_task_not_valid); +} + +static void _qed_iscsi_get_ustats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct ustorm_iscsi_stats_drv ustats; + u32 ustats_addr; + + memset(&ustats, 0, sizeof(ustats)); + ustats_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_ISCSI_RX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); + + p_stats->iscsi_rx_data_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_data_pdu_cnt); + p_stats->iscsi_rx_r2t_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_r2t_pdu_cnt); + p_stats->iscsi_rx_total_pdu_cnt = + HILO_64_REGPAIR(ustats.iscsi_rx_total_pdu_cnt); +} + +static 
void _qed_iscsi_get_xstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct xstorm_iscsi_stats_drv xstats; + u32 xstats_addr; + + memset(&xstats, 0, sizeof(xstats)); + xstats_addr = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &xstats, xstats_addr, sizeof(xstats)); + + p_stats->iscsi_tx_go_to_slow_start_event_cnt = + HILO_64_REGPAIR(xstats.iscsi_tx_go_to_slow_start_event_cnt); + p_stats->iscsi_tx_fast_retransmit_event_cnt = + HILO_64_REGPAIR(xstats.iscsi_tx_fast_retransmit_event_cnt); +} + +static void _qed_iscsi_get_ystats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct ystorm_iscsi_stats_drv ystats; + u32 ystats_addr; + + memset(&ystats, 0, sizeof(ystats)); + ystats_addr = BAR0_MAP_REG_YSDM_RAM + + YSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &ystats, ystats_addr, sizeof(ystats)); + + p_stats->iscsi_tx_data_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_data_pdu_cnt); + p_stats->iscsi_tx_r2t_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_r2t_pdu_cnt); + p_stats->iscsi_tx_total_pdu_cnt = + HILO_64_REGPAIR(ystats.iscsi_tx_total_pdu_cnt); +} + +static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iscsi_stats *p_stats) +{ + struct pstorm_iscsi_stats_drv pstats; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_ISCSI_TX_STATS_OFFSET(p_hwfn->rel_pf_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->iscsi_tx_bytes_cnt = + HILO_64_REGPAIR(pstats.iscsi_tx_bytes_cnt); + p_stats->iscsi_tx_packet_cnt = + HILO_64_REGPAIR(pstats.iscsi_tx_packet_cnt); +} + +static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn, + struct qed_iscsi_stats *stats, + bool is_atomic) +{ + struct qed_ptt *p_ptt; + + memset(stats, 0, sizeof(*stats)); + + p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EAGAIN; + } + + _qed_iscsi_get_tstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_mstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_ustats(p_hwfn, p_ptt, stats); + + _qed_iscsi_get_xstats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_ystats(p_hwfn, p_ptt, stats); + _qed_iscsi_get_pstats(p_hwfn, p_ptt, stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +struct qed_hash_iscsi_con { + struct hlist_node node; + struct qed_iscsi_conn *con; +}; + +static int qed_fill_iscsi_dev_info(struct qed_dev *cdev, + struct qed_dev_iscsi_info *info) +{ + struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev); + + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + + info->primary_dbq_rq_addr = + qed_iscsi_get_primary_bdq_prod(hwfn, BDQ_ID_RQ); + info->secondary_bdq_rq_addr = + qed_iscsi_get_secondary_bdq_prod(hwfn, BDQ_ID_RQ); + + info->num_cqs = FEAT_NUM(hwfn, QED_ISCSI_CQ); + + return rc; +} + +static void qed_register_iscsi_ops(struct qed_dev *cdev, + struct qed_iscsi_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.iscsi = ops; + cdev->ops_cookie = cookie; +} + +static struct qed_hash_iscsi_con *qed_iscsi_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_iscsi_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || 
(hash_con->con->icid != handle)) + return NULL; + + return hash_con; +} + +static int qed_iscsi_stop(struct qed_dev *cdev) +{ + int rc; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "iscsi already stopped\n"); + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop iscsi - not all connections were returned\n"); + return -EINVAL; + } + + /* Stop the iscsi */ + rc = qed_sp_iscsi_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, + NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + + return rc; +} + +static int qed_iscsi_start(struct qed_dev *cdev, + struct qed_iscsi_tid *tasks, + void *event_context, + iscsi_event_cb_t async_event_cb) +{ + int rc; + struct qed_tid_mem *tid_info; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "iscsi already started;\n"); + return 0; + } + + rc = qed_sp_iscsi_func_start(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, + NULL, event_context, async_event_cb); + if (rc) { + DP_NOTICE(cdev, "Failed to start iscsi\n"); + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (!tasks) + return 0; + + tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL); + + if (!tid_info) { + qed_iscsi_stop(cdev); + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_iscsi_stop(cdev); + kfree(tid_info); + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_ISCSI * sizeof(u8 *)); + + kfree(tid_info); + + return 0; +} + +static int qed_iscsi_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_iscsi_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC); + if (!hash_con) + return -ENOMEM; + + /* Acquire the connection */ + rc = qed_iscsi_acquire_connection(QED_AFFIN_HWFN(cdev), NULL, + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + return rc; + } + + /* Added the connection to hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + + if (p_doorbell) + *p_doorbell = qed_iscsi_get_db_addr(QED_AFFIN_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_iscsi_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_iscsi_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_iscsi_offload_conn(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_offload *conn_info) +{ + struct qed_hash_iscsi_con *hash_con; + struct qed_iscsi_conn *con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + ether_addr_copy(con->local_mac, conn_info->src.mac); + ether_addr_copy(con->remote_mac, conn_info->dst.mac); + memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip)); + 
memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip)); + con->local_port = conn_info->src.port; + con->remote_port = conn_info->dst.port; + + con->layer_code = conn_info->layer_code; + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->initial_ack = conn_info->initial_ack; + con->vlan_id = conn_info->vlan_id; + con->tcp_flags = conn_info->tcp_flags; + con->ip_version = conn_info->ip_version; + con->default_cq = conn_info->default_cq; + con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt; + con->dup_ack_theshold = conn_info->dup_ack_theshold; + con->rcv_next = conn_info->rcv_next; + con->snd_una = conn_info->snd_una; + con->snd_next = conn_info->snd_next; + con->snd_max = conn_info->snd_max; + con->snd_wnd = conn_info->snd_wnd; + con->rcv_wnd = conn_info->rcv_wnd; + con->snd_wl1 = conn_info->snd_wl1; + con->cwnd = conn_info->cwnd; + con->ss_thresh = conn_info->ss_thresh; + con->srtt = conn_info->srtt; + con->rtt_var = conn_info->rtt_var; + con->ts_recent = conn_info->ts_recent; + con->ts_recent_age = conn_info->ts_recent_age; + con->total_rt = conn_info->total_rt; + con->ka_timeout_delta = conn_info->ka_timeout_delta; + con->rt_timeout_delta = conn_info->rt_timeout_delta; + con->dup_ack_cnt = conn_info->dup_ack_cnt; + con->snd_wnd_probe_cnt = conn_info->snd_wnd_probe_cnt; + con->ka_probe_cnt = conn_info->ka_probe_cnt; + con->rt_cnt = conn_info->rt_cnt; + con->flow_label = conn_info->flow_label; + con->ka_timeout = conn_info->ka_timeout; + con->ka_interval = conn_info->ka_interval; + con->max_rt_time = conn_info->max_rt_time; + con->initial_rcv_wnd = conn_info->initial_rcv_wnd; + con->ttl = conn_info->ttl; + con->tos_or_tc = conn_info->tos_or_tc; + con->remote_port = conn_info->remote_port; + con->local_port = conn_info->local_port; + con->mss = conn_info->mss; + con->snd_wnd_scale = conn_info->snd_wnd_scale; + con->rcv_wnd_scale = conn_info->rcv_wnd_scale; + con->da_timeout_value = conn_info->da_timeout_value; + con->ack_frequency = conn_info->ack_frequency; + + /* Set default values on other connection fields */ + con->offl_flags = 0x1; + + return qed_sp_iscsi_conn_offload(QED_AFFIN_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_update_conn(struct qed_dev *cdev, + u32 handle, + struct qed_iscsi_params_update *conn_info) +{ + struct qed_hash_iscsi_con *hash_con; + struct qed_iscsi_conn *con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + con->update_flag = conn_info->update_flag; + con->max_seq_size = conn_info->max_seq_size; + con->max_recv_pdu_length = conn_info->max_recv_pdu_length; + con->max_send_pdu_length = conn_info->max_send_pdu_length; + con->first_seq_length = conn_info->first_seq_length; + con->exp_stat_sn = conn_info->exp_stat_sn; + + return qed_sp_iscsi_conn_update(QED_AFFIN_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_clear_conn_sq(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + return qed_sp_iscsi_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_destroy_conn(struct qed_dev *cdev, + u32 handle, u8 abrt_conn) +{ + struct qed_hash_iscsi_con *hash_con; + + 
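+ /* Resolve the driver connection from the caller's handle (icid) and
+  * queue a termination ramrod; 'abrt_conn' selects abortive vs. graceful
+  * teardown via the ramrod's 'abortive' field.
+  */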
hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + hash_con->con->abortive_dsconnect = abrt_conn; + + return qed_sp_iscsi_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_iscsi_stats_context(struct qed_dev *cdev, + struct qed_iscsi_stats *stats, + bool is_atomic) +{ + return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + +static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) +{ + return qed_iscsi_stats_context(cdev, stats, false); +} + +static int qed_iscsi_change_mac(struct qed_dev *cdev, + u32 handle, const u8 *mac) +{ + struct qed_hash_iscsi_con *hash_con; + + hash_con = qed_iscsi_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + return -EINVAL; + } + + return qed_sp_iscsi_mac_update(QED_AFFIN_HWFN(cdev), hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) +{ + struct qed_iscsi_stats proto_stats; + + /* Retrieve FW statistics */ + memset(&proto_stats, 0, sizeof(proto_stats)); + if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, + "Failed to collect ISCSI statistics\n"); + return; + } + + /* Translate FW statistics into struct */ + stats->rx_pdus = proto_stats.iscsi_rx_total_pdu_cnt; + stats->tx_pdus = proto_stats.iscsi_tx_total_pdu_cnt; + stats->rx_bytes = proto_stats.iscsi_rx_bytes_cnt; + stats->tx_bytes = proto_stats.iscsi_tx_bytes_cnt; +} + +static const struct qed_iscsi_ops qed_iscsi_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_iscsi_dev_info, + .register_ops = &qed_register_iscsi_ops, + .start = &qed_iscsi_start, + .stop = &qed_iscsi_stop, + .acquire_conn = &qed_iscsi_acquire_conn, + .release_conn = &qed_iscsi_release_conn, + .offload_conn = &qed_iscsi_offload_conn, + .update_conn = &qed_iscsi_update_conn, + .destroy_conn = &qed_iscsi_destroy_conn, + .clear_sq = &qed_iscsi_clear_conn_sq, + .get_stats = &qed_iscsi_stats, + .change_mac = &qed_iscsi_change_mac, +}; + +const struct qed_iscsi_ops *qed_get_iscsi_ops(void) +{ + return &qed_iscsi_ops_pass; +} +EXPORT_SYMBOL(qed_get_iscsi_ops); + +void qed_put_iscsi_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_iscsi_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h new file mode 100644 index 0000000000..974cb8d266 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_ISCSI_H +#define _QED_ISCSI_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/tcp_common.h> +#include <linux/qed/qed_iscsi_if.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +struct qed_iscsi_info { + spinlock_t lock; /* Connection resources. 
*/ + struct list_head free_list; + u16 max_num_outstanding_tasks; + void *event_context; + iscsi_event_cb_t event_cb; +}; + +#if IS_ENABLED(CONFIG_QED_ISCSI) +int qed_iscsi_alloc(struct qed_hwfn *p_hwfn); + +void qed_iscsi_setup(struct qed_hwfn *p_hwfn); + +void qed_iscsi_free(struct qed_hwfn *p_hwfn); + +/** + * qed_get_protocol_stats_iscsi(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. + */ +void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, + struct qed_mcp_iscsi_stats *stats, + bool is_atomic); +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {} + +static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {} + +static inline void +qed_get_protocol_stats_iscsi(struct qed_dev *cdev, + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) {} +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c new file mode 100644 index 0000000000..1d1d4caad6 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -0,0 +1,3262 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/spinlock.h> +#include <linux/tcp.h> +#include "qed_cxt.h" +#include "qed_hw.h" +#include "qed_ll2.h" +#include "qed_rdma.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_ooo.h" + +#define QED_IWARP_ORD_DEFAULT 32 +#define QED_IWARP_IRD_DEFAULT 32 +#define QED_IWARP_MAX_FW_MSS 4120 + +#define QED_EP_SIG 0xecabcdef + +struct mpa_v2_hdr { + __be16 ird; + __be16 ord; +}; + +#define MPA_V2_PEER2PEER_MODEL 0x8000 +#define MPA_V2_SEND_RTR 0x4000 /* on ird */ +#define MPA_V2_READ_RTR 0x4000 /* on ord */ +#define MPA_V2_WRITE_RTR 0x8000 +#define MPA_V2_IRD_ORD_MASK 0x3FFF + +#define MPA_REV2(_mpa_rev) ((_mpa_rev) == MPA_NEGOTIATION_TYPE_ENHANCED) + +#define QED_IWARP_INVALID_TCP_CID 0xffffffff + +#define QED_IWARP_RCV_WND_SIZE_DEF_BB_2P (200 * 1024) +#define QED_IWARP_RCV_WND_SIZE_DEF_BB_4P (100 * 1024) +#define QED_IWARP_RCV_WND_SIZE_DEF_AH_2P (150 * 1024) +#define QED_IWARP_RCV_WND_SIZE_DEF_AH_4P (90 * 1024) + +#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff) +#define TIMESTAMP_HEADER_SIZE (12) +#define QED_IWARP_MAX_FIN_RT_DEFAULT (2) + +#define QED_IWARP_TS_EN BIT(0) +#define QED_IWARP_DA_EN BIT(1) +#define QED_IWARP_PARAM_CRC_NEEDED (1) +#define QED_IWARP_PARAM_P2P (1) + +#define QED_IWARP_DEF_MAX_RT_TIME (0) +#define QED_IWARP_DEF_CWND_FACTOR (4) +#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5) +#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */ +#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */ + +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + __le16 echo, union event_ring_data *data, + u8 fw_return_code); + +/* Override devinfo with iWARP specific values */ +void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + dev->max_inline = 
IWARP_REQ_MAX_INLINE_DATA_SIZE; + dev->max_qp = min_t(u32, + IWARP_MAX_QPS, + p_hwfn->p_rdma_info->num_qps) - + QED_IWARP_PREALLOC_CNT; + + dev->max_cq = dev->max_qp; + + dev->max_qp_resp_rd_atomic_resc = QED_IWARP_IRD_DEFAULT; + dev->max_qp_req_rd_atomic_resc = QED_IWARP_ORD_DEFAULT; +} + +void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_TCP; + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1); + p_hwfn->b_rdma_enabled_in_prs = true; +} + +/* We have two cid maps, one for tcp which should be used only from passive + * syn processing and replacing a pre-allocated ep in the list. The second + * for active tcp and for QPs. + */ +static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid) +{ + cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + + if (cid < QED_IWARP_PREALLOC_CNT) + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, + cid); + else + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +void +qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_init_func_ramrod_data *p_ramrod) +{ + p_ramrod->iwarp.ll2_ooo_q_index = + RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + + p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle; + + p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT); + p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT); + p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT; + + return; +} + +static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid) +{ + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) { + DP_NOTICE(p_hwfn, "Failed in allocating iwarp cid\n"); + return rc; + } + *cid += qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *cid); + if (rc) + qed_iwarp_cid_cleaned(p_hwfn, *cid); + + return rc; +} + +static void qed_iwarp_set_tcp_cid(struct qed_hwfn *p_hwfn, u32 cid) +{ + cid -= qed_cxt_get_proto_cid_start(p_hwfn, p_hwfn->p_rdma_info->proto); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +/* This function allocates a cid for passive tcp (called from syn receive) + * the reason it's separate from the regular cid allocation is because it + * is assured that these cids already have ilt allocated. 
They are preallocated + * to ensure that we won't need to allocate memory during syn processing + */ +static int qed_iwarp_alloc_tcp_cid(struct qed_hwfn *p_hwfn, u32 *cid) +{ + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tcp_cid_map, cid); + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "can't allocate iwarp tcp cid max-count=%d\n", + p_hwfn->p_rdma_info->tcp_cid_map.max_count); + + *cid = QED_IWARP_INVALID_TCP_CID; + return rc; + } + + *cid += qed_cxt_get_proto_cid_start(p_hwfn, + p_hwfn->p_rdma_info->proto); + return 0; +} + +int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_create_qp_out_params *out_params) +{ + struct iwarp_create_qp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u16 physical_queue; + u32 cid; + int rc; + + qp->shared_queue = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + &qp->shared_queue_phys_addr, + GFP_KERNEL); + if (!qp->shared_queue) + return -ENOMEM; + + out_params->sq_pbl_virt = (u8 *)qp->shared_queue + + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; + out_params->sq_pbl_phys = qp->shared_queue_phys_addr + + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET; + out_params->rq_pbl_virt = (u8 *)qp->shared_queue + + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; + out_params->rq_pbl_phys = qp->shared_queue_phys_addr + + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET; + + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + goto err1; + + qp->icid = (u16)cid; + + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.cid = qp->icid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_CREATE_QP, + PROTOCOLID_IWARP, &init_data); + if (rc) + goto err2; + + p_ramrod = &p_ent->ramrod.iwarp_create_qp; + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_FMR_AND_RESERVED_EN, + qp->fmr_and_reserved_lkey); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_SIGNALED_COMP, qp->signal_all); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_RDMA_RD_EN, + qp->incoming_rdma_read_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_RDMA_WR_EN, + qp->incoming_rdma_write_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_ATOMIC_EN, + qp->incoming_atomic_en); + + SET_FIELD(p_ramrod->flags, + IWARP_CREATE_QP_RAMROD_DATA_SRQ_FLG, qp->use_srq); + + p_ramrod->pd = cpu_to_le16(qp->pd); + p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); + p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); + + p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; + + p_ramrod->cq_cid_for_sq = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); + p_ramrod->cq_cid_for_rq = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->rq_cq_id); + + p_ramrod->dpi = cpu_to_le16(qp->dpi); + + physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_ramrod->physical_q0 = cpu_to_le16(physical_queue); + physical_queue = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_ramrod->physical_q1 = cpu_to_le16(physical_queue); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err2; + + return rc; + +err2: + qed_iwarp_cid_cleaned(p_hwfn, cid); +err1: 
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + qp->shared_queue, qp->shared_queue_phys_addr); + + return rc; +} + +static int qed_iwarp_modify_fw(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct iwarp_modify_qp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u16 flags, trans_to_state; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MODIFY_QP, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.iwarp_modify_qp; + + flags = le16_to_cpu(p_ramrod->flags); + SET_FIELD(flags, IWARP_MODIFY_QP_RAMROD_DATA_STATE_TRANS_EN, 0x1); + p_ramrod->flags = cpu_to_le16(flags); + + if (qp->iwarp_state == QED_IWARP_QP_STATE_CLOSING) + trans_to_state = IWARP_MODIFY_QP_STATE_CLOSING; + else + trans_to_state = IWARP_MODIFY_QP_STATE_ERROR; + + p_ramrod->transition_to_state = cpu_to_le16(trans_to_state); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x)rc=%d\n", qp->icid, rc); + + return rc; +} + +enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state) +{ + switch (state) { + case QED_ROCE_QP_STATE_RESET: + case QED_ROCE_QP_STATE_INIT: + case QED_ROCE_QP_STATE_RTR: + return QED_IWARP_QP_STATE_IDLE; + case QED_ROCE_QP_STATE_RTS: + return QED_IWARP_QP_STATE_RTS; + case QED_ROCE_QP_STATE_SQD: + return QED_IWARP_QP_STATE_CLOSING; + case QED_ROCE_QP_STATE_ERR: + return QED_IWARP_QP_STATE_ERROR; + case QED_ROCE_QP_STATE_SQE: + return QED_IWARP_QP_STATE_TERMINATE; + default: + return QED_IWARP_QP_STATE_ERROR; + } +} + +static enum qed_roce_qp_state +qed_iwarp2roce_state(enum qed_iwarp_qp_state state) +{ + switch (state) { + case QED_IWARP_QP_STATE_IDLE: + return QED_ROCE_QP_STATE_INIT; + case QED_IWARP_QP_STATE_RTS: + return QED_ROCE_QP_STATE_RTS; + case QED_IWARP_QP_STATE_TERMINATE: + return QED_ROCE_QP_STATE_SQE; + case QED_IWARP_QP_STATE_CLOSING: + return QED_ROCE_QP_STATE_SQD; + case QED_IWARP_QP_STATE_ERROR: + return QED_ROCE_QP_STATE_ERR; + default: + return QED_ROCE_QP_STATE_ERR; + } +} + +static const char * const iwarp_state_names[] = { + "IDLE", + "RTS", + "TERMINATE", + "CLOSING", + "ERROR", +}; + +int +qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + enum qed_iwarp_qp_state new_state, bool internal) +{ + enum qed_iwarp_qp_state prev_iw_state; + bool modify_fw = false; + int rc = 0; + + /* modify QP can be called from upper-layer or as a result of async + * RST/FIN... 
therefore need to protect + */ + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + prev_iw_state = qp->iwarp_state; + + if (prev_iw_state == new_state) { + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + return 0; + } + + switch (prev_iw_state) { + case QED_IWARP_QP_STATE_IDLE: + switch (new_state) { + case QED_IWARP_QP_STATE_RTS: + qp->iwarp_state = QED_IWARP_QP_STATE_RTS; + break; + case QED_IWARP_QP_STATE_ERROR: + qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; + if (!internal) + modify_fw = true; + break; + default: + break; + } + break; + case QED_IWARP_QP_STATE_RTS: + switch (new_state) { + case QED_IWARP_QP_STATE_CLOSING: + if (!internal) + modify_fw = true; + + qp->iwarp_state = QED_IWARP_QP_STATE_CLOSING; + break; + case QED_IWARP_QP_STATE_ERROR: + if (!internal) + modify_fw = true; + qp->iwarp_state = QED_IWARP_QP_STATE_ERROR; + break; + default: + break; + } + break; + case QED_IWARP_QP_STATE_ERROR: + switch (new_state) { + case QED_IWARP_QP_STATE_IDLE: + + qp->iwarp_state = new_state; + break; + case QED_IWARP_QP_STATE_CLOSING: + /* could happen due to race... do nothing.... */ + break; + default: + rc = -EINVAL; + } + break; + case QED_IWARP_QP_STATE_TERMINATE: + case QED_IWARP_QP_STATE_CLOSING: + qp->iwarp_state = new_state; + break; + default: + break; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) %s --> %s%s\n", + qp->icid, + iwarp_state_names[prev_iw_state], + iwarp_state_names[qp->iwarp_state], + internal ? "internal" : ""); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.qp_lock); + + if (modify_fw) + rc = qed_iwarp_modify_fw(p_hwfn, qp); + + return rc; +} + +int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_DESTROY_QP, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) rc = %d\n", qp->icid, rc); + + return rc; +} + +static void qed_iwarp_destroy_ep(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + bool remove_from_active_list) +{ + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*ep->ep_buffer_virt), + ep->ep_buffer_virt, ep->ep_buffer_phys); + + if (remove_from_active_list) { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + + if (ep->qp) + ep->qp->ep = NULL; + + kfree(ep); +} + +int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + struct qed_iwarp_ep *ep = qp->ep; + int wait_count = 0; + int rc = 0; + + if (qp->iwarp_state != QED_IWARP_QP_STATE_ERROR) { + rc = qed_iwarp_modify_qp(p_hwfn, qp, + QED_IWARP_QP_STATE_ERROR, false); + if (rc) + return rc; + } + + /* Make sure ep is closed before returning and freeing memory. 
*/ + if (ep) { + while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED && + wait_count++ < 200) + msleep(100); + + if (ep->state != QED_IWARP_EP_CLOSED) + DP_NOTICE(p_hwfn, "ep state close timeout state=%x\n", + ep->state); + + qed_iwarp_destroy_ep(p_hwfn, ep, false); + } + + rc = qed_iwarp_fw_destroy(p_hwfn, qp); + + if (qp->shared_queue) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + IWARP_SHARED_QUEUE_PAGE_SIZE, + qp->shared_queue, qp->shared_queue_phys_addr); + + return rc; +} + +static int +qed_iwarp_create_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep **ep_out) +{ + struct qed_iwarp_ep *ep; + int rc; + + ep = kzalloc(sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + ep->state = QED_IWARP_EP_INIT; + + ep->ep_buffer_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*ep->ep_buffer_virt), + &ep->ep_buffer_phys, + GFP_KERNEL); + if (!ep->ep_buffer_virt) { + rc = -ENOMEM; + goto err; + } + + ep->sig = QED_EP_SIG; + + *ep_out = ep; + + return 0; + +err: + kfree(ep); + return rc; +} + +static void +qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "local_mac=%x %x %x, remote_mac=%x %x %x\n", + p_tcp_ramrod->tcp.local_mac_addr_lo, + p_tcp_ramrod->tcp.local_mac_addr_mid, + p_tcp_ramrod->tcp.local_mac_addr_hi, + p_tcp_ramrod->tcp.remote_mac_addr_lo, + p_tcp_ramrod->tcp.remote_mac_addr_mid, + p_tcp_ramrod->tcp.remote_mac_addr_hi); + + if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n", + p_tcp_ramrod->tcp.local_ip, + p_tcp_ramrod->tcp.local_port, + p_tcp_ramrod->tcp.remote_ip, + p_tcp_ramrod->tcp.remote_port, + p_tcp_ramrod->tcp.vlan_id); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n", + p_tcp_ramrod->tcp.local_ip, + p_tcp_ramrod->tcp.local_port, + p_tcp_ramrod->tcp.remote_ip, + p_tcp_ramrod->tcp.remote_port, + p_tcp_ramrod->tcp.vlan_id); + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "flow_label=%x, ttl=%x, tos_or_tc=%x, mss=%x, rcv_wnd_scale=%x, connect_mode=%x, flags=%x\n", + p_tcp_ramrod->tcp.flow_label, + p_tcp_ramrod->tcp.ttl, + p_tcp_ramrod->tcp.tos_or_tc, + p_tcp_ramrod->tcp.mss, + p_tcp_ramrod->tcp.rcv_wnd_scale, + p_tcp_ramrod->tcp.connect_mode, + p_tcp_ramrod->tcp.flags); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "syn_ip_payload_length=%x, lo=%x, hi=%x\n", + p_tcp_ramrod->tcp.syn_ip_payload_length, + p_tcp_ramrod->tcp.syn_phy_addr_lo, + p_tcp_ramrod->tcp.syn_phy_addr_hi); +} + +static int +qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct iwarp_tcp_offload_ramrod_data *p_tcp_ramrod; + struct tcp_offload_params_opt2 *tcp; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + dma_addr_t async_output_phys; + dma_addr_t in_pdata_phys; + u16 physical_q; + u16 flags = 0; + u8 tcp_flags; + int rc; + int i; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = ep->tcp_cid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + if (ep->connect_mode == TCP_CONNECT_PASSIVE) + init_data.comp_mode = QED_SPQ_MODE_CB; + else + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_TCP_OFFLOAD, + PROTOCOLID_IWARP, &init_data); + if (rc) + return rc; + + p_tcp_ramrod = &p_ent->ramrod.iwarp_tcp_offload; + + in_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, 
in_pdata); + DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.incoming_ulp_buffer.addr, + in_pdata_phys); + + p_tcp_ramrod->iwarp.incoming_ulp_buffer.len = + cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); + + async_output_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, async_output); + DMA_REGPAIR_LE(p_tcp_ramrod->iwarp.async_eqe_output_buf, + async_output_phys); + + p_tcp_ramrod->iwarp.handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); + p_tcp_ramrod->iwarp.handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); + + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_tcp_ramrod->iwarp.physical_q0 = cpu_to_le16(physical_q); + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_tcp_ramrod->iwarp.physical_q1 = cpu_to_le16(physical_q); + p_tcp_ramrod->iwarp.mpa_mode = iwarp_info->mpa_rev; + + tcp = &p_tcp_ramrod->tcp; + qed_set_fw_mac_addr(&tcp->remote_mac_addr_hi, + &tcp->remote_mac_addr_mid, + &tcp->remote_mac_addr_lo, ep->remote_mac_addr); + qed_set_fw_mac_addr(&tcp->local_mac_addr_hi, &tcp->local_mac_addr_mid, + &tcp->local_mac_addr_lo, ep->local_mac_addr); + + tcp->vlan_id = cpu_to_le16(ep->cm_info.vlan); + + tcp_flags = p_hwfn->p_rdma_info->iwarp.tcp_flags; + + SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, + !!(tcp_flags & QED_IWARP_TS_EN)); + + SET_FIELD(flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, + !!(tcp_flags & QED_IWARP_DA_EN)); + + tcp->flags = cpu_to_le16(flags); + tcp->ip_version = ep->cm_info.ip_version; + + for (i = 0; i < 4; i++) { + tcp->remote_ip[i] = cpu_to_le32(ep->cm_info.remote_ip[i]); + tcp->local_ip[i] = cpu_to_le32(ep->cm_info.local_ip[i]); + } + + tcp->remote_port = cpu_to_le16(ep->cm_info.remote_port); + tcp->local_port = cpu_to_le16(ep->cm_info.local_port); + tcp->mss = cpu_to_le16(ep->mss); + tcp->flow_label = 0; + tcp->ttl = 0x40; + tcp->tos_or_tc = 0; + + tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME; + tcp->cwnd = cpu_to_le32(QED_IWARP_DEF_CWND_FACTOR * ep->mss); + tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT; + tcp->ka_timeout = cpu_to_le32(QED_IWARP_DEF_KA_TIMEOUT); + tcp->ka_interval = cpu_to_le32(QED_IWARP_DEF_KA_INTERVAL); + + tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale; + tcp->connect_mode = ep->connect_mode; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + tcp->syn_ip_payload_length = + cpu_to_le16(ep->syn_ip_payload_length); + tcp->syn_phy_addr_hi = DMA_HI_LE(ep->syn_phy_addr); + tcp->syn_phy_addr_lo = DMA_LO_LE(ep->syn_phy_addr); + } + + qed_iwarp_print_tcp_ramrod(p_hwfn, p_tcp_ramrod); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "EP(0x%x) Offload completed rc=%d\n", ep->tcp_cid, rc); + + return rc; +} + +static void +qed_iwarp_mpa_received(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct qed_iwarp_cm_event_params params; + struct mpa_v2_hdr *mpa_v2; + union async_output *async_data; + u16 mpa_ord, mpa_ird; + u8 mpa_hdr_size = 0; + u16 ulp_data_len; + u8 mpa_rev; + + async_data = &ep->ep_buffer_virt->async_output; + + mpa_rev = async_data->mpa_request.mpa_handshake_mode; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "private_data_len=%x handshake_mode=%x private_data=(%x)\n", + async_data->mpa_request.ulp_data_len, + mpa_rev, *((u32 *)(ep->ep_buffer_virt->in_pdata))); + + if (mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { + /* Read ord/ird values from private data buffer */ + mpa_v2 = (struct mpa_v2_hdr *)ep->ep_buffer_virt->in_pdata; + mpa_hdr_size = sizeof(*mpa_v2); + + mpa_ord = ntohs(mpa_v2->ord); + 
mpa_ird = ntohs(mpa_v2->ird); + + /* Temprary store in cm_info incoming ord/ird requested, later + * replace with negotiated value during accept + */ + ep->cm_info.ord = (u8)min_t(u16, + (mpa_ord & MPA_V2_IRD_ORD_MASK), + QED_IWARP_ORD_DEFAULT); + + ep->cm_info.ird = (u8)min_t(u16, + (mpa_ird & MPA_V2_IRD_ORD_MASK), + QED_IWARP_IRD_DEFAULT); + + /* Peer2Peer negotiation */ + ep->rtr_type = MPA_RTR_TYPE_NONE; + if (mpa_ird & MPA_V2_PEER2PEER_MODEL) { + if (mpa_ord & MPA_V2_WRITE_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_WRITE; + + if (mpa_ord & MPA_V2_READ_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_READ; + + if (mpa_ird & MPA_V2_SEND_RTR) + ep->rtr_type |= MPA_RTR_TYPE_ZERO_SEND; + + ep->rtr_type &= iwarp_info->rtr_type; + + /* if we're left with no match send our capabilities */ + if (ep->rtr_type == MPA_RTR_TYPE_NONE) + ep->rtr_type = iwarp_info->rtr_type; + } + + ep->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; + } else { + ep->cm_info.ord = QED_IWARP_ORD_DEFAULT; + ep->cm_info.ird = QED_IWARP_IRD_DEFAULT; + ep->mpa_rev = MPA_NEGOTIATION_TYPE_BASIC; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x rtr:0x%x ulp_data_len = %x mpa_hdr_size = %x\n", + mpa_rev, ep->cm_info.ord, ep->cm_info.ird, ep->rtr_type, + async_data->mpa_request.ulp_data_len, mpa_hdr_size); + + /* Strip mpa v2 hdr from private data before sending to upper layer */ + ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_hdr_size; + + ulp_data_len = le16_to_cpu(async_data->mpa_request.ulp_data_len); + ep->cm_info.private_data_len = ulp_data_len - mpa_hdr_size; + + params.event = QED_IWARP_EVENT_MPA_REQUEST; + params.cm_info = &ep->cm_info; + params.ep_context = ep; + params.status = 0; + + ep->state = QED_IWARP_EP_MPA_REQ_RCVD; + ep->event_cb(ep->cb_context, ¶ms); +} + +static int +qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod; + struct mpa_outgoing_params *common; + struct qed_iwarp_info *iwarp_info; + struct qed_sp_init_data init_data; + dma_addr_t async_output_phys; + struct qed_spq_entry *p_ent; + dma_addr_t out_pdata_phys; + dma_addr_t in_pdata_phys; + struct qed_rdma_qp *qp; + bool reject; + u32 val; + int rc; + + if (!ep) + return -EINVAL; + + qp = ep->qp; + reject = !qp; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = reject ? 
ep->tcp_cid : qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + + if (ep->connect_mode == TCP_CONNECT_ACTIVE) + init_data.comp_mode = QED_SPQ_MODE_CB; + else + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD, + PROTOCOLID_IWARP, &init_data); + if (rc) + return rc; + + p_mpa_ramrod = &p_ent->ramrod.iwarp_mpa_offload; + common = &p_mpa_ramrod->common; + + out_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, out_pdata); + DMA_REGPAIR_LE(common->outgoing_ulp_buffer.addr, out_pdata_phys); + + val = ep->cm_info.private_data_len; + common->outgoing_ulp_buffer.len = cpu_to_le16(val); + common->crc_needed = p_hwfn->p_rdma_info->iwarp.crc_needed; + + common->out_rq.ord = cpu_to_le32(ep->cm_info.ord); + common->out_rq.ird = cpu_to_le32(ep->cm_info.ird); + + val = p_hwfn->hw_info.opaque_fid << 16 | ep->tcp_cid; + p_mpa_ramrod->tcp_cid = cpu_to_le32(val); + + in_pdata_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, in_pdata); + p_mpa_ramrod->tcp_connect_side = ep->connect_mode; + DMA_REGPAIR_LE(p_mpa_ramrod->incoming_ulp_buffer.addr, + in_pdata_phys); + p_mpa_ramrod->incoming_ulp_buffer.len = + cpu_to_le16(sizeof(ep->ep_buffer_virt->in_pdata)); + async_output_phys = ep->ep_buffer_phys + + offsetof(struct qed_iwarp_ep_memory, async_output); + DMA_REGPAIR_LE(p_mpa_ramrod->async_eqe_output_buf, + async_output_phys); + p_mpa_ramrod->handle_for_async.hi = cpu_to_le32(PTR_HI(ep)); + p_mpa_ramrod->handle_for_async.lo = cpu_to_le32(PTR_LO(ep)); + + if (!reject) { + DMA_REGPAIR_LE(p_mpa_ramrod->shared_queue_addr, + qp->shared_queue_phys_addr); + p_mpa_ramrod->stats_counter_id = + RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + qp->stats_queue; + } else { + common->reject = 1; + } + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + p_mpa_ramrod->rcv_wnd = cpu_to_le16(iwarp_info->rcv_wnd_size); + p_mpa_ramrod->mode = ep->mpa_rev; + SET_FIELD(p_mpa_ramrod->rtr_pref, + IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type); + + ep->state = QED_IWARP_EP_MPA_OFFLOADED; + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (!reject) + ep->cid = qp->icid; /* Now they're migrated. */ + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) EP(0x%x) MPA Offload rc = %d IRD=0x%x ORD=0x%x rtr_type=%d mpa_rev=%d reject=%d\n", + reject ? 
0xffff : qp->icid, + ep->tcp_cid, + rc, + ep->cm_info.ird, + ep->cm_info.ord, ep->rtr_type, ep->mpa_rev, reject); + return rc; +} + +static void +qed_iwarp_return_ep(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + ep->state = QED_IWARP_EP_INIT; + if (ep->qp) + ep->qp->ep = NULL; + ep->qp = NULL; + memset(&ep->cm_info, 0, sizeof(ep->cm_info)); + + if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { + /* We don't care about the return code, it's ok if tcp_cid + * remains invalid...in this case we'll defer allocation + */ + qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); + } + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + list_move_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); +} + +static void +qed_iwarp_parse_private_data(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct mpa_v2_hdr *mpa_v2_params; + union async_output *async_data; + u16 mpa_ird, mpa_ord; + u8 mpa_data_size = 0; + u16 ulp_data_len; + + if (MPA_REV2(p_hwfn->p_rdma_info->iwarp.mpa_rev)) { + mpa_v2_params = + (struct mpa_v2_hdr *)(ep->ep_buffer_virt->in_pdata); + mpa_data_size = sizeof(*mpa_v2_params); + mpa_ird = ntohs(mpa_v2_params->ird); + mpa_ord = ntohs(mpa_v2_params->ord); + + ep->cm_info.ird = (u8)(mpa_ord & MPA_V2_IRD_ORD_MASK); + ep->cm_info.ord = (u8)(mpa_ird & MPA_V2_IRD_ORD_MASK); + } + + async_data = &ep->ep_buffer_virt->async_output; + ep->cm_info.private_data = ep->ep_buffer_virt->in_pdata + mpa_data_size; + + ulp_data_len = le16_to_cpu(async_data->mpa_response.ulp_data_len); + ep->cm_info.private_data_len = ulp_data_len - mpa_data_size; +} + +static void +qed_iwarp_mpa_reply_arrived(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + struct qed_iwarp_cm_event_params params; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + DP_NOTICE(p_hwfn, + "MPA reply event not expected on passive side!\n"); + return; + } + + params.event = QED_IWARP_EVENT_ACTIVE_MPA_REPLY; + + qed_iwarp_parse_private_data(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", + ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); + + params.cm_info = &ep->cm_info; + params.ep_context = ep; + params.status = 0; + + ep->mpa_reply_processed = true; + + ep->event_cb(ep->cb_context, ¶ms); +} + +#define QED_IWARP_CONNECT_MODE_STRING(ep) \ + ((ep)->connect_mode == TCP_CONNECT_PASSIVE) ? 
"Passive" : "Active" + +/* Called as a result of the event: + * IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE + */ +static void +qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + if (ep->connect_mode == TCP_CONNECT_ACTIVE) + params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; + else + params.event = QED_IWARP_EVENT_PASSIVE_COMPLETE; + + if (ep->connect_mode == TCP_CONNECT_ACTIVE && !ep->mpa_reply_processed) + qed_iwarp_parse_private_data(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_NEGOTIATE (v%d): ORD: 0x%x IRD: 0x%x\n", + ep->mpa_rev, ep->cm_info.ord, ep->cm_info.ird); + + params.cm_info = &ep->cm_info; + + params.ep_context = ep; + + switch (fw_return_code) { + case RDMA_RETURN_OK: + ep->qp->max_rd_atomic_req = ep->cm_info.ord; + ep->qp->max_rd_atomic_resp = ep->cm_info.ird; + qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_RTS, 1); + ep->state = QED_IWARP_EP_ESTABLISHED; + params.status = 0; + break; + case IWARP_CONN_ERROR_MPA_TIMEOUT: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA timeout\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -EBUSY; + break; + case IWARP_CONN_ERROR_MPA_ERROR_REJECT: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Reject\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_RST: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA reset(tcp cid: 0x%x)\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid, + ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_MPA_FIN: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA received FIN\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INSUF_IRD: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA insufficient ird\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_RTR_MISMATCH: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA RTR MISMATCH\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INVALID_PACKET: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_LOCAL_ERROR: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Local Error\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_TERMINATE: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA TERMINATE\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->cid); + params.status = -ECONNREFUSED; + break; + default: + params.status = -ECONNRESET; + break; + } + + if (fw_return_code != RDMA_RETURN_OK) + /* paired with READ_ONCE in destroy_qp */ + smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); + + ep->event_cb(ep->cb_context, ¶ms); + + /* on passive side, if there is no associated QP (REJECT) we need to + * return the ep to the pool, (in the regular case we add an element + * in accept instead of this one. + * In both cases we need to remove it from the ep_list. 
+ */ + if (fw_return_code != RDMA_RETURN_OK) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + if ((ep->connect_mode == TCP_CONNECT_PASSIVE) && + (!ep->qp)) { /* Rejected */ + qed_iwarp_return_ep(p_hwfn, ep); + } else { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + } +} + +static void +qed_iwarp_mpa_v2_set_private(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 *mpa_data_size) +{ + struct mpa_v2_hdr *mpa_v2_params; + u16 mpa_ird, mpa_ord; + + *mpa_data_size = 0; + if (MPA_REV2(ep->mpa_rev)) { + mpa_v2_params = + (struct mpa_v2_hdr *)ep->ep_buffer_virt->out_pdata; + *mpa_data_size = sizeof(*mpa_v2_params); + + mpa_ird = (u16)ep->cm_info.ird; + mpa_ord = (u16)ep->cm_info.ord; + + if (ep->rtr_type != MPA_RTR_TYPE_NONE) { + mpa_ird |= MPA_V2_PEER2PEER_MODEL; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_SEND) + mpa_ird |= MPA_V2_SEND_RTR; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_WRITE) + mpa_ord |= MPA_V2_WRITE_RTR; + + if (ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) + mpa_ord |= MPA_V2_READ_RTR; + } + + mpa_v2_params->ird = htons(mpa_ird); + mpa_v2_params->ord = htons(mpa_ord); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_NEGOTIATE Header: [%x ord:%x ird] %x ord:%x ird:%x peer2peer:%x rtr_send:%x rtr_write:%x rtr_read:%x\n", + mpa_v2_params->ird, + mpa_v2_params->ord, + *((u32 *)mpa_v2_params), + mpa_ord & MPA_V2_IRD_ORD_MASK, + mpa_ird & MPA_V2_IRD_ORD_MASK, + !!(mpa_ird & MPA_V2_PEER2PEER_MODEL), + !!(mpa_ird & MPA_V2_SEND_RTR), + !!(mpa_ord & MPA_V2_WRITE_RTR), + !!(mpa_ord & MPA_V2_READ_RTR)); + } +} + +int qed_iwarp_connect(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_info *iwarp_info; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + u32 cid; + int rc; + + if ((iparams->cm_info.ord > QED_IWARP_ORD_DEFAULT) || + (iparams->cm_info.ird > QED_IWARP_IRD_DEFAULT)) { + DP_NOTICE(p_hwfn, + "QP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", + iparams->qp->icid, iparams->cm_info.ord, + iparams->cm_info.ird); + + return -EINVAL; + } + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + + /* Allocate ep object */ + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + return rc; + + rc = qed_iwarp_create_ep(p_hwfn, &ep); + if (rc) + goto err; + + ep->tcp_cid = cid; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep->qp = iparams->qp; + ep->qp->ep = ep; + ether_addr_copy(ep->remote_mac_addr, iparams->remote_mac_addr); + ether_addr_copy(ep->local_mac_addr, iparams->local_mac_addr); + memcpy(&ep->cm_info, &iparams->cm_info, sizeof(ep->cm_info)); + + ep->cm_info.ord = iparams->cm_info.ord; + ep->cm_info.ird = iparams->cm_info.ird; + + ep->rtr_type = iwarp_info->rtr_type; + if (!iwarp_info->peer2peer) + ep->rtr_type = MPA_RTR_TYPE_NONE; + + if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && (ep->cm_info.ord == 0)) + ep->cm_info.ord = 1; + + ep->mpa_rev = iwarp_info->mpa_rev; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->cm_info.private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->cm_info.private_data, + iparams->cm_info.private_data_len); + + ep->mss = iparams->mss; + ep->mss = min_t(u16, 
QED_IWARP_MAX_FW_MSS, ep->mss); + + ep->event_cb = iparams->event_cb; + ep->cb_context = iparams->cb_context; + ep->connect_mode = TCP_CONNECT_ACTIVE; + + oparams->ep_context = ep; + + rc = qed_iwarp_tcp_offload(p_hwfn, ep); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x) rc = %d\n", + iparams->qp->icid, ep->tcp_cid, rc); + + if (rc) { + qed_iwarp_destroy_ep(p_hwfn, ep, true); + goto err; + } + + return rc; +err: + qed_iwarp_cid_cleaned(p_hwfn, cid); + + return rc; +} + +static struct qed_iwarp_ep *qed_iwarp_get_free_ep(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_ep *ep = NULL; + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + if (list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { + DP_ERR(p_hwfn, "Ep list is empty\n"); + goto out; + } + + ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, + struct qed_iwarp_ep, list_entry); + + /* in some cases we could have failed allocating a tcp cid when added + * from accept / failure... retry now..this is not the common case. + */ + if (ep->tcp_cid == QED_IWARP_INVALID_TCP_CID) { + rc = qed_iwarp_alloc_tcp_cid(p_hwfn, &ep->tcp_cid); + + /* if we fail we could look for another entry with a valid + * tcp_cid, but since we don't expect to reach this anyway + * it's not worth the handling + */ + if (rc) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + ep = NULL; + goto out; + } + } + + list_del(&ep->list_entry); + +out: + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + return ep; +} + +#define QED_IWARP_MAX_CID_CLEAN_TIME 100 +#define QED_IWARP_MAX_NO_PROGRESS_CNT 5 + +/* This function waits for all the bits of a bmap to be cleared, as long as + * there is progress ( i.e. the number of bits left to be cleared decreases ) + * the function continues. + */ +static int +qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap) +{ + int prev_weight = 0; + int wait_count = 0; + int weight = 0; + + weight = bitmap_weight(bmap->bitmap, bmap->max_count); + prev_weight = weight; + + while (weight) { + /* If the HW device is during recovery, all resources are + * immediately reset without receiving a per-cid indication + * from HW. In this case we don't expect the cid_map to be + * cleared. 
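+ * Returning success here keeps the teardown flow from waiting for
+ * per-cid completions that will never arrive.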
+ */ + if (p_hwfn->cdev->recov_in_prog) + return 0; + + msleep(QED_IWARP_MAX_CID_CLEAN_TIME); + + weight = bitmap_weight(bmap->bitmap, bmap->max_count); + + if (prev_weight == weight) { + wait_count++; + } else { + prev_weight = weight; + wait_count = 0; + } + + if (wait_count > QED_IWARP_MAX_NO_PROGRESS_CNT) { + DP_NOTICE(p_hwfn, + "%s bitmap wait timed out (%d cids pending)\n", + bmap->name, weight); + return -EBUSY; + } + } + return 0; +} + +static int qed_iwarp_wait_for_all_cids(struct qed_hwfn *p_hwfn) +{ + int rc; + int i; + + rc = qed_iwarp_wait_cid_map_cleared(p_hwfn, + &p_hwfn->p_rdma_info->tcp_cid_map); + if (rc) + return rc; + + /* Now free the tcp cids from the main cid map */ + for (i = 0; i < QED_IWARP_PREALLOC_CNT; i++) + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, i); + + /* Now wait for all cids to be completed */ + return qed_iwarp_wait_cid_map_cleared(p_hwfn, + &p_hwfn->p_rdma_info->cid_map); +} + +static void qed_iwarp_free_prealloc_ep(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_ep *ep; + + while (!list_empty(&p_hwfn->p_rdma_info->iwarp.ep_free_list)) { + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep = list_first_entry(&p_hwfn->p_rdma_info->iwarp.ep_free_list, + struct qed_iwarp_ep, list_entry); + + if (!ep) { + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + break; + } + list_del(&ep->list_entry); + + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + if (ep->tcp_cid != QED_IWARP_INVALID_TCP_CID) + qed_iwarp_cid_cleaned(p_hwfn, ep->tcp_cid); + + qed_iwarp_destroy_ep(p_hwfn, ep, false); + } +} + +static int qed_iwarp_prealloc_ep(struct qed_hwfn *p_hwfn, bool init) +{ + struct qed_iwarp_ep *ep; + int rc = 0; + int count; + u32 cid; + int i; + + count = init ? QED_IWARP_PREALLOC_CNT : 1; + for (i = 0; i < count; i++) { + rc = qed_iwarp_create_ep(p_hwfn, &ep); + if (rc) + return rc; + + /* During initialization we allocate from the main pool, + * afterwards we allocate only from the tcp_cid. + */ + if (init) { + rc = qed_iwarp_alloc_cid(p_hwfn, &cid); + if (rc) + goto err; + qed_iwarp_set_tcp_cid(p_hwfn, cid); + } else { + /* We don't care about the return code, it's ok if + * tcp_cid remains invalid...in this case we'll + * defer allocation + */ + qed_iwarp_alloc_tcp_cid(p_hwfn, &cid); + } + + ep->tcp_cid = cid; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, + &p_hwfn->p_rdma_info->iwarp.ep_free_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } + + return rc; + +err: + qed_iwarp_destroy_ep(p_hwfn, ep, false); + + return rc; +} + +int qed_iwarp_alloc(struct qed_hwfn *p_hwfn) +{ + int rc; + + /* Allocate bitmap for tcp cid. 
These are used by passive side + * to ensure it can allocate a tcp cid during dpc that was + * pre-acquired and doesn't require dynamic allocation of ilt + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, + QED_IWARP_PREALLOC_CNT, "TCP_CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate tcp cid, rc = %d\n", rc); + return rc; + } + + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_free_list); + spin_lock_init(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + rc = qed_iwarp_prealloc_ep(p_hwfn, true); + if (rc) + return rc; + + return qed_ooo_alloc(p_hwfn); +} + +void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + + qed_ooo_free(p_hwfn); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tcp_cid_map, 1); + kfree(iwarp_info->mpa_bufs); + kfree(iwarp_info->partial_fpdus); + kfree(iwarp_info->mpa_intermediate_buf); +} + +int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + int rc; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context receive in accept is NULL\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", + iparams->qp->icid, ep->tcp_cid); + + if ((iparams->ord > QED_IWARP_ORD_DEFAULT) || + (iparams->ird > QED_IWARP_IRD_DEFAULT)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) EP(0x%x) ERROR: Invalid ord(0x%x)/ird(0x%x)\n", + iparams->qp->icid, + ep->tcp_cid, iparams->ord, iparams->ord); + return -EINVAL; + } + + qed_iwarp_prealloc_ep(p_hwfn, false); + + ep->cb_context = iparams->cb_context; + ep->qp = iparams->qp; + ep->qp->ep = ep; + + if (ep->mpa_rev == MPA_NEGOTIATION_TYPE_ENHANCED) { + /* Negotiate ord/ird: if upperlayer requested ord larger than + * ird advertised by remote, we need to decrease our ord + */ + if (iparams->ord > ep->cm_info.ird) + iparams->ord = ep->cm_info.ird; + + if ((ep->rtr_type & MPA_RTR_TYPE_ZERO_READ) && + (iparams->ird == 0)) + iparams->ird = 1; + } + + /* Update cm_info ord/ird to be negotiated values */ + ep->cm_info.ord = iparams->ord; + ep->cm_info.ird = iparams->ird; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->private_data, iparams->private_data_len); + + rc = qed_iwarp_mpa_offload(p_hwfn, ep); + if (rc) + qed_iwarp_modify_qp(p_hwfn, + iparams->qp, QED_IWARP_QP_STATE_ERROR, 1); + + return rc; +} + +int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_ep *ep; + u8 mpa_data_size = 0; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context receive in reject is NULL\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x)\n", ep->tcp_cid); + + ep->cb_context = iparams->cb_context; + ep->qp = NULL; + + qed_iwarp_mpa_v2_set_private(p_hwfn, ep, &mpa_data_size); + + ep->cm_info.private_data = ep->ep_buffer_virt->out_pdata; + ep->cm_info.private_data_len = iparams->private_data_len + + mpa_data_size; + + memcpy((u8 *)ep->ep_buffer_virt->out_pdata + mpa_data_size, + iparams->private_data, iparams->private_data_len); + + return qed_iwarp_mpa_offload(p_hwfn, ep); +} + +static void +qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, + struct 
qed_iwarp_cm_info *cm_info) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "ip_version = %d\n", + cm_info->ip_version); + + if (cm_info->ip_version == QED_TCP_IPV4) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "remote_ip %pI4h:%x, local_ip %pI4h:%x vlan=%x\n", + cm_info->remote_ip, cm_info->remote_port, + cm_info->local_ip, cm_info->local_port, + cm_info->vlan); + else + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n", + cm_info->remote_ip, cm_info->remote_port, + cm_info->local_ip, cm_info->local_port, + cm_info->vlan); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "private_data_len = %x ord = %d, ird = %d\n", + cm_info->private_data_len, cm_info->ord, cm_info->ird); +} + +static int +qed_iwarp_ll2_post_rx(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ll2_buff *buf, u8 handle) +{ + int rc; + + rc = qed_ll2_post_rx_buffer(p_hwfn, handle, buf->data_phys_addr, + (u16)buf->buff_size, buf, 1); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to repost rx buffer to ll2 rc = %d, handle=%d\n", + rc, handle); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, buf->buff_size, + buf->data, buf->data_phys_addr); + kfree(buf); + } + + return rc; +} + +static bool +qed_iwarp_ep_exists(struct qed_hwfn *p_hwfn, struct qed_iwarp_cm_info *cm_info) +{ + struct qed_iwarp_ep *ep = NULL; + bool found = false; + + list_for_each_entry(ep, + &p_hwfn->p_rdma_info->iwarp.ep_list, + list_entry) { + if ((ep->cm_info.local_port == cm_info->local_port) && + (ep->cm_info.remote_port == cm_info->remote_port) && + (ep->cm_info.vlan == cm_info->vlan) && + !memcmp(&ep->cm_info.local_ip, cm_info->local_ip, + sizeof(cm_info->local_ip)) && + !memcmp(&ep->cm_info.remote_ip, cm_info->remote_ip, + sizeof(cm_info->remote_ip))) { + found = true; + break; + } + } + + if (found) { + DP_NOTICE(p_hwfn, + "SYN received on active connection - dropping\n"); + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + return true; + } + + return false; +} + +static struct qed_iwarp_listener * +qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info) +{ + struct qed_iwarp_listener *listener = NULL; + static const u32 ip_zero[4] = { 0, 0, 0, 0 }; + bool found = false; + + list_for_each_entry(listener, + &p_hwfn->p_rdma_info->iwarp.listen_list, + list_entry) { + if (listener->port == cm_info->local_port) { + if (!memcmp(listener->ip_addr, + ip_zero, sizeof(ip_zero))) { + found = true; + break; + } + + if (!memcmp(listener->ip_addr, + cm_info->local_ip, + sizeof(cm_info->local_ip)) && + (listener->vlan == cm_info->vlan)) { + found = true; + break; + } + } + } + + if (found) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener found = %p\n", + listener); + return listener; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "listener not found\n"); + return NULL; +} + +static int +qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_cm_info *cm_info, + void *buf, + u8 *remote_mac_addr, + u8 *local_mac_addr, + int *payload_len, int *tcp_start_offset) +{ + struct vlan_ethhdr *vethh; + bool vlan_valid = false; + struct ipv6hdr *ip6h; + struct ethhdr *ethh; + struct tcphdr *tcph; + struct iphdr *iph; + int eth_hlen; + int ip_hlen; + int eth_type; + int i; + + ethh = buf; + eth_type = ntohs(ethh->h_proto); + if (eth_type == ETH_P_8021Q) { + vlan_valid = true; + vethh = (struct vlan_ethhdr *)ethh; + cm_info->vlan = ntohs(vethh->h_vlan_TCI) & VLAN_VID_MASK; + eth_type = ntohs(vethh->h_vlan_encapsulated_proto); + } + + eth_hlen = ETH_HLEN + (vlan_valid ? 
sizeof(u32) : 0); + + if (!ether_addr_equal(ethh->h_dest, + p_hwfn->p_rdma_info->iwarp.mac_addr)) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "Got unexpected mac %pM instead of %pM\n", + ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); + return -EINVAL; + } + + ether_addr_copy(remote_mac_addr, ethh->h_source); + ether_addr_copy(local_mac_addr, ethh->h_dest); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_type =%d source mac: %pM\n", + eth_type, ethh->h_source); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "eth_hlen=%d destination mac: %pM\n", + eth_hlen, ethh->h_dest); + + iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); + + if (eth_type == ETH_P_IP) { + if (iph->protocol != IPPROTO_TCP) { + DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + iph->protocol); + return -EINVAL; + } + + cm_info->local_ip[0] = ntohl(iph->daddr); + cm_info->remote_ip[0] = ntohl(iph->saddr); + cm_info->ip_version = QED_TCP_IPV4; + + ip_hlen = (iph->ihl) * sizeof(u32); + *payload_len = ntohs(iph->tot_len) - ip_hlen; + } else if (eth_type == ETH_P_IPV6) { + ip6h = (struct ipv6hdr *)iph; + + if (ip6h->nexthdr != IPPROTO_TCP) { + DP_NOTICE(p_hwfn, + "Unexpected ip protocol on ll2 %x\n", + iph->protocol); + return -EINVAL; + } + + for (i = 0; i < 4; i++) { + cm_info->local_ip[i] = + ntohl(ip6h->daddr.in6_u.u6_addr32[i]); + cm_info->remote_ip[i] = + ntohl(ip6h->saddr.in6_u.u6_addr32[i]); + } + cm_info->ip_version = QED_TCP_IPV6; + + ip_hlen = sizeof(*ip6h); + *payload_len = ntohs(ip6h->payload_len); + } else { + DP_NOTICE(p_hwfn, "Unexpected ethertype on ll2 %x\n", eth_type); + return -EINVAL; + } + + tcph = (struct tcphdr *)((u8 *)iph + ip_hlen); + + if (!tcph->syn) { + DP_NOTICE(p_hwfn, + "Only SYN type packet expected on this ll2 conn, iph->ihl=%d source=%d dest=%d\n", + iph->ihl, tcph->source, tcph->dest); + return -EINVAL; + } + + cm_info->local_port = ntohs(tcph->dest); + cm_info->remote_port = ntohs(tcph->source); + + qed_iwarp_print_cm_info(p_hwfn, cm_info); + + *tcp_start_offset = eth_hlen + ip_hlen; + + return 0; +} + +static struct qed_iwarp_fpdu *qed_iwarp_get_curr_fpdu(struct qed_hwfn *p_hwfn, + u16 cid) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct qed_iwarp_fpdu *partial_fpdu; + u32 idx; + + idx = cid - qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_IWARP); + if (idx >= iwarp_info->max_num_partial_fpdus) { + DP_ERR(p_hwfn, "Invalid cid %x max_num_partial_fpdus=%x\n", cid, + iwarp_info->max_num_partial_fpdus); + return NULL; + } + + partial_fpdu = &iwarp_info->partial_fpdus[idx]; + + return partial_fpdu; +} + +enum qed_iwarp_mpa_pkt_type { + QED_IWARP_MPA_PKT_PACKED, + QED_IWARP_MPA_PKT_PARTIAL, + QED_IWARP_MPA_PKT_UNALIGNED +}; + +#define QED_IWARP_INVALID_FPDU_LENGTH 0xffff +#define QED_IWARP_MPA_FPDU_LENGTH_SIZE (2) +#define QED_IWARP_MPA_CRC32_DIGEST_SIZE (4) + +/* Pad to multiple of 4 */ +#define QED_IWARP_PDU_DATA_LEN_WITH_PAD(data_len) ALIGN(data_len, 4) +#define QED_IWARP_FPDU_LEN_WITH_PAD(_mpa_len) \ + (QED_IWARP_PDU_DATA_LEN_WITH_PAD((_mpa_len) + \ + QED_IWARP_MPA_FPDU_LENGTH_SIZE) + \ + QED_IWARP_MPA_CRC32_DIGEST_SIZE) + +/* fpdu can be fragmented over maximum 3 bds: header, partial mpa, unaligned */ +#define QED_IWARP_MAX_BDS_PER_FPDU 3 + +static const char * const pkt_type_str[] = { + "QED_IWARP_MPA_PKT_PACKED", + "QED_IWARP_MPA_PKT_PARTIAL", + "QED_IWARP_MPA_PKT_UNALIGNED" +}; + +static int +qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct qed_iwarp_ll2_buff *buf); + +static enum qed_iwarp_mpa_pkt_type 
+qed_iwarp_mpa_classify(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + u16 tcp_payload_len, u8 *mpa_data) +{ + enum qed_iwarp_mpa_pkt_type pkt_type; + u16 mpa_len; + + if (fpdu->incomplete_bytes) { + pkt_type = QED_IWARP_MPA_PKT_UNALIGNED; + goto out; + } + + /* special case of one byte remaining... + * lower byte will be read next packet + */ + if (tcp_payload_len == 1) { + fpdu->fpdu_length = *mpa_data << BITS_PER_BYTE; + pkt_type = QED_IWARP_MPA_PKT_PARTIAL; + goto out; + } + + mpa_len = ntohs(*(__force __be16 *)mpa_data); + fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); + + if (fpdu->fpdu_length <= tcp_payload_len) + pkt_type = QED_IWARP_MPA_PKT_PACKED; + else + pkt_type = QED_IWARP_MPA_PKT_PARTIAL; + +out: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA_ALIGN: %s: fpdu_length=0x%x tcp_payload_len:0x%x\n", + pkt_type_str[pkt_type], fpdu->fpdu_length, tcp_payload_len); + + return pkt_type; +} + +static void +qed_iwarp_init_fpdu(struct qed_iwarp_ll2_buff *buf, + struct qed_iwarp_fpdu *fpdu, + struct unaligned_opaque_data *pkt_data, + u16 tcp_payload_size, u8 placement_offset) +{ + u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); + + fpdu->mpa_buf = buf; + fpdu->pkt_hdr = buf->data_phys_addr + placement_offset; + fpdu->pkt_hdr_size = pkt_data->tcp_payload_offset; + fpdu->mpa_frag = buf->data_phys_addr + first_mpa_offset; + fpdu->mpa_frag_virt = (u8 *)(buf->data) + first_mpa_offset; + + if (tcp_payload_size == 1) + fpdu->incomplete_bytes = QED_IWARP_INVALID_FPDU_LENGTH; + else if (tcp_payload_size < fpdu->fpdu_length) + fpdu->incomplete_bytes = fpdu->fpdu_length - tcp_payload_size; + else + fpdu->incomplete_bytes = 0; /* complete fpdu */ + + fpdu->mpa_frag_len = fpdu->fpdu_length - fpdu->incomplete_bytes; +} + +static int +qed_iwarp_cp_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct unaligned_opaque_data *pkt_data, + struct qed_iwarp_ll2_buff *buf, u16 tcp_payload_size) +{ + u16 first_mpa_offset = le16_to_cpu(pkt_data->first_mpa_offset); + u8 *tmp_buf = p_hwfn->p_rdma_info->iwarp.mpa_intermediate_buf; + int rc; + + /* need to copy the data from the partial packet stored in fpdu + * to the new buf, for this we also need to move the data currently + * placed on the buf. The assumption is that the buffer is big enough + * since fpdu_length <= mss, we use an intermediate buffer since + * we may need to copy the new data to an overlapping location + */ + if ((fpdu->mpa_frag_len + tcp_payload_size) > (u16)buf->buff_size) { + DP_ERR(p_hwfn, + "MPA ALIGN: Unexpected: buffer is not large enough for split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", + buf->buff_size, fpdu->mpa_frag_len, + tcp_payload_size, fpdu->incomplete_bytes); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "MPA ALIGN Copying fpdu: [%p, %d] [%p, %d]\n", + fpdu->mpa_frag_virt, fpdu->mpa_frag_len, + (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); + + memcpy(tmp_buf, fpdu->mpa_frag_virt, fpdu->mpa_frag_len); + memcpy(tmp_buf + fpdu->mpa_frag_len, + (u8 *)(buf->data) + first_mpa_offset, tcp_payload_size); + + rc = qed_iwarp_recycle_pkt(p_hwfn, fpdu, fpdu->mpa_buf); + if (rc) + return rc; + + /* If we managed to post the buffer copy the data to the new buffer + * o/w this will occur in the next round... 
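+ * The intermediate buffer already holds the stored fragment followed by
+ * the new payload, so a single copy rebuilds the fpdu at the start of the
+ * new buffer.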
+ */ + memcpy((u8 *)(buf->data), tmp_buf, + fpdu->mpa_frag_len + tcp_payload_size); + + fpdu->mpa_buf = buf; + /* fpdu->pkt_hdr remains as is */ + /* fpdu->mpa_frag is overridden with new buf */ + fpdu->mpa_frag = buf->data_phys_addr; + fpdu->mpa_frag_virt = buf->data; + fpdu->mpa_frag_len += tcp_payload_size; + + fpdu->incomplete_bytes -= tcp_payload_size; + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA ALIGN: split fpdu buff_size = %d mpa_frag_len = %d, tcp_payload_size = %d, incomplete_bytes = %d\n", + buf->buff_size, fpdu->mpa_frag_len, tcp_payload_size, + fpdu->incomplete_bytes); + + return 0; +} + +static void +qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, u8 *mpa_data) +{ + u16 mpa_len; + + /* Update incomplete packets if needed */ + if (fpdu->incomplete_bytes == QED_IWARP_INVALID_FPDU_LENGTH) { + /* Missing lower byte is now available */ + mpa_len = fpdu->fpdu_length | *mpa_data; + fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); + /* one byte of hdr */ + fpdu->mpa_frag_len = 1; + fpdu->incomplete_bytes = fpdu->fpdu_length - 1; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: Partial header mpa_len=%x fpdu_length=%x incomplete_bytes=%x\n", + mpa_len, fpdu->fpdu_length, fpdu->incomplete_bytes); + } +} + +#define QED_IWARP_IS_RIGHT_EDGE(_curr_pkt) \ + (GET_FIELD((_curr_pkt)->flags, \ + UNALIGNED_OPAQUE_DATA_PKT_REACHED_WIN_RIGHT_EDGE)) + +/* This function is used to recycle a buffer using the ll2 drop option. It + * uses the mechanism to ensure that all buffers posted to tx before this one + * were completed. The buffer sent here will be sent as a cookie in the tx + * completion function and can then be reposted to rx chain when done. The flow + * that requires this is the flow where a FPDU splits over more than 3 tcp + * segments. In this case the driver needs to re-post a rx buffer instead of + * the one received, but driver can't simply repost a buffer it copied from + * as there is a case where the buffer was originally a packed FPDU, and is + * partially posted to FW. Driver needs to ensure FW is done with it. 
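+ * Only the fpdu packet header is attached, as a single bd directed to the
+ * drop queue (QED_LL2_TX_DEST_DROP).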
+ */ +static int +qed_iwarp_recycle_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct qed_iwarp_ll2_buff *buf) +{ + struct qed_ll2_tx_pkt_info tx_pkt; + u8 ll2_handle; + int rc; + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; + tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; + tx_pkt.first_frag = fpdu->pkt_hdr; + tx_pkt.first_frag_len = fpdu->pkt_hdr_size; + buf->piggy_buf = NULL; + tx_pkt.cookie = buf; + + ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); + if (rc) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't drop packet rc=%d\n", rc); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: send drop tx packet [%lx, 0x%x], buf=%p, rc=%d\n", + (unsigned long int)tx_pkt.first_frag, + tx_pkt.first_frag_len, buf, rc); + + return rc; +} + +static int +qed_iwarp_win_right_edge(struct qed_hwfn *p_hwfn, struct qed_iwarp_fpdu *fpdu) +{ + struct qed_ll2_tx_pkt_info tx_pkt; + u8 ll2_handle; + int rc; + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; + + tx_pkt.first_frag = fpdu->pkt_hdr; + tx_pkt.first_frag_len = fpdu->pkt_hdr_size; + tx_pkt.enable_ip_cksum = true; + tx_pkt.enable_l4_cksum = true; + tx_pkt.calc_ip_len = true; + /* vlan overload with enum iwarp_ll2_tx_queues */ + tx_pkt.vlan = IWARP_LL2_ALIGNED_RIGHT_TRIMMED_TX_QUEUE; + + ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); + if (rc) + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send right edge rc=%d\n", rc); + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: Sent right edge FPDU num_bds=%d [%lx, 0x%x], rc=%d\n", + tx_pkt.num_of_bds, + (unsigned long int)tx_pkt.first_frag, + tx_pkt.first_frag_len, rc); + + return rc; +} + +static int +qed_iwarp_send_fpdu(struct qed_hwfn *p_hwfn, + struct qed_iwarp_fpdu *fpdu, + struct unaligned_opaque_data *curr_pkt, + struct qed_iwarp_ll2_buff *buf, + u16 tcp_payload_size, enum qed_iwarp_mpa_pkt_type pkt_type) +{ + struct qed_ll2_tx_pkt_info tx_pkt; + u16 first_mpa_offset; + u8 ll2_handle; + int rc; + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + + /* An unaligned packet means it's split over two tcp segments. So the + * complete packet requires 3 bds, one for the header, one for the + * part of the fpdu of the first tcp segment, and the last fragment + * will point to the remainder of the fpdu. A packed pdu, requires only + * two bds, one for the header and one for the data. + */ + tx_pkt.num_of_bds = (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED) ? 3 : 2; + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + tx_pkt.l4_hdr_offset_w = fpdu->pkt_hdr_size >> 2; /* offset in words */ + + /* Send the mpa_buf only with the last fpdu (in case of packed) */ + if (pkt_type == QED_IWARP_MPA_PKT_UNALIGNED || + tcp_payload_size <= fpdu->fpdu_length) + tx_pkt.cookie = fpdu->mpa_buf; + + tx_pkt.first_frag = fpdu->pkt_hdr; + tx_pkt.first_frag_len = fpdu->pkt_hdr_size; + tx_pkt.enable_ip_cksum = true; + tx_pkt.enable_l4_cksum = true; + tx_pkt.calc_ip_len = true; + /* vlan overload with enum iwarp_ll2_tx_queues */ + tx_pkt.vlan = IWARP_LL2_ALIGNED_TX_QUEUE; + + /* special case of unaligned packet and not packed, need to send + * both buffers as cookie to release. 
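+ * The extra buffer is attached as piggy_buf; the tx completion handler
+ * reposts it to rx along with the main cookie.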
+ */ + if (tcp_payload_size == fpdu->incomplete_bytes) + fpdu->mpa_buf->piggy_buf = buf; + + ll2_handle = p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle; + + /* Set first fragment to header */ + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_handle, &tx_pkt, true); + if (rc) + goto out; + + /* Set second fragment to first part of packet */ + rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, ll2_handle, + fpdu->mpa_frag, + fpdu->mpa_frag_len); + if (rc) + goto out; + + if (!fpdu->incomplete_bytes) + goto out; + + first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); + + /* Set third fragment to second part of the packet */ + rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, + ll2_handle, + buf->data_phys_addr + + first_mpa_offset, + fpdu->incomplete_bytes); +out: + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "MPA_ALIGN: Sent FPDU num_bds=%d first_frag_len=%x, mpa_frag_len=0x%x, incomplete_bytes:0x%x rc=%d\n", + tx_pkt.num_of_bds, + tx_pkt.first_frag_len, + fpdu->mpa_frag_len, + fpdu->incomplete_bytes, rc); + + return rc; +} + +static void +qed_iwarp_mpa_get_data(struct qed_hwfn *p_hwfn, + struct unaligned_opaque_data *curr_pkt, + u32 opaque_data0, u32 opaque_data1) +{ + u64 opaque_data; + + opaque_data = HILO_64(cpu_to_le32(opaque_data1), + cpu_to_le32(opaque_data0)); + *curr_pkt = *((struct unaligned_opaque_data *)&opaque_data); + + le16_add_cpu(&curr_pkt->first_mpa_offset, + curr_pkt->tcp_payload_offset); +} + +/* This function is called when an unaligned or incomplete MPA packet arrives + * driver needs to align the packet, perhaps using previous data and send + * it down to FW once it is aligned. + */ +static int +qed_iwarp_process_mpa_pkt(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ll2_mpa_buf *mpa_buf) +{ + struct unaligned_opaque_data *curr_pkt = &mpa_buf->data; + struct qed_iwarp_ll2_buff *buf = mpa_buf->ll2_buf; + enum qed_iwarp_mpa_pkt_type pkt_type; + struct qed_iwarp_fpdu *fpdu; + u16 cid, first_mpa_offset; + int rc = -EINVAL; + u8 *mpa_data; + + cid = le32_to_cpu(curr_pkt->cid); + + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); + if (!fpdu) { /* something corrupt with cid, post rx back */ + DP_ERR(p_hwfn, "Invalid cid, drop and post back to rx cid=%x\n", + cid); + goto err; + } + + do { + first_mpa_offset = le16_to_cpu(curr_pkt->first_mpa_offset); + mpa_data = ((u8 *)(buf->data) + first_mpa_offset); + + pkt_type = qed_iwarp_mpa_classify(p_hwfn, fpdu, + mpa_buf->tcp_payload_len, + mpa_data); + + switch (pkt_type) { + case QED_IWARP_MPA_PKT_PARTIAL: + qed_iwarp_init_fpdu(buf, fpdu, + curr_pkt, + mpa_buf->tcp_payload_len, + mpa_buf->placement_offset); + + if (!QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { + mpa_buf->tcp_payload_len = 0; + break; + } + + rc = qed_iwarp_win_right_edge(p_hwfn, fpdu); + + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send FPDU:reset rc=%d\n", rc); + memset(fpdu, 0, sizeof(*fpdu)); + break; + } + + mpa_buf->tcp_payload_len = 0; + break; + case QED_IWARP_MPA_PKT_PACKED: + qed_iwarp_init_fpdu(buf, fpdu, + curr_pkt, + mpa_buf->tcp_payload_len, + mpa_buf->placement_offset); + + rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, + mpa_buf->tcp_payload_len, + pkt_type); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send FPDU:reset rc=%d\n", rc); + memset(fpdu, 0, sizeof(*fpdu)); + break; + } + + mpa_buf->tcp_payload_len -= fpdu->fpdu_length; + le16_add_cpu(&curr_pkt->first_mpa_offset, + fpdu->fpdu_length); + break; + case QED_IWARP_MPA_PKT_UNALIGNED: + qed_iwarp_update_fpdu_length(p_hwfn, fpdu, mpa_data); + if (mpa_buf->tcp_payload_len < 
fpdu->incomplete_bytes) { + /* special handling of fpdu split over more + * than 2 segments + */ + if (QED_IWARP_IS_RIGHT_EDGE(curr_pkt)) { + rc = qed_iwarp_win_right_edge(p_hwfn, + fpdu); + /* packet will be re-processed later */ + if (rc) + return rc; + } + + rc = qed_iwarp_cp_pkt(p_hwfn, fpdu, curr_pkt, + buf, + mpa_buf->tcp_payload_len); + if (rc) /* packet will be re-processed later */ + return rc; + + mpa_buf->tcp_payload_len = 0; + break; + } + + rc = qed_iwarp_send_fpdu(p_hwfn, fpdu, curr_pkt, buf, + mpa_buf->tcp_payload_len, + pkt_type); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Can't send FPDU:delay rc=%d\n", rc); + /* don't reset fpdu -> we need it for next + * classify + */ + break; + } + + mpa_buf->tcp_payload_len -= fpdu->incomplete_bytes; + le16_add_cpu(&curr_pkt->first_mpa_offset, + fpdu->incomplete_bytes); + + /* The framed PDU was sent - no more incomplete bytes */ + fpdu->incomplete_bytes = 0; + break; + } + } while (mpa_buf->tcp_payload_len && !rc); + + return rc; + +err: + qed_iwarp_ll2_post_rx(p_hwfn, + buf, + p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle); + return rc; +} + +static void qed_iwarp_process_pending_pkts(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + struct qed_iwarp_ll2_mpa_buf *mpa_buf = NULL; + int rc; + + while (!list_empty(&iwarp_info->mpa_buf_pending_list)) { + mpa_buf = list_first_entry(&iwarp_info->mpa_buf_pending_list, + struct qed_iwarp_ll2_mpa_buf, + list_entry); + + rc = qed_iwarp_process_mpa_pkt(p_hwfn, mpa_buf); + + /* busy means break and continue processing later, don't + * remove the buf from the pending list. + */ + if (rc == -EBUSY) + break; + + list_move_tail(&mpa_buf->list_entry, + &iwarp_info->mpa_buf_list); + + if (rc) { /* different error, don't continue */ + DP_NOTICE(p_hwfn, "process pkts failed rc=%d\n", rc); + break; + } + } +} + +static void +qed_iwarp_ll2_comp_mpa_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) +{ + struct qed_iwarp_ll2_mpa_buf *mpa_buf; + struct qed_iwarp_info *iwarp_info; + struct qed_hwfn *p_hwfn = cxt; + u16 first_mpa_offset; + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + mpa_buf = list_first_entry(&iwarp_info->mpa_buf_list, + struct qed_iwarp_ll2_mpa_buf, list_entry); + if (!mpa_buf) { + DP_ERR(p_hwfn, "No free mpa buf\n"); + goto err; + } + + list_del(&mpa_buf->list_entry); + qed_iwarp_mpa_get_data(p_hwfn, &mpa_buf->data, + data->opaque_data_0, data->opaque_data_1); + + first_mpa_offset = le16_to_cpu(mpa_buf->data.first_mpa_offset); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "LL2 MPA CompRx payload_len:0x%x\tfirst_mpa_offset:0x%x\ttcp_payload_offset:0x%x\tflags:0x%x\tcid:0x%x\n", + data->length.packet_length, first_mpa_offset, + mpa_buf->data.tcp_payload_offset, mpa_buf->data.flags, + mpa_buf->data.cid); + + mpa_buf->ll2_buf = data->cookie; + mpa_buf->tcp_payload_len = data->length.packet_length - + first_mpa_offset; + + first_mpa_offset += data->u.placement_offset; + mpa_buf->data.first_mpa_offset = cpu_to_le16(first_mpa_offset); + mpa_buf->placement_offset = data->u.placement_offset; + + list_add_tail(&mpa_buf->list_entry, &iwarp_info->mpa_buf_pending_list); + + qed_iwarp_process_pending_pkts(p_hwfn); + return; +err: + qed_iwarp_ll2_post_rx(p_hwfn, data->cookie, + iwarp_info->ll2_mpa_handle); +} + +static void +qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data) +{ + struct qed_iwarp_ll2_buff *buf = data->cookie; + struct qed_iwarp_listener *listener; + struct qed_ll2_tx_pkt_info tx_pkt; + struct qed_iwarp_cm_info cm_info; + 
struct qed_hwfn *p_hwfn = cxt; + u8 remote_mac_addr[ETH_ALEN]; + u8 local_mac_addr[ETH_ALEN]; + struct qed_iwarp_ep *ep; + int tcp_start_offset; + u8 ll2_syn_handle; + int payload_len; + u32 hdr_size; + int rc; + + memset(&cm_info, 0, sizeof(cm_info)); + ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; + + /* Check if packet was received with errors... */ + if (data->err_flags) { + DP_NOTICE(p_hwfn, "Error received on SYN packet: 0x%x\n", + data->err_flags); + goto err; + } + + if (GET_FIELD(data->parse_flags, + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED) && + GET_FIELD(data->parse_flags, PARSING_AND_ERR_FLAGS_L4CHKSMERROR)) { + DP_NOTICE(p_hwfn, "Syn packet received with checksum error\n"); + goto err; + } + + rc = qed_iwarp_parse_rx_pkt(p_hwfn, &cm_info, (u8 *)(buf->data) + + data->u.placement_offset, remote_mac_addr, + local_mac_addr, &payload_len, + &tcp_start_offset); + if (rc) + goto err; + + /* Check if there is a listener for this 4-tuple+vlan */ + listener = qed_iwarp_get_listener(p_hwfn, &cm_info); + if (!listener) { + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "SYN received on tuple not listened on parse_flags=%d packet len=%d\n", + data->parse_flags, data->length.packet_length); + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.l4_hdr_offset_w = (data->length.packet_length) >> 2; + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + tx_pkt.first_frag = buf->data_phys_addr + + data->u.placement_offset; + tx_pkt.first_frag_len = data->length.packet_length; + tx_pkt.cookie = buf; + + rc = qed_ll2_prepare_tx_packet(p_hwfn, ll2_syn_handle, + &tx_pkt, true); + + if (rc) { + DP_NOTICE(p_hwfn, + "Can't post SYN back to chip rc=%d\n", rc); + goto err; + } + return; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Received syn on listening port\n"); + /* There may be an open ep on this connection if this is a syn + * retrasnmit... need to make sure there isn't... + */ + if (qed_iwarp_ep_exists(p_hwfn, &cm_info)) + goto err; + + ep = qed_iwarp_get_free_ep(p_hwfn); + if (!ep) + goto err; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&ep->list_entry, &p_hwfn->p_rdma_info->iwarp.ep_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ether_addr_copy(ep->remote_mac_addr, remote_mac_addr); + ether_addr_copy(ep->local_mac_addr, local_mac_addr); + + memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info)); + + hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 
40 : 60); + ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size; + ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss); + + ep->event_cb = listener->event_cb; + ep->cb_context = listener->cb_context; + ep->connect_mode = TCP_CONNECT_PASSIVE; + + ep->syn = buf; + ep->syn_ip_payload_length = (u16)payload_len; + ep->syn_phy_addr = buf->data_phys_addr + data->u.placement_offset + + tcp_start_offset; + + rc = qed_iwarp_tcp_offload(p_hwfn, ep); + if (rc) { + qed_iwarp_return_ep(p_hwfn, ep); + goto err; + } + + return; +err: + qed_iwarp_ll2_post_rx(p_hwfn, buf, ll2_syn_handle); +} + +static void qed_iwarp_ll2_rel_rx_pkt(void *cxt, u8 connection_handle, + void *cookie, dma_addr_t rx_buf_addr, + bool b_last_packet) +{ + struct qed_iwarp_ll2_buff *buffer = cookie; + struct qed_hwfn *p_hwfn = cxt; + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, + buffer->data, buffer->data_phys_addr); + kfree(buffer); +} + +static void qed_iwarp_ll2_comp_tx_pkt(void *cxt, u8 connection_handle, + void *cookie, dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet) +{ + struct qed_iwarp_ll2_buff *buffer = cookie; + struct qed_iwarp_ll2_buff *piggy; + struct qed_hwfn *p_hwfn = cxt; + + if (!buffer) /* can happen in packed mpa unaligned... */ + return; + + /* this was originally an rx packet, post it back */ + piggy = buffer->piggy_buf; + if (piggy) { + buffer->piggy_buf = NULL; + qed_iwarp_ll2_post_rx(p_hwfn, piggy, connection_handle); + } + + qed_iwarp_ll2_post_rx(p_hwfn, buffer, connection_handle); + + if (connection_handle == p_hwfn->p_rdma_info->iwarp.ll2_mpa_handle) + qed_iwarp_process_pending_pkts(p_hwfn); + + return; +} + +static void qed_iwarp_ll2_rel_tx_pkt(void *cxt, u8 connection_handle, + void *cookie, dma_addr_t first_frag_addr, + bool b_last_fragment, bool b_last_packet) +{ + struct qed_iwarp_ll2_buff *buffer = cookie; + struct qed_hwfn *p_hwfn = cxt; + + if (!buffer) + return; + + if (buffer->piggy_buf) { + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + buffer->piggy_buf->buff_size, + buffer->piggy_buf->data, + buffer->piggy_buf->data_phys_addr); + + kfree(buffer->piggy_buf); + } + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, buffer->buff_size, + buffer->data, buffer->data_phys_addr); + + kfree(buffer); +} + +/* The only slowpath for iwarp ll2 is unalign flush. When this completion + * is received, need to reset the FPDU. 
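+ * The cid carried in the opaque data selects which partial_fpdus entry
+ * to clear.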
+ */ +static void +qed_iwarp_ll2_slowpath(void *cxt, + u8 connection_handle, + u32 opaque_data_0, u32 opaque_data_1) +{ + struct unaligned_opaque_data unalign_data; + struct qed_hwfn *p_hwfn = cxt; + struct qed_iwarp_fpdu *fpdu; + u32 cid; + + qed_iwarp_mpa_get_data(p_hwfn, &unalign_data, + opaque_data_0, opaque_data_1); + + cid = le32_to_cpu(unalign_data.cid); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "(0x%x) Flush fpdu\n", cid); + + fpdu = qed_iwarp_get_curr_fpdu(p_hwfn, (u16)cid); + if (fpdu) + memset(fpdu, 0, sizeof(*fpdu)); +} + +static int qed_iwarp_ll2_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_iwarp_info *iwarp_info = &p_hwfn->p_rdma_info->iwarp; + int rc = 0; + + if (iwarp_info->ll2_syn_handle != QED_IWARP_HANDLE_INVAL) { + rc = qed_ll2_terminate_connection(p_hwfn, + iwarp_info->ll2_syn_handle); + if (rc) + DP_INFO(p_hwfn, "Failed to terminate syn connection\n"); + + qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_syn_handle); + iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; + } + + if (iwarp_info->ll2_ooo_handle != QED_IWARP_HANDLE_INVAL) { + rc = qed_ll2_terminate_connection(p_hwfn, + iwarp_info->ll2_ooo_handle); + if (rc) + DP_INFO(p_hwfn, "Failed to terminate ooo connection\n"); + + qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_ooo_handle); + iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; + } + + if (iwarp_info->ll2_mpa_handle != QED_IWARP_HANDLE_INVAL) { + rc = qed_ll2_terminate_connection(p_hwfn, + iwarp_info->ll2_mpa_handle); + if (rc) + DP_INFO(p_hwfn, "Failed to terminate mpa connection\n"); + + qed_ll2_release_connection(p_hwfn, iwarp_info->ll2_mpa_handle); + iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; + } + + qed_llh_remove_mac_filter(p_hwfn->cdev, 0, + p_hwfn->p_rdma_info->iwarp.mac_addr); + + return rc; +} + +static int +qed_iwarp_ll2_alloc_buffers(struct qed_hwfn *p_hwfn, + int num_rx_bufs, int buff_size, u8 ll2_handle) +{ + struct qed_iwarp_ll2_buff *buffer; + int rc = 0; + int i; + + for (i = 0; i < num_rx_bufs; i++) { + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) { + rc = -ENOMEM; + break; + } + + buffer->data = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + buff_size, + &buffer->data_phys_addr, + GFP_KERNEL); + if (!buffer->data) { + kfree(buffer); + rc = -ENOMEM; + break; + } + + buffer->buff_size = buff_size; + rc = qed_iwarp_ll2_post_rx(p_hwfn, buffer, ll2_handle); + if (rc) + /* buffers will be deallocated by qed_ll2 */ + break; + } + return rc; +} + +#define QED_IWARP_MAX_BUF_SIZE(mtu) \ + ALIGN((mtu) + ETH_HLEN + 2 * VLAN_HLEN + 2 + ETH_CACHE_LINE_SIZE, \ + ETH_CACHE_LINE_SIZE) + +static int +qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params, + u32 rcv_wnd_size) +{ + struct qed_iwarp_info *iwarp_info; + struct qed_ll2_acquire_data data; + struct qed_ll2_cbs cbs; + u32 buff_size; + u16 n_ooo_bufs; + int rc = 0; + int i; + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + iwarp_info->ll2_syn_handle = QED_IWARP_HANDLE_INVAL; + iwarp_info->ll2_ooo_handle = QED_IWARP_HANDLE_INVAL; + iwarp_info->ll2_mpa_handle = QED_IWARP_HANDLE_INVAL; + + iwarp_info->max_mtu = params->max_mtu; + + ether_addr_copy(p_hwfn->p_rdma_info->iwarp.mac_addr, params->mac_addr); + + rc = qed_llh_add_mac_filter(p_hwfn->cdev, 0, params->mac_addr); + if (rc) + return rc; + + /* Start SYN connection */ + cbs.rx_comp_cb = qed_iwarp_ll2_comp_syn_pkt; + cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt; + cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt; + cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt; + cbs.slowpath_cb = NULL; + 
cbs.cookie = p_hwfn; + + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_IWARP; + /* SYN will use ctx based queues */ + data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX; + data.input.mtu = params->max_mtu; + data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; + data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; + data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ + data.input.tx_tc = PKT_LB_TC; + data.input.tx_dest = QED_LL2_TX_DEST_LB; + data.p_connection_handle = &iwarp_info->ll2_syn_handle; + data.cbs = &cbs; + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to acquire LL2 connection\n"); + qed_llh_remove_mac_filter(p_hwfn->cdev, 0, params->mac_addr); + return rc; + } + + rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_syn_handle); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to establish LL2 connection\n"); + goto err; + } + + buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); + rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, + QED_IWARP_LL2_SYN_RX_SIZE, + buff_size, + iwarp_info->ll2_syn_handle); + if (rc) + goto err; + + /* Start OOO connection */ + data.input.conn_type = QED_LL2_TYPE_OOO; + /* OOO/unaligned will use legacy ll2 queues (ram based) */ + data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY; + data.input.mtu = params->max_mtu; + + n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) / + iwarp_info->max_mtu; + n_ooo_bufs = min_t(u32, n_ooo_bufs, QED_IWARP_LL2_OOO_MAX_RX_SIZE); + + data.input.rx_num_desc = n_ooo_bufs; + data.input.rx_num_ooo_buffers = n_ooo_bufs; + + data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ + data.input.tx_num_desc = QED_IWARP_LL2_OOO_DEF_TX_SIZE; + data.p_connection_handle = &iwarp_info->ll2_ooo_handle; + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) + goto err; + + rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_ooo_handle); + if (rc) + goto err; + + /* Start Unaligned MPA connection */ + cbs.rx_comp_cb = qed_iwarp_ll2_comp_mpa_pkt; + cbs.slowpath_cb = qed_iwarp_ll2_slowpath; + + memset(&data, 0, sizeof(data)); + data.input.conn_type = QED_LL2_TYPE_IWARP; + data.input.mtu = params->max_mtu; + /* FW requires that once a packet arrives OOO, it must have at + * least 2 rx buffers available on the unaligned connection + * for handling the case that it is a partial fpdu. 
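+ * Hence the rx ring of the unaligned mpa connection is sized to twice
+ * the number of ooo buffers.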
+ */ + data.input.rx_num_desc = n_ooo_bufs * 2; + data.input.tx_num_desc = data.input.rx_num_desc; + data.input.tx_max_bds_per_packet = QED_IWARP_MAX_BDS_PER_FPDU; + data.input.tx_tc = PKT_LB_TC; + data.input.tx_dest = QED_LL2_TX_DEST_LB; + data.p_connection_handle = &iwarp_info->ll2_mpa_handle; + data.input.secondary_queue = true; + data.cbs = &cbs; + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) + goto err; + + rc = qed_ll2_establish_connection(p_hwfn, iwarp_info->ll2_mpa_handle); + if (rc) + goto err; + + rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, + data.input.rx_num_desc, + buff_size, + iwarp_info->ll2_mpa_handle); + if (rc) + goto err; + + iwarp_info->partial_fpdus = kcalloc((u16)p_hwfn->p_rdma_info->num_qps, + sizeof(*iwarp_info->partial_fpdus), + GFP_KERNEL); + if (!iwarp_info->partial_fpdus) { + rc = -ENOMEM; + goto err; + } + + iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; + + iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); + if (!iwarp_info->mpa_intermediate_buf) { + rc = -ENOMEM; + goto err; + } + + /* The mpa_bufs array serves for pending RX packets received on the + * mpa ll2 that don't have place on the tx ring and require later + * processing. We can't fail on allocation of such a struct therefore + * we allocate enough to take care of all rx packets + */ + iwarp_info->mpa_bufs = kcalloc(data.input.rx_num_desc, + sizeof(*iwarp_info->mpa_bufs), + GFP_KERNEL); + if (!iwarp_info->mpa_bufs) { + rc = -ENOMEM; + goto err; + } + + INIT_LIST_HEAD(&iwarp_info->mpa_buf_pending_list); + INIT_LIST_HEAD(&iwarp_info->mpa_buf_list); + for (i = 0; i < data.input.rx_num_desc; i++) + list_add_tail(&iwarp_info->mpa_bufs[i].list_entry, + &iwarp_info->mpa_buf_list); + return rc; +err: + qed_iwarp_ll2_stop(p_hwfn); + + return rc; +} + +static struct { + u32 two_ports; + u32 four_ports; +} qed_iwarp_rcv_wnd_size[MAX_CHIP_IDS] = { + {QED_IWARP_RCV_WND_SIZE_DEF_BB_2P, QED_IWARP_RCV_WND_SIZE_DEF_BB_4P}, + {QED_IWARP_RCV_WND_SIZE_DEF_AH_2P, QED_IWARP_RCV_WND_SIZE_DEF_AH_4P} +}; + +int qed_iwarp_setup(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params) +{ + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_iwarp_info *iwarp_info; + enum chip_ids chip_id; + u32 rcv_wnd_size; + + iwarp_info = &p_hwfn->p_rdma_info->iwarp; + + iwarp_info->tcp_flags = QED_IWARP_TS_EN; + + chip_id = QED_IS_BB(cdev) ? CHIP_BB : CHIP_K2; + rcv_wnd_size = (qed_device_num_ports(cdev) == 4) ? 
+ qed_iwarp_rcv_wnd_size[chip_id].four_ports : + qed_iwarp_rcv_wnd_size[chip_id].two_ports; + + /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */ + iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) - + ilog2(QED_IWARP_RCV_WND_SIZE_MIN); + iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale; + iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED; + iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED; + + iwarp_info->peer2peer = QED_IWARP_PARAM_P2P; + + iwarp_info->rtr_type = MPA_RTR_TYPE_ZERO_SEND | + MPA_RTR_TYPE_ZERO_WRITE | + MPA_RTR_TYPE_ZERO_READ; + + spin_lock_init(&p_hwfn->p_rdma_info->iwarp.qp_lock); + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.ep_list); + INIT_LIST_HEAD(&p_hwfn->p_rdma_info->iwarp.listen_list); + + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP, + qed_iwarp_async_event); + qed_ooo_setup(p_hwfn); + + return qed_iwarp_ll2_start(p_hwfn, params, rcv_wnd_size); +} + +int qed_iwarp_stop(struct qed_hwfn *p_hwfn) +{ + int rc; + + qed_iwarp_free_prealloc_ep(p_hwfn); + rc = qed_iwarp_wait_for_all_cids(p_hwfn); + if (rc) + return rc; + + return qed_iwarp_ll2_stop(p_hwfn); +} + +static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + qed_iwarp_modify_qp(p_hwfn, ep->qp, QED_IWARP_QP_STATE_ERROR, true); + + params.event = QED_IWARP_EVENT_CLOSE; + params.ep_context = ep; + params.cm_info = &ep->cm_info; + params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ? + 0 : -ECONNRESET; + + /* paired with READ_ONCE in destroy_qp */ + smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + ep->event_cb(ep->cb_context, ¶ms); +} + +static void qed_iwarp_exception_received(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, + int fw_ret_code) +{ + struct qed_iwarp_cm_event_params params; + bool event_cb = false; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "EP(0x%x) fw_ret_code=%d\n", + ep->cid, fw_ret_code); + + switch (fw_ret_code) { + case IWARP_EXCEPTION_DETECTED_LLP_CLOSED: + params.status = 0; + params.event = QED_IWARP_EVENT_DISCONNECT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LLP_RESET: + params.status = -ECONNRESET; + params.event = QED_IWARP_EVENT_DISCONNECT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_RQ_EMPTY: + params.event = QED_IWARP_EVENT_RQ_EMPTY; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_IRQ_FULL: + params.event = QED_IWARP_EVENT_IRQ_FULL; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT: + params.event = QED_IWARP_EVENT_LLP_TIMEOUT; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR: + params.event = QED_IWARP_EVENT_REMOTE_PROTECTION_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW: + params.event = QED_IWARP_EVENT_CQ_OVERFLOW; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LOCAL_CATASTROPHIC: + params.event = QED_IWARP_EVENT_QP_CATASTROPHIC; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_LOCAL_ACCESS_ERROR: + params.event = QED_IWARP_EVENT_LOCAL_ACCESS_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_REMOTE_OPERATION_ERROR: + params.event = QED_IWARP_EVENT_REMOTE_OPERATION_ERROR; + event_cb = true; + break; + case IWARP_EXCEPTION_DETECTED_TERMINATE_RECEIVED: + params.event = QED_IWARP_EVENT_TERMINATE_RECEIVED; + event_cb = 
true; + break; + default: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Unhandled exception received...fw_ret_code=%d\n", + fw_ret_code); + break; + } + + if (event_cb) { + params.ep_context = ep; + params.cm_info = &ep->cm_info; + ep->event_cb(ep->cb_context, ¶ms); + } +} + +static void +qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + struct qed_iwarp_cm_event_params params; + + memset(¶ms, 0, sizeof(params)); + params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE; + params.ep_context = ep; + params.cm_info = &ep->cm_info; + /* paired with READ_ONCE in destroy_qp */ + smp_store_release(&ep->state, QED_IWARP_EP_CLOSED); + + switch (fw_return_code) { + case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "%s(0x%x) TCP connect got invalid packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_TCP_CONNECTION_RST: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "%s(0x%x) TCP Connection Reset\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + case IWARP_CONN_ERROR_TCP_CONNECT_TIMEOUT: + DP_NOTICE(p_hwfn, "%s(0x%x) TCP timeout\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -EBUSY; + break; + case IWARP_CONN_ERROR_MPA_NOT_SUPPORTED_VER: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA not supported VER\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNREFUSED; + break; + case IWARP_CONN_ERROR_MPA_INVALID_PACKET: + DP_NOTICE(p_hwfn, "%s(0x%x) MPA Invalid Packet\n", + QED_IWARP_CONNECT_MODE_STRING(ep), ep->tcp_cid); + params.status = -ECONNRESET; + break; + default: + DP_ERR(p_hwfn, + "%s(0x%x) Unexpected return code tcp connect: %d\n", + QED_IWARP_CONNECT_MODE_STRING(ep), + ep->tcp_cid, fw_return_code); + params.status = -ECONNRESET; + break; + } + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + ep->tcp_cid = QED_IWARP_INVALID_TCP_CID; + qed_iwarp_return_ep(p_hwfn, ep); + } else { + ep->event_cb(ep->cb_context, ¶ms); + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&ep->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + } +} + +static void +qed_iwarp_connect_complete(struct qed_hwfn *p_hwfn, + struct qed_iwarp_ep *ep, u8 fw_return_code) +{ + u8 ll2_syn_handle = p_hwfn->p_rdma_info->iwarp.ll2_syn_handle; + + if (ep->connect_mode == TCP_CONNECT_PASSIVE) { + /* Done with the SYN packet, post back to ll2 rx */ + qed_iwarp_ll2_post_rx(p_hwfn, ep->syn, ll2_syn_handle); + + ep->syn = NULL; + + /* If connect failed - upper layer doesn't know about it */ + if (fw_return_code == RDMA_RETURN_OK) + qed_iwarp_mpa_received(p_hwfn, ep); + else + qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, + fw_return_code); + } else { + if (fw_return_code == RDMA_RETURN_OK) + qed_iwarp_mpa_offload(p_hwfn, ep); + else + qed_iwarp_tcp_connect_unsuccessful(p_hwfn, ep, + fw_return_code); + } +} + +static inline bool +qed_iwarp_check_ep_ok(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep) +{ + if (!ep || (ep->sig != QED_EP_SIG)) { + DP_ERR(p_hwfn, "ERROR ON ASYNC ep=%p\n", ep); + return false; + } + + return true; +} + +static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + __le16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; + struct regpair *fw_handle = &data->rdma_data.async_handle; + struct qed_iwarp_ep *ep = NULL; + u16 srq_offset; + u16 srq_id; + u16 cid; + + ep = 
(struct qed_iwarp_ep *)(uintptr_t)HILO_64(fw_handle->hi, + fw_handle->lo); + + switch (fw_event_code) { + case IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE: + /* Async completion after TCP 3-way handshake */ + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "EP(0x%x) IWARP_EVENT_TYPE_ASYNC_CONNECT_COMPLETE fw_ret_code=%d\n", + ep->tcp_cid, fw_return_code); + qed_iwarp_connect_complete(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED: + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_exception_received(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE: + /* Async completion for Close Connection ramrod */ + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_qp_in_error(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_ENHANCED_MPA_REPLY_ARRIVED: + /* Async event for active side only */ + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_MPA_REPLY_ARRIVED fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_mpa_reply_arrived(p_hwfn, ep); + break; + case IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE: + if (!qed_iwarp_check_ep_ok(p_hwfn, ep)) + return -EINVAL; + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "QP(0x%x) IWARP_EVENT_TYPE_ASYNC_MPA_HANDSHAKE_COMPLETE fw_ret_code=%d\n", + ep->cid, fw_return_code); + qed_iwarp_mpa_complete(p_hwfn, ep, fw_return_code); + break; + case IWARP_EVENT_TYPE_ASYNC_CID_CLEANED: + cid = (u16)le32_to_cpu(fw_handle->lo); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "(0x%x)IWARP_EVENT_TYPE_ASYNC_CID_CLEANED\n", cid); + qed_iwarp_cid_cleaned(p_hwfn, cid); + + break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_EMPTY, + &srq_id); + break; + case IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT\n"); + srq_offset = p_hwfn->p_rdma_info->srq_id_offset; + /* FW assigns value that is no greater than u16 */ + srq_id = ((u16)le32_to_cpu(fw_handle->lo)) - srq_offset; + events.affiliated_event(events.context, + QED_IWARP_EVENT_SRQ_LIMIT, + &srq_id); + break; + case IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW: + DP_NOTICE(p_hwfn, "IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW\n"); + + p_hwfn->p_rdma_info->events.affiliated_event( + p_hwfn->p_rdma_info->events.context, + QED_IWARP_EVENT_CQ_OVERFLOW, + (void *)fw_handle); + break; + default: + DP_ERR(p_hwfn, "Received unexpected async iwarp event %d\n", + fw_event_code); + return -EINVAL; + } + return 0; +} + +int +qed_iwarp_create_listen(void *rdma_cxt, + struct qed_iwarp_listen_in *iparams, + struct qed_iwarp_listen_out *oparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_iwarp_listener *listener; + + listener = kzalloc(sizeof(*listener), GFP_KERNEL); + if (!listener) + return -ENOMEM; + + listener->ip_version = iparams->ip_version; + memcpy(listener->ip_addr, iparams->ip_addr, 
sizeof(listener->ip_addr)); + listener->port = iparams->port; + listener->vlan = iparams->vlan; + + listener->event_cb = iparams->event_cb; + listener->cb_context = iparams->cb_context; + listener->max_backlog = iparams->max_backlog; + oparams->handle = listener; + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_add_tail(&listener->list_entry, + &p_hwfn->p_rdma_info->iwarp.listen_list); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "callback=%p handle=%p ip=%x:%x:%x:%x port=0x%x vlan=0x%x\n", + listener->event_cb, + listener, + listener->ip_addr[0], + listener->ip_addr[1], + listener->ip_addr[2], + listener->ip_addr[3], listener->port, listener->vlan); + + return 0; +} + +int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle) +{ + struct qed_iwarp_listener *listener = handle; + struct qed_hwfn *p_hwfn = rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "handle=%p\n", handle); + + spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + list_del(&listener->list_entry); + spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock); + + kfree(listener); + + return 0; +} + +int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams) +{ + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_iwarp_ep *ep; + struct qed_rdma_qp *qp; + int rc; + + ep = iparams->ep_context; + if (!ep) { + DP_ERR(p_hwfn, "Ep Context receive in send_rtr is NULL\n"); + return -EINVAL; + } + + qp = ep->qp; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP(0x%x) EP(0x%x)\n", + qp->icid, ep->tcp_cid); + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR, + PROTOCOLID_IWARP, &init_data); + + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = 0x%x\n", rc); + + return rc; +} + +void +qed_iwarp_query_qp(struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) +{ + out_params->state = qed_iwarp2roce_state(qp->iwarp_state); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h new file mode 100644 index 0000000000..c3872cd945 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h @@ -0,0 +1,206 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef _QED_IWARP_H +#define _QED_IWARP_H + +enum qed_iwarp_qp_state { + QED_IWARP_QP_STATE_IDLE, + QED_IWARP_QP_STATE_RTS, + QED_IWARP_QP_STATE_TERMINATE, + QED_IWARP_QP_STATE_CLOSING, + QED_IWARP_QP_STATE_ERROR, +}; + +enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); + +#define QED_IWARP_PREALLOC_CNT (256) + +#define QED_IWARP_LL2_SYN_TX_SIZE (128) +#define QED_IWARP_LL2_SYN_RX_SIZE (256) + +#define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) +#define QED_IWARP_MAX_OOO (16) +#define QED_IWARP_LL2_OOO_MAX_RX_SIZE (16384) + +#define QED_IWARP_HANDLE_INVAL (0xff) + +struct qed_iwarp_ll2_buff { + struct qed_iwarp_ll2_buff *piggy_buf; + void *data; + dma_addr_t data_phys_addr; + u32 buff_size; +}; + +struct qed_iwarp_ll2_mpa_buf { + struct list_head list_entry; + struct qed_iwarp_ll2_buff *ll2_buf; + struct unaligned_opaque_data data; + u16 tcp_payload_len; + u8 placement_offset; +}; + +/* In some cases a fpdu will arrive with only one byte of the header, in this + * case the fpdu_length will be partial (contain only higher byte and + * incomplete bytes will contain the invalid value + */ +#define QED_IWARP_INVALID_INCOMPLETE_BYTES 0xffff + +struct qed_iwarp_fpdu { + struct qed_iwarp_ll2_buff *mpa_buf; + void *mpa_frag_virt; + dma_addr_t mpa_frag; + dma_addr_t pkt_hdr; + u16 mpa_frag_len; + u16 fpdu_length; + u16 incomplete_bytes; + u8 pkt_hdr_size; +}; + +struct qed_iwarp_info { + struct list_head listen_list; /* qed_iwarp_listener */ + struct list_head ep_list; /* qed_iwarp_ep */ + struct list_head ep_free_list; /* pre-allocated ep's */ + struct list_head mpa_buf_list; /* list of mpa_bufs */ + struct list_head mpa_buf_pending_list; + spinlock_t iw_lock; /* for iwarp resources */ + spinlock_t qp_lock; /* for teardown races */ + u32 rcv_wnd_scale; + u16 rcv_wnd_size; + u16 max_mtu; + u8 mac_addr[ETH_ALEN]; + u8 crc_needed; + u8 tcp_flags; + u8 ll2_syn_handle; + u8 ll2_ooo_handle; + u8 ll2_mpa_handle; + u8 peer2peer; + enum mpa_negotiation_mode mpa_rev; + enum mpa_rtr_type rtr_type; + struct qed_iwarp_fpdu *partial_fpdus; + struct qed_iwarp_ll2_mpa_buf *mpa_bufs; + u8 *mpa_intermediate_buf; + u16 max_num_partial_fpdus; +}; + +enum qed_iwarp_ep_state { + QED_IWARP_EP_INIT, + QED_IWARP_EP_MPA_REQ_RCVD, + QED_IWARP_EP_MPA_OFFLOADED, + QED_IWARP_EP_ESTABLISHED, + QED_IWARP_EP_CLOSED +}; + +union async_output { + struct iwarp_eqe_data_mpa_async_completion mpa_response; + struct iwarp_eqe_data_tcp_async_completion mpa_request; +}; + +#define QED_MAX_PRIV_DATA_LEN (512) +struct qed_iwarp_ep_memory { + u8 in_pdata[QED_MAX_PRIV_DATA_LEN]; + u8 out_pdata[QED_MAX_PRIV_DATA_LEN]; + union async_output async_output; +}; + +/* Endpoint structure represents a TCP connection. This connection can be + * associated with a QP or not (in which case QP==NULL) + */ +struct qed_iwarp_ep { + struct list_head list_entry; + struct qed_rdma_qp *qp; + struct qed_iwarp_ep_memory *ep_buffer_virt; + dma_addr_t ep_buffer_phys; + enum qed_iwarp_ep_state state; + int sig; + struct qed_iwarp_cm_info cm_info; + enum tcp_connect_mode connect_mode; + enum mpa_rtr_type rtr_type; + enum mpa_negotiation_mode mpa_rev; + u32 tcp_cid; + u32 cid; + u16 mss; + u8 remote_mac_addr[6]; + u8 local_mac_addr[6]; + bool mpa_reply_processed; + + /* For Passive side - syn packet related data */ + u16 syn_ip_payload_length; + struct qed_iwarp_ll2_buff *syn; + dma_addr_t syn_phy_addr; + + /* The event_cb function is called for asynchrounous events associated + * with the ep. 
It is initialized at different entry points depending + * on whether the ep is the tcp connection active side or passive side + * The cb_context is passed to the event_cb function. + */ + iwarp_event_handler event_cb; + void *cb_context; +}; + +struct qed_iwarp_listener { + struct list_head list_entry; + + /* The event_cb function is called for connection requests. + * The cb_context is passed to the event_cb function. + */ + iwarp_event_handler event_cb; + void *cb_context; + u32 max_backlog; + u32 ip_addr[4]; + u16 port; + u16 vlan; + u8 ip_version; +}; + +int qed_iwarp_alloc(struct qed_hwfn *p_hwfn); + +int qed_iwarp_setup(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params); + +void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn, + struct iwarp_init_func_ramrod_data *p_ramrod); + +int qed_iwarp_stop(struct qed_hwfn *p_hwfn); + +void qed_iwarp_resc_free(struct qed_hwfn *p_hwfn); + +void qed_iwarp_init_devinfo(struct qed_hwfn *p_hwfn); + +void qed_iwarp_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +int qed_iwarp_create_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_create_qp_out_params *out_params); + +int qed_iwarp_modify_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp, + enum qed_iwarp_qp_state new_state, bool internal); + +int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); + +int qed_iwarp_fw_destroy(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); + +void qed_iwarp_query_qp(struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params); + +int +qed_iwarp_connect(void *rdma_cxt, + struct qed_iwarp_connect_in *iparams, + struct qed_iwarp_connect_out *oparams); + +int +qed_iwarp_create_listen(void *rdma_cxt, + struct qed_iwarp_listen_in *iparams, + struct qed_iwarp_listen_out *oparams); + +int qed_iwarp_accept(void *rdma_cxt, struct qed_iwarp_accept_in *iparams); + +int qed_iwarp_reject(void *rdma_cxt, struct qed_iwarp_reject_in *iparams); +int qed_iwarp_destroy_listen(void *rdma_cxt, void *handle); + +int qed_iwarp_send_rtr(void *rdma_cxt, struct qed_iwarp_send_rtr_in *iparams); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c new file mode 100644 index 0000000000..970b9aabbc --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -0,0 +1,2920 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/workqueue.h> +#include <linux/bitops.h> +#include <linux/bug.h> +#include <linux/vmalloc.h> +#include "qed.h" +#include <linux/qed/qed_chain.h> +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_dev_api.h" +#include <linux/qed/qed_eth_if.h> +#include "qed_hsi.h" +#include "qed_iro_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_l2.h" +#include "qed_mcp.h" +#include "qed_ptp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" + +#define QED_MAX_SGES_NUM 16 +#define CRC32_POLY 0x1edc6f41 + +struct qed_l2_info { + u32 queues; + unsigned long **pp_qid_usage; + + /* The lock is meant to synchronize access to the qid usage */ + struct mutex lock; +}; + +int qed_l2_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_l2_info *p_l2_info; + unsigned long **pp_qids; + u32 i; + + if (!QED_IS_L2_PERSONALITY(p_hwfn)) + return 0; + + p_l2_info = kzalloc(sizeof(*p_l2_info), GFP_KERNEL); + if (!p_l2_info) + return -ENOMEM; + p_hwfn->p_l2_info = p_l2_info; + + if (IS_PF(p_hwfn->cdev)) { + p_l2_info->queues = RESC_NUM(p_hwfn, QED_L2_QUEUE); + } else { + u8 rx = 0, tx = 0; + + qed_vf_get_num_rxqs(p_hwfn, &rx); + qed_vf_get_num_txqs(p_hwfn, &tx); + + p_l2_info->queues = max_t(u8, rx, tx); + } + + pp_qids = kcalloc(p_l2_info->queues, sizeof(unsigned long *), + GFP_KERNEL); + if (!pp_qids) + return -ENOMEM; + p_l2_info->pp_qid_usage = pp_qids; + + for (i = 0; i < p_l2_info->queues; i++) { + pp_qids[i] = kzalloc(MAX_QUEUES_PER_QZONE / 8, GFP_KERNEL); + if (!pp_qids[i]) + return -ENOMEM; + } + + return 0; +} + +void qed_l2_setup(struct qed_hwfn *p_hwfn) +{ + if (!QED_IS_L2_PERSONALITY(p_hwfn)) + return; + + mutex_init(&p_hwfn->p_l2_info->lock); +} + +void qed_l2_free(struct qed_hwfn *p_hwfn) +{ + u32 i; + + if (!QED_IS_L2_PERSONALITY(p_hwfn)) + return; + + if (!p_hwfn->p_l2_info) + return; + + if (!p_hwfn->p_l2_info->pp_qid_usage) + goto out_l2_info; + + /* Free until hit first uninitialized entry */ + for (i = 0; i < p_hwfn->p_l2_info->queues; i++) { + if (!p_hwfn->p_l2_info->pp_qid_usage[i]) + break; + kfree(p_hwfn->p_l2_info->pp_qid_usage[i]); + } + + kfree(p_hwfn->p_l2_info->pp_qid_usage); + +out_l2_info: + kfree(p_hwfn->p_l2_info); + p_hwfn->p_l2_info = NULL; +} + +static bool qed_eth_queue_qid_usage_add(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + struct qed_l2_info *p_l2_info = p_hwfn->p_l2_info; + u16 queue_id = p_cid->rel.queue_id; + bool b_rc = true; + u8 first; + + mutex_lock(&p_l2_info->lock); + + if (queue_id >= p_l2_info->queues) { + DP_NOTICE(p_hwfn, + "Requested to increase usage for qzone %04x out of %08x\n", + queue_id, p_l2_info->queues); + b_rc = false; + goto out; + } + + first = (u8)find_first_zero_bit(p_l2_info->pp_qid_usage[queue_id], + MAX_QUEUES_PER_QZONE); + if (first >= MAX_QUEUES_PER_QZONE) { + b_rc = false; + goto out; + } + + __set_bit(first, p_l2_info->pp_qid_usage[queue_id]); + p_cid->qid_usage_idx = first; + +out: + mutex_unlock(&p_l2_info->lock); + return b_rc; +} + +static void qed_eth_queue_qid_usage_del(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + mutex_lock(&p_hwfn->p_l2_info->lock); + + clear_bit(p_cid->qid_usage_idx, + 
p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]); + + mutex_unlock(&p_hwfn->p_l2_info->lock); +} + +void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + bool b_legacy_vf = !!(p_cid->vf_legacy & QED_QCID_LEGACY_VF_CID); + + if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) + _qed_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid); + + /* For PF's VFs we maintain the index inside queue-zone in IOV */ + if (p_cid->vfid == QED_QUEUE_CID_SELF) + qed_eth_queue_qid_usage_del(p_hwfn, p_cid); + + vfree(p_cid); +} + +/* The internal is only meant to be directly called by PFs initializeing CIDs + * for their VFs. + */ +static struct qed_queue_cid * +_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + struct qed_queue_start_common_params *p_params, + bool b_is_rx, + struct qed_queue_cid_vf_params *p_vf_params) +{ + struct qed_queue_cid *p_cid; + int rc; + + p_cid = vzalloc(sizeof(*p_cid)); + if (!p_cid) + return NULL; + + p_cid->opaque_fid = opaque_fid; + p_cid->cid = cid; + p_cid->p_owner = p_hwfn; + + /* Fill in parameters */ + p_cid->rel.vport_id = p_params->vport_id; + p_cid->rel.queue_id = p_params->queue_id; + p_cid->rel.stats_id = p_params->stats_id; + p_cid->sb_igu_id = p_params->p_sb->igu_sb_id; + p_cid->b_is_rx = b_is_rx; + p_cid->sb_idx = p_params->sb_idx; + + /* Fill-in bits related to VFs' queues if information was provided */ + if (p_vf_params) { + p_cid->vfid = p_vf_params->vfid; + p_cid->vf_qid = p_vf_params->vf_qid; + p_cid->vf_legacy = p_vf_params->vf_legacy; + } else { + p_cid->vfid = QED_QUEUE_CID_SELF; + } + + /* Don't try calculating the absolute indices for VFs */ + if (IS_VF(p_hwfn->cdev)) { + p_cid->abs = p_cid->rel; + goto out; + } + + /* Calculate the engine-absolute indices of the resources. + * This would guarantee they're valid later on. + * In some cases [SBs] we already have the right values. + */ + rc = qed_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id); + if (rc) + goto fail; + + rc = qed_fw_l2_queue(p_hwfn, p_cid->rel.queue_id, &p_cid->abs.queue_id); + if (rc) + goto fail; + + /* In case of a PF configuring its VF's queues, the stats-id is already + * absolute [since there's a single index that's suitable per-VF]. + */ + if (p_cid->vfid == QED_QUEUE_CID_SELF) { + rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id, + &p_cid->abs.stats_id); + if (rc) + goto fail; + } else { + p_cid->abs.stats_id = p_cid->rel.stats_id; + } + +out: + /* VF-images have provided the qid_usage_idx on their own. + * Otherwise, we need to allocate a unique one. 
+ */ + if (!p_vf_params) { + if (!qed_eth_queue_qid_usage_add(p_hwfn, p_cid)) + goto fail; + } else { + p_cid->qid_usage_idx = p_vf_params->qid_usage_idx; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n", + p_cid->opaque_fid, + p_cid->cid, + p_cid->rel.vport_id, + p_cid->abs.vport_id, + p_cid->rel.queue_id, + p_cid->qid_usage_idx, + p_cid->abs.queue_id, + p_cid->rel.stats_id, + p_cid->abs.stats_id, p_cid->sb_igu_id, p_cid->sb_idx); + + return p_cid; + +fail: + vfree(p_cid); + return NULL; +} + +struct qed_queue_cid * +qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + bool b_is_rx, + struct qed_queue_cid_vf_params *p_vf_params) +{ + struct qed_queue_cid *p_cid; + u8 vfid = QED_CXT_PF_CID; + bool b_legacy_vf = false; + u32 cid = 0; + + /* In case of legacy VFs, The CID can be derived from the additional + * VF parameters - the VF assumes queue X uses CID X, so we can simply + * use the vf_qid for this purpose as well. + */ + if (p_vf_params) { + vfid = p_vf_params->vfid; + + if (p_vf_params->vf_legacy & QED_QCID_LEGACY_VF_CID) { + b_legacy_vf = true; + cid = p_vf_params->vf_qid; + } + } + + /* Get a unique firmware CID for this queue, in case it's a PF. + * VF's don't need a CID as the queue configuration will be done + * by PF. + */ + if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) { + if (_qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, + &cid, vfid)) { + DP_NOTICE(p_hwfn, "Failed to acquire cid\n"); + return NULL; + } + } + + p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, + p_params, b_is_rx, p_vf_params); + if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf) + _qed_cxt_release_cid(p_hwfn, cid, vfid); + + return p_cid; +} + +static struct qed_queue_cid * +qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + bool b_is_rx, + struct qed_queue_start_common_params *p_params) +{ + return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx, + NULL); +} + +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params) +{ + struct vport_start_ramrod_data *p_ramrod = NULL; + struct eth_vport_tpa_param *tpa_param; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 min_size, rx_mode = 0; + u8 abs_vport_id = 0; + int rc; + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + return rc; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_START, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vport_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->mtu = cpu_to_le16(p_params->mtu); + p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; + p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; + p_ramrod->drop_ttl0_en = p_params->drop_ttl0; + p_ramrod->untagged = p_params->only_untagged; + + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); + + p_ramrod->rx_mode.state = cpu_to_le16(rx_mode); + + /* TPA related fields */ + tpa_param = &p_ramrod->tpa_param; + memset(tpa_param, 0, sizeof(*tpa_param)); + + tpa_param->max_buff_num = p_params->max_buffers_per_cqe; + + switch (p_params->tpa_mode) { + case QED_TPA_MODE_GRO: + min_size = 
p_params->mtu / 2; + + tpa_param->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + tpa_param->tpa_max_size = cpu_to_le16(U16_MAX); + tpa_param->tpa_min_size_to_cont = cpu_to_le16(min_size); + tpa_param->tpa_min_size_to_start = cpu_to_le16(min_size); + tpa_param->tpa_ipv4_en_flg = 1; + tpa_param->tpa_ipv6_en_flg = 1; + tpa_param->tpa_pkt_split_flg = 1; + tpa_param->tpa_gro_consistent_flg = 1; + break; + default: + break; + } + + p_ramrod->tx_switching_en = p_params->tx_switching; + + p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac; + p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype; + + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ + p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, + p_params->concrete_fid); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params) +{ + if (IS_VF(p_hwfn->cdev)) { + return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id, + p_params->mtu, + p_params->remove_inner_vlan, + p_params->tpa_mode, + p_params->max_buffers_per_cqe, + p_params->only_untagged); + } + + return qed_sp_eth_vport_start(p_hwfn, p_params); +} + +static int +qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_rss_params *p_rss) +{ + struct eth_vport_rss_config *p_config; + u16 capabilities = 0; + int i, table_size; + int rc = 0; + + if (!p_rss) { + p_ramrod->common.update_rss_flg = 0; + return rc; + } + p_config = &p_ramrod->rss_config; + + BUILD_BUG_ON(QED_RSS_IND_TABLE_SIZE != ETH_RSS_IND_TABLE_ENTRIES_NUM); + + rc = qed_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); + if (rc) + return rc; + + p_ramrod->common.update_rss_flg = p_rss->update_rss_config; + p_config->update_rss_capabilities = p_rss->update_rss_capabilities; + p_config->update_rss_ind_table = p_rss->update_rss_ind_table; + p_config->update_rss_key = p_rss->update_rss_key; + + p_config->rss_mode = p_rss->rss_enable ? 
+ ETH_VPORT_RSS_MODE_REGULAR : + ETH_VPORT_RSS_MODE_DISABLED; + + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, + !!(p_rss->rss_caps & QED_RSS_IPV4)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, + !!(p_rss->rss_caps & QED_RSS_IPV6)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, + !!(p_rss->rss_caps & QED_RSS_IPV4_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, + !!(p_rss->rss_caps & QED_RSS_IPV6_TCP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, + !!(p_rss->rss_caps & QED_RSS_IPV4_UDP)); + SET_FIELD(capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, + !!(p_rss->rss_caps & QED_RSS_IPV6_UDP)); + p_config->tbl_size = p_rss->rss_table_size_log; + + p_config->capabilities = cpu_to_le16(capabilities); + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", + p_ramrod->common.update_rss_flg, + p_config->rss_mode, + p_config->update_rss_capabilities, + p_config->capabilities, + p_config->update_rss_ind_table, p_config->update_rss_key); + + table_size = min_t(int, QED_RSS_IND_TABLE_SIZE, + 1 << p_config->tbl_size); + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_queue = p_rss->rss_ind_table[i]; + + if (!p_queue) + return -EINVAL; + + p_config->indirection_table[i] = + cpu_to_le16(p_queue->abs.queue_id); + } + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "Configured RSS indirection table [%d entries]:\n", + table_size); + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i += 0x10) { + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFUP, + "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n", + le16_to_cpu(p_config->indirection_table[i]), + le16_to_cpu(p_config->indirection_table[i + 1]), + le16_to_cpu(p_config->indirection_table[i + 2]), + le16_to_cpu(p_config->indirection_table[i + 3]), + le16_to_cpu(p_config->indirection_table[i + 4]), + le16_to_cpu(p_config->indirection_table[i + 5]), + le16_to_cpu(p_config->indirection_table[i + 6]), + le16_to_cpu(p_config->indirection_table[i + 7]), + le16_to_cpu(p_config->indirection_table[i + 8]), + le16_to_cpu(p_config->indirection_table[i + 9]), + le16_to_cpu(p_config->indirection_table[i + 10]), + le16_to_cpu(p_config->indirection_table[i + 11]), + le16_to_cpu(p_config->indirection_table[i + 12]), + le16_to_cpu(p_config->indirection_table[i + 13]), + le16_to_cpu(p_config->indirection_table[i + 14]), + le16_to_cpu(p_config->indirection_table[i + 15])); + } + + for (i = 0; i < 10; i++) + p_config->rss_key[i] = cpu_to_le32(p_rss->rss_key[i]); + + return rc; +} + +static void +qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_filter_accept_flags accept_flags) +{ + p_ramrod->common.update_rx_mode_flg = + accept_flags.update_rx_mode_config; + + p_ramrod->common.update_tx_mode_flg = + accept_flags.update_tx_mode_config; + + /* Set Rx mode accept flags */ + if (p_ramrod->common.update_rx_mode_flg) { + u8 accept_filter = accept_flags.rx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, + !(!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) || + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, + !(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) || + !!(accept_filter & 
QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & QED_ACCEPT_BCAST)); + + SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI, + !!(accept_filter & QED_ACCEPT_ANY_VNI)); + + p_ramrod->rx_mode.state = cpu_to_le16(state); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "p_ramrod->rx_mode.state = 0x%x\n", state); + } + + /* Set Tx mode accept flags */ + if (p_ramrod->common.update_tx_mode_flg) { + u8 accept_filter = accept_flags.tx_accept_filter; + u16 state = 0; + + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, + !!(accept_filter & QED_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, + !!(accept_filter & QED_ACCEPT_NONE)); + + SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & QED_ACCEPT_BCAST)); + + p_ramrod->tx_mode.state = cpu_to_le16(state); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "p_ramrod->tx_mode.state = 0x%x\n", state); + } +} + +static void +qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + const struct qed_sge_tpa_params *param) +{ + struct eth_vport_tpa_param *tpa; + + if (!param) { + p_ramrod->common.update_tpa_param_flg = 0; + p_ramrod->common.update_tpa_en_flg = 0; + p_ramrod->common.update_tpa_param_flg = 0; + return; + } + + p_ramrod->common.update_tpa_en_flg = param->update_tpa_en_flg; + tpa = &p_ramrod->tpa_param; + tpa->tpa_ipv4_en_flg = param->tpa_ipv4_en_flg; + tpa->tpa_ipv6_en_flg = param->tpa_ipv6_en_flg; + tpa->tpa_ipv4_tunn_en_flg = param->tpa_ipv4_tunn_en_flg; + tpa->tpa_ipv6_tunn_en_flg = param->tpa_ipv6_tunn_en_flg; + + p_ramrod->common.update_tpa_param_flg = param->update_tpa_param_flg; + tpa->max_buff_num = param->max_buffers_per_cqe; + tpa->tpa_pkt_split_flg = param->tpa_pkt_split_flg; + tpa->tpa_hdr_data_split_flg = param->tpa_hdr_data_split_flg; + tpa->tpa_gro_consistent_flg = param->tpa_gro_consistent_flg; + tpa->tpa_max_aggs_num = param->tpa_max_aggs_num; + tpa->tpa_max_size = cpu_to_le16(param->tpa_max_size); + tpa->tpa_min_size_to_start = cpu_to_le16(param->tpa_min_size_to_start); + tpa->tpa_min_size_to_cont = cpu_to_le16(param->tpa_min_size_to_cont); +} + +static void +qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct qed_sp_vport_update_params *p_params) +{ + int i; + + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + + if (!p_params->update_approx_mcast_flg) + return; + + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = p_params->bins; + + p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]); + } +} + +int qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_rss_params *p_rss_params = p_params->rss_params; + struct vport_update_ramrod_data_cmn *p_cmn; + struct qed_sp_init_data init_data; + struct vport_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = 
NULL; + u8 abs_vport_id = 0, val; + int rc = -EINVAL; + + if (IS_VF(p_hwfn->cdev)) { + rc = qed_vf_pf_vport_update(p_hwfn, p_params); + return rc; + } + + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + return rc; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + /* Copy input params to ramrod according to FW struct */ + p_ramrod = &p_ent->ramrod.vport_update; + p_cmn = &p_ramrod->common; + + p_cmn->vport_id = abs_vport_id; + p_cmn->rx_active_flg = p_params->vport_active_rx_flg; + p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; + p_cmn->tx_active_flg = p_params->vport_active_tx_flg; + p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; + p_cmn->accept_any_vlan = p_params->accept_any_vlan; + val = p_params->update_accept_any_vlan_flg; + p_cmn->update_accept_any_vlan_flg = val; + + p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg; + val = p_params->update_inner_vlan_removal_flg; + p_cmn->update_inner_vlan_removal_en_flg = val; + + p_cmn->default_vlan_en = p_params->default_vlan_enable_flg; + val = p_params->update_default_vlan_enable_flg; + p_cmn->update_default_vlan_en_flg = val; + + p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan); + p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg; + + p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg; + + p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; + p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg; + + p_cmn->anti_spoofing_en = p_params->anti_spoofing_en; + val = p_params->update_anti_spoofing_en_flg; + p_ramrod->common.update_anti_spoofing_en_flg = val; + + rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); + if (rc) { + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; + } + + if (p_params->update_ctl_frame_check) { + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; + } + + /* Update mcast bins for VFs, PF doesn't use this functionality */ + qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); + + qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); + qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params); + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) +{ + struct vport_stop_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u8 abs_vport_id = 0; + int rc; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_vport_stop(p_hwfn); + + rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc) + return rc; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_STOP, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vport_stop; + p_ramrod->vport_id = abs_vport_id; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn, + struct qed_filter_accept_flags *p_accept_flags) +{ + struct qed_sp_vport_update_params s_params; + + 
memset(&s_params, 0, sizeof(s_params)); + memcpy(&s_params.accept_flags, p_accept_flags, + sizeof(struct qed_filter_accept_flags)); + + return qed_vf_pf_vport_update(p_hwfn, &s_params); +} + +static int qed_filter_accept_cmd(struct qed_dev *cdev, + u8 vport, + struct qed_filter_accept_flags accept_flags, + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_sp_vport_update_params vport_update_params; + int i, rc; + + /* Prepare and send the vport rx_mode change */ + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = vport; + vport_update_params.accept_flags = accept_flags; + vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; + vport_update_params.accept_any_vlan = accept_any_vlan; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + + if (IS_VF(cdev)) { + rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags); + if (rc) + return rc; + continue; + } + + rc = qed_sp_vport_update(p_hwfn, &vport_update_params, + comp_mode, p_comp_data); + if (rc) { + DP_ERR(cdev, "Update rx_mode failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Accept filter configured, flags = [Rx]%x [Tx]%x\n", + accept_flags.rx_accept_filter, + accept_flags.tx_accept_filter); + if (update_accept_any_vlan) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "accept_any_vlan=%d configured\n", + accept_any_vlan); + } + + return 0; +} + +int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) +{ + struct rx_queue_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n", + p_cid->opaque_fid, p_cid->cid, + p_cid->abs.queue_id, p_cid->abs.vport_id, p_cid->sb_igu_id); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_START, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_start; + + p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); + p_ramrod->complete_cqe_flg = 0; + p_ramrod->complete_event_flg = 1; + + p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); + DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); + + p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); + DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); + + if (p_cid->vfid != QED_QUEUE_CID_SELF) { + bool b_legacy_vf = !!(p_cid->vf_legacy & + QED_QCID_LEGACY_VF_RX_PROD); + + p_ramrod->vf_rx_prod_index = p_cid->vf_qid; + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Queue%s is meant for VF rxq[%02x]\n", + b_legacy_vf ? 
" [legacy]" : "", p_cid->vf_qid); + p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf; + } + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) +{ + u32 init_prod_val = 0; + + *pp_prod = (u8 __iomem *) + p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM, + MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)(&init_prod_val)); + + return qed_eth_rxq_start_ramrod(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size); +} + +static int +qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + struct qed_rxq_start_ret_params *p_ret_params) +{ + struct qed_queue_cid *p_cid; + int rc; + + /* Allocate a CID for the queue */ + p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params); + if (!p_cid) + return -ENOMEM; + + if (IS_PF(p_hwfn->cdev)) { + rc = qed_eth_pf_rx_queue_start(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, + &p_ret_params->p_prod); + } else { + rc = qed_vf_pf_rxq_start(p_hwfn, p_cid, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, + cqe_pbl_size, &p_ret_params->p_prod); + } + + /* Provide the caller with a reference to as handler */ + if (rc) + qed_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; + + return rc; +} + +int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, + void **pp_rxq_handles, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct rx_queue_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + struct qed_queue_cid *p_cid; + int rc = -EINVAL; + u8 i; + + memset(&init_data, 0, sizeof(init_data)); + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + for (i = 0; i < num_rxqs; i++) { + p_cid = ((struct qed_queue_cid **)pp_rxq_handles)[i]; + + /* Get SPQ entry */ + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_update; + p_ramrod->vport_id = p_cid->abs.vport_id; + + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); + p_ramrod->complete_cqe_flg = complete_cqe_flg; + p_ramrod->complete_event_flg = complete_event_flg; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + } + + return rc; +} + +static int +qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + bool b_eq_completion_only, bool b_cqe_completion) +{ + struct rx_queue_stop_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_STOP, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = 
&p_ent->ramrod.rx_queue_stop; + p_ramrod->vport_id = p_cid->abs.vport_id; + p_ramrod->rx_queue_id = cpu_to_le16(p_cid->abs.queue_id); + + /* Cleaning the queue requires the completion to arrive there. + * In addition, VFs require the answer to come as eqe to PF. + */ + p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) && + !b_eq_completion_only) || + b_cqe_completion; + p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) || + b_eq_completion_only; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, bool cqe_completion) +{ + struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_rxq; + int rc = -EINVAL; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_rx_queue_stop(p_hwfn, p_cid, + eq_completion_only, + cqe_completion); + else + rc = qed_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion); + + if (!rc) + qed_eth_queue_cid_release(p_hwfn, p_cid); + return rc; +} + +int +qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id) +{ + struct tx_queue_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_TX_QUEUE_START, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.tx_queue_start; + p_ramrod->vport_id = p_cid->abs.vport_id; + + p_ramrod->sb_id = cpu_to_le16(p_cid->sb_igu_id); + p_ramrod->sb_index = p_cid->sb_idx; + p_ramrod->stats_counter_id = p_cid->abs.stats_id; + + p_ramrod->queue_zone_id = cpu_to_le16(p_cid->abs.queue_id); + p_ramrod->same_as_last_id = cpu_to_le16(p_cid->abs.queue_id); + + p_ramrod->pbl_size = cpu_to_le16(pbl_size); + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); + + p_ramrod->qm_pq_id = cpu_to_le16(pq_id); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u8 tc, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) +{ + int rc; + + rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, + pbl_addr, pbl_size, + qed_get_cm_pq_idx_mcos(p_hwfn, tc)); + if (rc) + return rc; + + /* Provide the caller with the necessary return values */ + *pp_doorbell = p_hwfn->doorbells + + qed_db_addr(p_cid->cid, DQ_DEMS_LEGACY); + + return 0; +} + +static int +qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + u8 tc, + dma_addr_t pbl_addr, + u16 pbl_size, + struct qed_txq_start_ret_params *p_ret_params) +{ + struct qed_queue_cid *p_cid; + int rc; + + p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params); + if (!p_cid) + return -EINVAL; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_tx_queue_start(p_hwfn, p_cid, tc, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); + else + rc = qed_vf_pf_txq_start(p_hwfn, p_cid, + pbl_addr, pbl_size, + &p_ret_params->p_doorbell); + + if (rc) + qed_eth_queue_cid_release(p_hwfn, p_cid); + else + p_ret_params->p_handle = (void *)p_cid; + + return rc; +} + +static int +qed_eth_pf_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + 
memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_cid->cid; + init_data.opaque_fid = p_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_TX_QUEUE_STOP, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_handle) +{ + struct qed_queue_cid *p_cid = (struct qed_queue_cid *)p_handle; + int rc; + + if (IS_PF(p_hwfn->cdev)) + rc = qed_eth_pf_tx_queue_stop(p_hwfn, p_cid); + else + rc = qed_vf_pf_txq_stop(p_hwfn, p_cid); + + if (!rc) + qed_eth_queue_cid_release(p_hwfn, p_cid); + return rc; +} + +static enum eth_filter_action qed_filter_action(enum qed_filter_opcode opcode) +{ + enum eth_filter_action action = MAX_ETH_FILTER_ACTION; + + switch (opcode) { + case QED_FILTER_ADD: + action = ETH_FILTER_ACTION_ADD; + break; + case QED_FILTER_REMOVE: + action = ETH_FILTER_ACTION_REMOVE; + break; + case QED_FILTER_FLUSH: + action = ETH_FILTER_ACTION_REMOVE_ALL; + break; + default: + action = MAX_ETH_FILTER_ACTION; + } + + return action; +} + +static int +qed_filter_ucast_common(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + struct vport_filter_update_ramrod_data **pp_ramrod, + struct qed_spq_entry **pp_ent, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + u8 vport_to_add_to = 0, vport_to_remove_from = 0; + struct vport_filter_update_ramrod_data *p_ramrod; + struct eth_filter_cmd *p_first_filter; + struct eth_filter_cmd *p_second_filter; + struct qed_sp_init_data init_data; + enum eth_filter_action action; + int rc; + + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &vport_to_remove_from); + if (rc) + return rc; + + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + &vport_to_add_to); + if (rc) + return rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, pp_ent, + ETH_RAMROD_FILTERS_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; + p_ramrod = *pp_ramrod; + p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; + p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 
1 : 0; + + switch (p_filter_cmd->opcode) { + case QED_FILTER_REPLACE: + case QED_FILTER_MOVE: + p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; + default: + p_ramrod->filter_cmd_hdr.cmd_cnt = 1; break; + } + + p_first_filter = &p_ramrod->filter_cmds[0]; + p_second_filter = &p_ramrod->filter_cmds[1]; + + switch (p_filter_cmd->type) { + case QED_FILTER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_MAC; break; + case QED_FILTER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_VLAN; break; + case QED_FILTER_MAC_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_PAIR; break; + case QED_FILTER_INNER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; break; + case QED_FILTER_INNER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; break; + case QED_FILTER_INNER_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; break; + case QED_FILTER_INNER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; + break; + case QED_FILTER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; break; + case QED_FILTER_VNI: + p_first_filter->type = ETH_FILTER_TYPE_VNI; break; + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) { + qed_set_fw_mac_addr(&p_first_filter->mac_msb, + &p_first_filter->mac_mid, + &p_first_filter->mac_lsb, + (u8 *)p_filter_cmd->mac); + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) + p_first_filter->vlan_id = cpu_to_le16(p_filter_cmd->vlan); + + if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_VNI)) + p_first_filter->vni = cpu_to_le32(p_filter_cmd->vni); + + if (p_filter_cmd->opcode == QED_FILTER_MOVE) { + p_second_filter->type = p_first_filter->type; + p_second_filter->mac_msb = p_first_filter->mac_msb; + p_second_filter->mac_mid = p_first_filter->mac_mid; + p_second_filter->mac_lsb = p_first_filter->mac_lsb; + p_second_filter->vlan_id = p_first_filter->vlan_id; + p_second_filter->vni = p_first_filter->vni; + + p_first_filter->action = ETH_FILTER_ACTION_REMOVE; + + p_first_filter->vport_id = vport_to_remove_from; + + p_second_filter->action = ETH_FILTER_ACTION_ADD; + p_second_filter->vport_id = vport_to_add_to; + } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { + p_first_filter->vport_id = vport_to_add_to; + memcpy(p_second_filter, p_first_filter, + sizeof(*p_second_filter)); + p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; + p_second_filter->action = ETH_FILTER_ACTION_ADD; + } else { + action = qed_filter_action(p_filter_cmd->opcode); + + if (action == MAX_ETH_FILTER_ACTION) { + DP_NOTICE(p_hwfn, + "%d is not supported yet\n", + p_filter_cmd->opcode); + qed_sp_destroy_request(p_hwfn, *pp_ent); + return -EINVAL; + } + + p_first_filter->action = action; + p_first_filter->vport_id = (p_filter_cmd->opcode == + QED_FILTER_REMOVE) ? 
+ vport_to_remove_from : + vport_to_add_to; + } + + return 0; +} + +int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct vport_filter_update_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct eth_filter_cmd_header *p_header; + int rc; + + rc = qed_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, + &p_ramrod, &p_ent, + comp_mode, p_comp_data); + if (rc) { + DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); + return rc; + } + p_header = &p_ramrod->filter_cmd_hdr; + p_header->assert_on_error = p_filter_cmd->assert_on_error; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) { + DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", + (p_filter_cmd->opcode == QED_FILTER_ADD) ? "ADD" : + ((p_filter_cmd->opcode == QED_FILTER_REMOVE) ? + "REMOVE" : + ((p_filter_cmd->opcode == QED_FILTER_MOVE) ? + "MOVE" : "REPLACE")), + (p_filter_cmd->type == QED_FILTER_MAC) ? "MAC" : + ((p_filter_cmd->type == QED_FILTER_VLAN) ? + "VLAN" : "MAC & VLAN"), + p_ramrod->filter_cmd_hdr.cmd_cnt, + p_filter_cmd->is_rx_filter, + p_filter_cmd->is_tx_filter); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", + p_filter_cmd->vport_to_add_to, + p_filter_cmd->vport_to_remove_from, + p_filter_cmd->mac[0], + p_filter_cmd->mac[1], + p_filter_cmd->mac[2], + p_filter_cmd->mac[3], + p_filter_cmd->mac[4], + p_filter_cmd->mac[5], + p_filter_cmd->vlan); + + return 0; +} + +/******************************************************************************* + * Description: + * Calculates crc 32 on a buffer + * Note: crc32_length MUST be aligned to 8 + * Return: + ******************************************************************************/ +static u32 qed_calc_crc32c(u8 *crc32_packet, + u32 crc32_length, u32 crc32_seed, u8 complement) +{ + u32 byte = 0, bit = 0, crc32_result = crc32_seed; + u8 msb = 0, current_byte = 0; + + if ((!crc32_packet) || + (crc32_length == 0) || + ((crc32_length % 8) != 0)) + return crc32_result; + for (byte = 0; byte < crc32_length; byte++) { + current_byte = crc32_packet[byte]; + for (bit = 0; bit < 8; bit++) { + msb = (u8)(crc32_result >> 31); + crc32_result = crc32_result << 1; + if (msb != (0x1 & (current_byte >> bit))) { + crc32_result = crc32_result ^ CRC32_POLY; + crc32_result |= 1; /*crc32_result[0] = 1;*/ + } + } + } + return crc32_result; +} + +static u32 qed_crc32c_le(u32 seed, u8 *mac, u32 len) +{ + u32 packet_buf[2] = { 0 }; + + memcpy((u8 *)(&packet_buf[0]), &mac[0], 6); + return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0); +} + +u8 qed_mcast_bin_from_mac(u8 *mac) +{ + u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, + mac, ETH_ALEN); + + return crc & 0xff; +} + +static int +qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct vport_update_ramrod_data *p_ramrod = NULL; + u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u8 abs_vport_id = 0; + int rc, i; + + if (p_filter_cmd->opcode == QED_FILTER_ADD) + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + 
&abs_vport_id); + else + rc = qed_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &abs_vport_id); + if (rc) + return rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc) { + DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); + return rc; + } + + p_ramrod = &p_ent->ramrod.vport_update; + p_ramrod->common.update_approx_mcast_flg = 1; + + /* explicitly clear out the entire vector */ + memset(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + memset(bins, 0, sizeof(bins)); + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport + */ + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit, nbits; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); + nbits = sizeof(u32) * BITS_PER_BYTE; + bins[bit / nbits] |= 1 << (bit % nbits); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; + + p_ramrod_bins = &p_ramrod->approx_mcast; + p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]); + } + } + + p_ramrod->common.vport_id = abs_vport_id; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_filter_mcast_cmd(struct qed_dev *cdev, + struct qed_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + int rc = 0; + int i; + + /* only ADD and REMOVE operations are supported for multi-cast */ + if ((p_filter_cmd->opcode != QED_FILTER_ADD && + (p_filter_cmd->opcode != QED_FILTER_REMOVE)) || + (p_filter_cmd->num_mc_addrs > QED_MAX_MC_ADDRS)) + return -EINVAL; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + u16 opaque_fid; + + if (IS_VF(cdev)) { + qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); + continue; + } + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_eth_filter_mcast(p_hwfn, + opaque_fid, + p_filter_cmd, + comp_mode, p_comp_data); + } + return rc; +} + +static int qed_filter_ucast_cmd(struct qed_dev *cdev, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + int rc = 0; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u16 opaque_fid; + + if (IS_VF(cdev)) { + rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); + continue; + } + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + rc = qed_sp_eth_filter_ucast(p_hwfn, + opaque_fid, + p_filter_cmd, + comp_mode, p_comp_data); + if (rc) + break; + } + + return rc; +} + +/* Statistics related code */ +static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, u16 statistics_bin) +{ + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_pstorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.pstats.address; + *p_len = p_resp->pfdev_info.stats_info.pstats.len; + } +} + +static noinline_for_stack void +__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 
statistics_bin) +{ + struct eth_pstorm_per_queue_stat pstats; + u32 pstats_addr = 0, pstats_len = 0; + + __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, + statistics_bin); + + memset(&pstats, 0, sizeof(pstats)); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); + + p_stats->common.tx_ucast_bytes += + HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->common.tx_mcast_bytes += + HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->common.tx_bcast_bytes += + HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->common.tx_ucast_pkts += + HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->common.tx_mcast_pkts += + HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->common.tx_bcast_pkts += + HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->common.tx_err_drop_pkts += + HILO_64_REGPAIR(pstats.error_drop_pkts); +} + +static noinline_for_stack void +__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) +{ + struct tstorm_per_port_stat tstats; + u32 tstats_addr, tstats_len; + + if (IS_PF(p_hwfn->cdev)) { + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + tstats_len = sizeof(struct tstorm_per_port_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; + tstats_len = p_resp->pfdev_info.stats_info.tstats.len; + } + + memset(&tstats, 0, sizeof(tstats)); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); + + p_stats->common.mftag_filter_discards += + HILO_64_REGPAIR(tstats.mftag_filter_discard); + p_stats->common.mac_filter_discards += + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); + p_stats->common.gft_filter_drop += + HILO_64_REGPAIR(tstats.eth_gft_drop_pkt); +} + +static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, u16 statistics_bin) +{ + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_ustorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.ustats.address; + *p_len = p_resp->pfdev_info.stats_info.ustats.len; + } +} + +static noinline_for_stack +void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) +{ + struct eth_ustorm_per_queue_stat ustats; + u32 ustats_addr = 0, ustats_len = 0; + + __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, + statistics_bin); + + memset(&ustats, 0, sizeof(ustats)); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); + + p_stats->common.rx_ucast_bytes += + HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->common.rx_mcast_bytes += + HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->common.rx_bcast_bytes += + HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->common.rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->common.rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->common.rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); +} + +static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, u16 statistics_bin) +{ + if (IS_PF(p_hwfn->cdev)) { + *p_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); + 
*p_len = sizeof(struct eth_mstorm_per_queue_stat); + } else { + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.mstats.address; + *p_len = p_resp->pfdev_info.stats_info.mstats.len; + } +} + +static noinline_for_stack void +__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, u16 statistics_bin) +{ + struct eth_mstorm_per_queue_stat mstats; + u32 mstats_addr = 0, mstats_len = 0; + + __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, + statistics_bin); + + memset(&mstats, 0, sizeof(mstats)); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); + + p_stats->common.no_buff_discards += + HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->common.packet_too_big_discard += + HILO_64_REGPAIR(mstats.packet_too_big_discard); + p_stats->common.ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->common.tpa_coalesced_pkts += + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); + p_stats->common.tpa_coalesced_events += + HILO_64_REGPAIR(mstats.tpa_coalesced_events); + p_stats->common.tpa_aborts_num += + HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->common.tpa_coalesced_bytes += + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); +} + +static noinline_for_stack void +__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats) +{ + struct qed_eth_stats_common *p_common = &p_stats->common; + struct port_stats port_stats; + int j; + + memset(&port_stats, 0, sizeof(port_stats)); + + qed_memcpy_from(p_hwfn, p_ptt, &port_stats, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, stats), + sizeof(port_stats)); + + p_common->rx_64_byte_packets += port_stats.eth.r64; + p_common->rx_65_to_127_byte_packets += port_stats.eth.r127; + p_common->rx_128_to_255_byte_packets += port_stats.eth.r255; + p_common->rx_256_to_511_byte_packets += port_stats.eth.r511; + p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023; + p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518; + p_common->rx_crc_errors += port_stats.eth.rfcs; + p_common->rx_mac_crtl_frames += port_stats.eth.rxcf; + p_common->rx_pause_frames += port_stats.eth.rxpf; + p_common->rx_pfc_frames += port_stats.eth.rxpp; + p_common->rx_align_errors += port_stats.eth.raln; + p_common->rx_carrier_errors += port_stats.eth.rfcr; + p_common->rx_oversize_packets += port_stats.eth.rovr; + p_common->rx_jabbers += port_stats.eth.rjbr; + p_common->rx_undersize_packets += port_stats.eth.rund; + p_common->rx_fragments += port_stats.eth.rfrg; + p_common->tx_64_byte_packets += port_stats.eth.t64; + p_common->tx_65_to_127_byte_packets += port_stats.eth.t127; + p_common->tx_128_to_255_byte_packets += port_stats.eth.t255; + p_common->tx_256_to_511_byte_packets += port_stats.eth.t511; + p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023; + p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518; + p_common->tx_pause_frames += port_stats.eth.txpf; + p_common->tx_pfc_frames += port_stats.eth.txpp; + p_common->rx_mac_bytes += port_stats.eth.rbyte; + p_common->rx_mac_uc_packets += port_stats.eth.rxuca; + p_common->rx_mac_mc_packets += port_stats.eth.rxmca; + p_common->rx_mac_bc_packets += port_stats.eth.rxbca; + p_common->rx_mac_frames_ok += port_stats.eth.rxpok; + p_common->tx_mac_bytes += port_stats.eth.tbyte; + p_common->tx_mac_uc_packets += port_stats.eth.txuca; + p_common->tx_mac_mc_packets += 
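/* Editor's note: illustrative sketch, not part of this patch.
 * Unlike the storm counters, the MAC/port statistics are kept by the
 * management FW in the per-port shmem section, so the code above copies the
 * whole block with one qed_memcpy_from() from
 * p_hwfn->mcp_info->port_addr + offsetof(struct public_port, stats) and then
 * folds the individual fields in. The same offsetof() pattern is reused
 * further down for single fields; a hypothetical wrapper:
 *
 *	static u32 example_read_port_field(struct qed_hwfn *p_hwfn,
 *					   struct qed_ptt *p_ptt, size_t off)
 *	{
 *		return qed_rd(p_hwfn, p_ptt,
 *			      p_hwfn->mcp_info->port_addr + off);
 *	}
 *
 *	// e.g. example_read_port_field(p_hwfn, p_ptt,
 *	//	offsetof(struct public_port, link_change_count));
 */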
port_stats.eth.txmca; + p_common->tx_mac_bc_packets += port_stats.eth.txbca; + p_common->tx_mac_ctrl_frames += port_stats.eth.txcf; + for (j = 0; j < 8; j++) { + p_common->brb_truncates += port_stats.brb.brb_truncate[j]; + p_common->brb_discards += port_stats.brb.brb_discard[j]; + } + + if (QED_IS_BB(p_hwfn->cdev)) { + struct qed_eth_stats_bb *p_bb = &p_stats->bb; + + p_bb->rx_1519_to_1522_byte_packets += + port_stats.eth.u0.bb0.r1522; + p_bb->rx_1519_to_2047_byte_packets += + port_stats.eth.u0.bb0.r2047; + p_bb->rx_2048_to_4095_byte_packets += + port_stats.eth.u0.bb0.r4095; + p_bb->rx_4096_to_9216_byte_packets += + port_stats.eth.u0.bb0.r9216; + p_bb->rx_9217_to_16383_byte_packets += + port_stats.eth.u0.bb0.r16383; + p_bb->tx_1519_to_2047_byte_packets += + port_stats.eth.u1.bb1.t2047; + p_bb->tx_2048_to_4095_byte_packets += + port_stats.eth.u1.bb1.t4095; + p_bb->tx_4096_to_9216_byte_packets += + port_stats.eth.u1.bb1.t9216; + p_bb->tx_9217_to_16383_byte_packets += + port_stats.eth.u1.bb1.t16383; + p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec; + p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl; + } else { + struct qed_eth_stats_ah *p_ah = &p_stats->ah; + + p_ah->rx_1519_to_max_byte_packets += + port_stats.eth.u0.ah0.r1519_to_max; + p_ah->tx_1519_to_max_byte_packets = + port_stats.eth.u1.ah1.t1519_to_max; + } + + p_common->link_change_count = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + link_change_count)); +} + +static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *stats, + u16 statistics_bin, bool b_get_port_stats) +{ + __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); + + if (b_get_port_stats && p_hwfn->mcp_info) + __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); +} + +static void _qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic) +{ + u8 fw_vport = 0; + int i; + + memset(stats, 0, sizeof(*stats)); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + bool b_get_port_stats; + + p_ptt = IS_PF(cdev) ? 
qed_ptt_acquire_context(p_hwfn, is_atomic) + : NULL; + if (IS_PF(cdev)) { + /* The main vport index is relative first */ + if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { + DP_ERR(p_hwfn, "No vport available!\n"); + goto out; + } + } + + if (IS_PF(cdev) && !p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + b_get_port_stats = IS_PF(cdev) && IS_LEAD_HWFN(p_hwfn); + __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, + b_get_port_stats); + +out: + if (IS_PF(cdev) && p_ptt) + qed_ptt_release(p_hwfn, p_ptt); + } +} + +void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) +{ + qed_get_vport_stats_context(cdev, stats, false); +} + +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic) +{ + u32 i; + + if (!cdev || cdev->recov_in_prog) { + memset(stats, 0, sizeof(*stats)); + return; + } + + _qed_get_vport_stats(cdev, stats, is_atomic); + + if (!cdev->reset_stats) + return; + + /* Reduce the statistics baseline */ + for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) + ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; +} + +/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ +void qed_reset_vport_stats(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) + : NULL; + u32 addr = 0, len = 0; + + if (IS_PF(cdev) && !p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + memset(&mstats, 0, sizeof(mstats)); + __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); + + memset(&ustats, 0, sizeof(ustats)); + __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); + + memset(&pstats, 0, sizeof(pstats)); + __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); + + if (IS_PF(cdev)) + qed_ptt_release(p_hwfn, p_ptt); + } + + /* PORT statistics are not necessarily reset, so we need to + * read and create a baseline for future statistics. + * Link change stat is maintained by MFW, return its value as is. + */ + if (!cdev->reset_stats) { + DP_INFO(cdev, "Reset stats not allocated\n"); + } else { + _qed_get_vport_stats(cdev, cdev->reset_stats, false); + cdev->reset_stats->common.link_change_count = 0; + } +} + +static enum gft_profile_type +qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode) +{ + if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE) + return GFT_PROFILE_TYPE_4_TUPLE; + if (mode == QED_FILTER_CONFIG_MODE_IP_DEST) + return GFT_PROFILE_TYPE_IP_DST_ADDR; + if (mode == QED_FILTER_CONFIG_MODE_IP_SRC) + return GFT_PROFILE_TYPE_IP_SRC_ADDR; + return GFT_PROFILE_TYPE_L4_DST_PORT; +} + +void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_arfs_config_params *p_cfg_params) +{ + if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits)) + return; + + if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) { + qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, + p_cfg_params->tcp, + p_cfg_params->udp, + p_cfg_params->ipv4, + p_cfg_params->ipv6, + qed_arfs_mode_to_hsi(p_cfg_params->mode)); + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n", + p_cfg_params->tcp ? 
"Enable" : "Disable", + p_cfg_params->udp ? "Enable" : "Disable", + p_cfg_params->ipv4 ? "Enable" : "Disable", + p_cfg_params->ipv6 ? "Enable" : "Disable", + (u32)p_cfg_params->mode); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n"); + qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + } +} + +int +qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, + struct qed_spq_comp_cb *p_cb, + struct qed_ntuple_filter_params *p_params) +{ + struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 abs_rx_q_id = 0; + u8 abs_vport_id = 0; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + + if (p_cb) { + init_data.comp_mode = QED_SPQ_MODE_CB; + init_data.p_comp_data = p_cb; + } else { + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + } + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_UPDATE_GFT_FILTER, + PROTOCOLID_ETH, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rx_update_gft; + + DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr); + p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length); + + if (p_params->b_is_drop) { + p_ramrod->vport_id = cpu_to_le16(ETH_GFT_TRASHCAN_VPORT); + } else { + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc) + goto err; + + if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) { + rc = qed_fw_l2_queue(p_hwfn, p_params->qid, + &abs_rx_q_id); + if (rc) + goto err; + + p_ramrod->rx_qid_valid = 1; + p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id); + } + + p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id); + } + + p_ramrod->flow_id_valid = 0; + p_ramrod->flow_id = 0; + p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER + : GFT_DELETE_FILTER; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n", + abs_vport_id, abs_rx_q_id, + p_params->b_is_add ? 
"Adding" : "Removing", + (u64)p_params->addr, p_params->length); + + return qed_spq_post(p_hwfn, p_ent, NULL); + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; +} + +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_rx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, NULL); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), + CAU_SB_ENTRY_TIMER_RES0); + + address = BAR0_MAP_REG_USDM_RAM + + USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_rx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_tx_coal) +{ + u32 coalesce, address, is_valid; + struct cau_sb_entry sb_entry; + u8 timer_res; + int rc; + + rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY + + p_cid->sb_igu_id * sizeof(u64), + (u64)(uintptr_t)&sb_entry, 2, NULL); + if (rc) { + DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc); + return rc; + } + + timer_res = GET_FIELD(le32_to_cpu(sb_entry.params), + CAU_SB_ENTRY_TIMER_RES1); + + address = BAR0_MAP_REG_XSDM_RAM + + XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id); + coalesce = qed_rd(p_hwfn, p_ptt, address); + + is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID); + if (!is_valid) + return -EINVAL; + + coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET); + *p_tx_coal = (u16)(coalesce << timer_res); + + return 0; +} + +int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, void *handle) +{ + struct qed_queue_cid *p_cid = handle; + struct qed_ptt *p_ptt; + int rc = 0; + + if (IS_VF(p_hwfn->cdev)) { + rc = qed_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid); + if (rc) + DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); + + return rc; + } + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + if (p_cid->b_is_rx) { + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } else { + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal); + if (rc) + goto out; + } + +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +static int qed_fill_eth_dev_info(struct qed_dev *cdev, + struct qed_dev_eth_info *info) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + int i; + + memset(info, 0, sizeof(*info)); + + if (IS_PF(cdev)) { + int max_vf_vlan_filters = 0; + int max_vf_mac_filters = 0; + + info->num_tc = p_hwfn->hw_info.num_hw_tc; + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + u16 num_queues = 0; + + /* Since the feature controls only queue-zones, + * make sure we have the contexts [rx, xdp, tcs] to + * match. + */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + u16 l2_queues = (u16)FEAT_NUM(hwfn, + QED_PF_L2_QUE); + u16 cids; + + cids = hwfn->pf_params.eth_pf_params.num_cons; + cids /= (2 + info->num_tc); + num_queues += min_t(u16, l2_queues, cids); + } + + /* queues might theoretically be >256, but interrupts' + * upper-limit guarantes that it would fit in a u8. 
+ */ + if (cdev->int_params.fp_msix_cnt) { + u8 irqs = cdev->int_params.fp_msix_cnt; + + info->num_queues = (u8)min_t(u16, + num_queues, irqs); + } + } else { + info->num_queues = cdev->num_hwfns; + } + + if (IS_QED_SRIOV(cdev)) { + max_vf_vlan_filters = cdev->p_iov_info->total_vfs * + QED_ETH_VF_NUM_VLAN_FILTERS; + max_vf_mac_filters = cdev->p_iov_info->total_vfs * + QED_ETH_VF_NUM_MAC_FILTERS; + } + info->num_vlan_filters = RESC_NUM(QED_LEADING_HWFN(cdev), + QED_VLAN) - + max_vf_vlan_filters; + info->num_mac_filters = RESC_NUM(QED_LEADING_HWFN(cdev), + QED_MAC) - + max_vf_mac_filters; + + ether_addr_copy(info->port_mac, + cdev->hwfns[0].hw_info.hw_mac_addr); + + info->xdp_supported = true; + } else { + u16 total_cids = 0; + + info->num_tc = 1; + + /* Determine queues & XDP support */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + u8 queues, cids; + + qed_vf_get_num_cids(p_hwfn, &cids); + qed_vf_get_num_rxqs(p_hwfn, &queues); + info->num_queues += queues; + total_cids += cids; + } + + /* Enable VF XDP in case PF guarntees sufficient connections */ + if (total_cids >= info->num_queues * 3) + info->xdp_supported = true; + + qed_vf_get_num_vlan_filters(&cdev->hwfns[0], + (u8 *)&info->num_vlan_filters); + qed_vf_get_num_mac_filters(&cdev->hwfns[0], + (u8 *)&info->num_mac_filters); + qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac); + + info->is_legacy = !!cdev->hwfns[0].vf_iov_info->b_pre_fp_hsi; + } + + qed_fill_dev_info(cdev, &info->common); + + if (IS_VF(cdev)) + eth_zero_addr(info->common.hw_mac); + + return 0; +} + +static void qed_register_eth_ops(struct qed_dev *cdev, + struct qed_eth_cb_ops *ops, void *cookie) +{ + cdev->protocol_ops.eth = ops; + cdev->ops_cookie = cookie; + + /* For VF, we start bulletin reading */ + if (IS_VF(cdev)) + qed_vf_start_iov_wq(cdev); +} + +static bool qed_check_mac(struct qed_dev *cdev, u8 *mac) +{ + if (IS_PF(cdev)) + return true; + + return qed_vf_check_mac(&cdev->hwfns[0], mac); +} + +static int qed_start_vport(struct qed_dev *cdev, + struct qed_start_vport_params *params) +{ + int rc, i; + + for_each_hwfn(cdev, i) { + struct qed_sp_vport_start_params start = { 0 }; + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + start.tpa_mode = params->gro_enable ? 
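/* Editor's note: illustrative sketch, not part of this patch.
 * The queue budget computed in qed_fill_eth_dev_info() above is the minimum
 * of three independent limits: the per-engine L2 queue-zone feature count,
 * the number of connection contexts divided by the contexts each queue
 * needs (rx + xdp + one per TC, i.e. 2 + num_tc), summed over engines, and
 * finally the number of fastpath MSI-X vectors. With assumed example
 * numbers for a single engine:
 *
 *	u16 l2_queues = 64;				// FEAT_NUM(hwfn, QED_PF_L2_QUE)
 *	u16 cids = 320 / (2 + 3);			// 64 queues worth of contexts
 *	u16 num_queues = min_t(u16, l2_queues, cids);	// 64
 *	u8 irqs = 48;					// fp_msix_cnt
 *	u8 final = (u8)min_t(u16, num_queues, irqs);	// 48 usable queues
 *
 * The VF branch applies the same idea in reverse: XDP is only advertised if
 * the PF granted at least three contexts per queue.
 */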
QED_TPA_MODE_GRO : + QED_TPA_MODE_NONE; + start.remove_inner_vlan = params->remove_inner_vlan; + start.only_untagged = true; /* untagged only */ + start.drop_ttl0 = params->drop_ttl0; + start.opaque_fid = p_hwfn->hw_info.opaque_fid; + start.concrete_fid = p_hwfn->hw_info.concrete_fid; + start.handle_ptp_pkts = params->handle_ptp_pkts; + start.vport_id = params->vport_id; + start.max_buffers_per_cqe = 16; + start.mtu = params->mtu; + + rc = qed_sp_vport_start(p_hwfn, &start); + if (rc) { + DP_ERR(cdev, "Failed to start VPORT\n"); + return rc; + } + + rc = qed_hw_start_fastpath(p_hwfn); + if (rc) { + DP_ERR(cdev, "Failed to start VPORT fastpath\n"); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started V-PORT %d with MTU %d\n", + start.vport_id, start.mtu); + } + + if (params->clear_stats) + qed_reset_vport_stats(cdev); + + return 0; +} + +static int qed_stop_vport(struct qed_dev *cdev, u8 vport_id) +{ + int rc, i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_sp_vport_stop(p_hwfn, + p_hwfn->hw_info.opaque_fid, vport_id); + + if (rc) { + DP_ERR(cdev, "Failed to stop VPORT\n"); + return rc; + } + } + return 0; +} + +static int qed_update_vport_rss(struct qed_dev *cdev, + struct qed_update_vport_rss_params *input, + struct qed_rss_params *rss) +{ + int i, fn; + + /* Update configuration with what's correct regardless of CMT */ + rss->update_rss_config = 1; + rss->rss_enable = 1; + rss->update_rss_capabilities = 1; + rss->update_rss_ind_table = 1; + rss->update_rss_key = 1; + rss->rss_caps = input->rss_caps; + memcpy(rss->rss_key, input->rss_key, QED_RSS_KEY_SIZE * sizeof(u32)); + + /* In regular scenario, we'd simply need to take input handlers. + * But in CMT, we'd have to split the handlers according to the + * engine they were configured on. We'd then have to understand + * whether RSS is really required, since 2-queues on CMT doesn't + * require RSS. 
+ */ + if (cdev->num_hwfns == 1) { + memcpy(rss->rss_ind_table, + input->rss_ind_table, + QED_RSS_IND_TABLE_SIZE * sizeof(void *)); + rss->rss_table_size_log = 7; + return 0; + } + + /* Start by copying the non-spcific information to the 2nd copy */ + memcpy(&rss[1], &rss[0], sizeof(struct qed_rss_params)); + + /* CMT should be round-robin */ + for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) { + struct qed_queue_cid *cid = input->rss_ind_table[i]; + struct qed_rss_params *t_rss; + + if (cid->p_owner == QED_LEADING_HWFN(cdev)) + t_rss = &rss[0]; + else + t_rss = &rss[1]; + + t_rss->rss_ind_table[i / cdev->num_hwfns] = cid; + } + + /* Make sure RSS is actually required */ + for_each_hwfn(cdev, fn) { + for (i = 1; i < QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns; i++) { + if (rss[fn].rss_ind_table[i] != + rss[fn].rss_ind_table[0]) + break; + } + if (i == QED_RSS_IND_TABLE_SIZE / cdev->num_hwfns) { + DP_VERBOSE(cdev, NETIF_MSG_IFUP, + "CMT - 1 queue per-hwfn; Disabling RSS\n"); + return -EINVAL; + } + rss[fn].rss_table_size_log = 6; + } + + return 0; +} + +static int qed_update_vport(struct qed_dev *cdev, + struct qed_update_vport_params *params) +{ + struct qed_sp_vport_update_params sp_params; + struct qed_rss_params *rss; + int rc = 0, i; + + if (!cdev) + return -ENODEV; + + rss = vzalloc(array_size(sizeof(*rss), cdev->num_hwfns)); + if (!rss) + return -ENOMEM; + + memset(&sp_params, 0, sizeof(sp_params)); + + /* Translate protocol params into sp params */ + sp_params.vport_id = params->vport_id; + sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; + sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; + sp_params.vport_active_rx_flg = params->vport_active_flg; + sp_params.vport_active_tx_flg = params->vport_active_flg; + sp_params.update_tx_switching_flg = params->update_tx_switching_flg; + sp_params.tx_switching_flg = params->tx_switching_flg; + sp_params.accept_any_vlan = params->accept_any_vlan; + sp_params.update_accept_any_vlan_flg = + params->update_accept_any_vlan_flg; + + /* Prepare the RSS configuration */ + if (params->update_rss_flg) + if (qed_update_vport_rss(cdev, ¶ms->rss_params, rss)) + params->update_rss_flg = 0; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (params->update_rss_flg) + sp_params.rss_params = &rss[i]; + + sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = qed_sp_vport_update(p_hwfn, &sp_params, + QED_SPQ_MODE_EBLOCK, + NULL); + if (rc) { + DP_ERR(cdev, "Failed to update VPORT\n"); + goto out; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Updated V-PORT %d: active_flag %d [update %d]\n", + params->vport_id, params->vport_active_flg, + params->update_vport_active_flg); + } + +out: + vfree(rss); + return rc; +} + +static int qed_start_rxq(struct qed_dev *cdev, + u8 rss_num, + struct qed_queue_start_common_params *p_params, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + struct qed_rxq_start_ret_params *ret_params) +{ + struct qed_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = rss_num % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + p_params->queue_id = p_params->queue_id / cdev->num_hwfns; + p_params->stats_id = p_params->vport_id; + + rc = qed_eth_rx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_params, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, ret_params); + if (rc) { + DP_ERR(cdev, "Failed to start RXQ#%d\n", p_params->queue_id); + return rc; + } + + DP_VERBOSE(cdev, 
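/* Editor's note: illustrative sketch, not part of this patch.
 * On a CMT device (two hwfns) the single indirection table supplied by the
 * caller has to be split, because each engine only sees the queues it owns.
 * qed_update_vport_rss() above does this round-robin: entry i goes to the
 * engine owning that queue-cid and lands at slot i / num_hwfns of that
 * engine's table, whose size log drops from 7 to 6 accordingly.
 * Schematically, for an assumed 8-entry table over queues q0..q3 spread
 * across two engines:
 *
 *	caller:   [q0 q1 q2 q3 q0 q1 q2 q3]
 *	engine 0: [q0 q2 q0 q2]		// even queues, slots i / 2
 *	engine 1: [q1 q3 q1 q3]		// odd queues
 *
 * If an engine's table ends up referencing a single queue, RSS buys nothing
 * there, so the function above bails out with -EINVAL and the caller clears
 * update_rss_flg.
 */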
(QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started RX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", + p_params->queue_id, rss_num, p_params->vport_id, + p_params->p_sb->igu_sb_id); + + return 0; +} + +static int qed_stop_rxq(struct qed_dev *cdev, u8 rss_id, void *handle) +{ + int rc, hwfn_index; + struct qed_hwfn *p_hwfn; + + hwfn_index = rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + rc = qed_eth_rx_queue_stop(p_hwfn, handle, false, false); + if (rc) { + DP_ERR(cdev, "Failed to stop RXQ#%02x\n", rss_id); + return rc; + } + + return 0; +} + +static int qed_start_txq(struct qed_dev *cdev, + u8 rss_num, + struct qed_queue_start_common_params *p_params, + dma_addr_t pbl_addr, + u16 pbl_size, + struct qed_txq_start_ret_params *ret_params) +{ + struct qed_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = rss_num % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + p_params->queue_id = p_params->queue_id / cdev->num_hwfns; + p_params->stats_id = p_params->vport_id; + + rc = qed_eth_tx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_params, p_params->tc, + pbl_addr, pbl_size, ret_params); + + if (rc) { + DP_ERR(cdev, "Failed to start TXQ#%d\n", p_params->queue_id); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), + "Started TX-Q %d [rss_num %d] on V-PORT %d and SB igu %d\n", + p_params->queue_id, rss_num, p_params->vport_id, + p_params->p_sb->igu_sb_id); + + return 0; +} + +#define QED_HW_STOP_RETRY_LIMIT (10) +static int qed_fastpath_stop(struct qed_dev *cdev) +{ + int rc; + + rc = qed_hw_stop_fastpath(cdev); + if (rc) { + DP_ERR(cdev, "Failed to stop Fastpath\n"); + return rc; + } + + return 0; +} + +static int qed_stop_txq(struct qed_dev *cdev, u8 rss_id, void *handle) +{ + struct qed_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = rss_id % cdev->num_hwfns; + p_hwfn = &cdev->hwfns[hwfn_index]; + + rc = qed_eth_tx_queue_stop(p_hwfn, handle); + if (rc) { + DP_ERR(cdev, "Failed to stop TXQ#%02x\n", rss_id); + return rc; + } + + return 0; +} + +static int qed_tunn_configure(struct qed_dev *cdev, + struct qed_tunn_params *tunn_params) +{ + struct qed_tunnel_info tunn_info; + int i, rc; + + memset(&tunn_info, 0, sizeof(tunn_info)); + if (tunn_params->update_vxlan_port) { + tunn_info.vxlan_port.b_update_port = true; + tunn_info.vxlan_port.port = tunn_params->vxlan_port; + } + + if (tunn_params->update_geneve_port) { + tunn_info.geneve_port.b_update_port = true; + tunn_info.geneve_port.port = tunn_params->geneve_port; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + struct qed_tunnel_info *tun; + + tun = &hwfn->cdev->tunnel; + if (IS_PF(cdev)) { + p_ptt = qed_ptt_acquire(hwfn); + if (!p_ptt) + return -EAGAIN; + } else { + p_ptt = NULL; + } + + rc = qed_sp_pf_update_tunn_cfg(hwfn, p_ptt, &tunn_info, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) { + if (IS_PF(cdev)) + qed_ptt_release(hwfn, p_ptt); + return rc; + } + + if (IS_PF_SRIOV(hwfn)) { + u16 vxlan_port, geneve_port; + int j; + + vxlan_port = tun->vxlan_port.port; + geneve_port = tun->geneve_port.port; + + qed_for_each_vf(hwfn, j) { + qed_iov_bulletin_set_udp_ports(hwfn, j, + vxlan_port, + geneve_port); + } + + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + if (IS_PF(cdev)) + qed_ptt_release(hwfn, p_ptt); + } + + return 0; +} + +static int qed_configure_filter_rx_mode(struct qed_dev *cdev, + enum qed_filter_rx_mode_type type) +{ + struct qed_filter_accept_flags accept_flags; + + memset(&accept_flags, 0, sizeof(accept_flags)); + + 
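/* Editor's note: illustrative sketch, not part of this patch.
 * The queue start/stop wrappers above hide CMT from the upper layer: the
 * caller hands in a global queue index, and the wrapper derives both the
 * owning engine and the engine-local queue id from it:
 *
 *	hwfn_index = rss_num % cdev->num_hwfns;		// which engine
 *	local_qid  = queue_id / cdev->num_hwfns;	// index within it
 *
 * So with two engines, global queues 0, 1, 2, 3 map to (engine 0, q0),
 * (engine 1, q0), (engine 0, q1), (engine 1, q1) -- the same round-robin
 * layout the RSS splitting above relies on.
 */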
accept_flags.update_rx_mode_config = 1; + accept_flags.update_tx_mode_config = 1; + accept_flags.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; + accept_flags.tx_accept_filter = QED_ACCEPT_UCAST_MATCHED | + QED_ACCEPT_MCAST_MATCHED | + QED_ACCEPT_BCAST; + + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { + accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | + QED_ACCEPT_MCAST_UNMATCHED; + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { + accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; + } + + return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, + QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter_ucast(struct qed_dev *cdev, + struct qed_filter_ucast_params *params) +{ + struct qed_filter_ucast ucast; + + if (!params->vlan_valid && !params->mac_valid) { + DP_NOTICE(cdev, + "Tried configuring a unicast filter, but both MAC and VLAN are not set\n"); + return -EINVAL; + } + + memset(&ucast, 0, sizeof(ucast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + ucast.opcode = QED_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + ucast.opcode = QED_FILTER_REMOVE; + break; + case QED_FILTER_XCAST_TYPE_REPLACE: + ucast.opcode = QED_FILTER_REPLACE; + break; + default: + DP_NOTICE(cdev, "Unknown unicast filter type %d\n", + params->type); + } + + if (params->vlan_valid && params->mac_valid) { + ucast.type = QED_FILTER_MAC_VLAN; + ether_addr_copy(ucast.mac, params->mac); + ucast.vlan = params->vlan; + } else if (params->mac_valid) { + ucast.type = QED_FILTER_MAC; + ether_addr_copy(ucast.mac, params->mac); + } else { + ucast.type = QED_FILTER_VLAN; + ucast.vlan = params->vlan; + } + + ucast.is_rx_filter = true; + ucast.is_tx_filter = true; + + return qed_filter_ucast_cmd(cdev, &ucast, QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_filter_mcast(struct qed_dev *cdev, + struct qed_filter_mcast_params *params) +{ + struct qed_filter_mcast mcast; + int i; + + memset(&mcast, 0, sizeof(mcast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + mcast.opcode = QED_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + mcast.opcode = QED_FILTER_REMOVE; + break; + default: + DP_NOTICE(cdev, "Unknown multicast filter type %d\n", + params->type); + } + + mcast.num_mc_addrs = params->num; + for (i = 0; i < mcast.num_mc_addrs; i++) + ether_addr_copy(mcast.mac[i], params->mac[i]); + + return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL); +} + +static int qed_configure_arfs_searcher(struct qed_dev *cdev, + enum qed_filter_config_mode mode) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_arfs_config_params arfs_config_params; + + memset(&arfs_config_params, 0, sizeof(arfs_config_params)); + arfs_config_params.tcp = true; + arfs_config_params.udp = true; + arfs_config_params.ipv4 = true; + arfs_config_params.ipv6 = true; + arfs_config_params.mode = mode; + qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt, + &arfs_config_params); + return 0; +} + +static void +qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, u8 fw_return_code) +{ + struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common; + void *dev = p_hwfn->cdev->ops_cookie; + + op->arfs_filter_op(dev, cookie, fw_return_code); +} + +static int 
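/* Editor's note: illustrative sketch, not part of this patch.
 * qed_configure_filter_rx_mode() above builds the rx/tx accept masks from
 * the QED_ACCEPT_* bit flags: the baseline accepts only matched unicast,
 * matched multicast and broadcast, and the two promiscuous modes simply OR
 * the "unmatched" bits on top of it:
 *
 *	u8 base    = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_MCAST_MATCHED |
 *		     QED_ACCEPT_BCAST;
 *	u8 mc_prom = base | QED_ACCEPT_MCAST_UNMATCHED;
 *	u8 promisc = base | QED_ACCEPT_UCAST_UNMATCHED |
 *		     QED_ACCEPT_MCAST_UNMATCHED;
 *
 * The same mask is applied to both rx_accept_filter and tx_accept_filter
 * before qed_filter_accept_cmd() sends them in a single slow-path command.
 */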
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev, + void *cookie, + struct qed_ntuple_filter_params *params) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_spq_comp_cb cb; + int rc = -EINVAL; + + cb.function = qed_arfs_sp_response_handler; + cb.cookie = cookie; + + if (params->b_is_vf) { + if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false, + false)) { + DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n", + params->vf_id); + return rc; + } + + params->vport_id = params->vf_id + 1; + params->qid = QED_RFS_NTUPLE_QID_RSS; + } + + rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to issue a-RFS filter configuration\n"); + else + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, + "Successfully issued a-RFS filter configuration\n"); + + return rc; +} + +static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) +{ + struct qed_queue_cid *p_cid = handle; + struct qed_hwfn *p_hwfn; + int rc; + + p_hwfn = p_cid->p_owner; + rc = qed_get_queue_coalesce(p_hwfn, coal, handle); + if (rc) + DP_VERBOSE(cdev, QED_MSG_DEBUG, + "Unable to read queue coalescing\n"); + + return rc; +} + +static int qed_fp_cqe_completion(struct qed_dev *dev, + u8 rss_id, struct eth_slow_path_rx_cqe *cqe) +{ + return qed_eth_cqe_completion(&dev->hwfns[rss_id % dev->num_hwfns], + cqe); +} + +static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac) +{ + int i, ret; + + if (IS_PF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + ret = qed_vf_pf_bulletin_update_mac(p_hwfn, mac); + if (ret) + return ret; + } + + return 0; +} + +static const struct qed_eth_ops qed_eth_ops_pass = { + .common = &qed_common_ops_pass, +#ifdef CONFIG_QED_SRIOV + .iov = &qed_iov_ops_pass, +#endif +#ifdef CONFIG_DCB + .dcb = &qed_dcbnl_ops_pass, +#endif + .ptp = &qed_ptp_ops_pass, + .fill_dev_info = &qed_fill_eth_dev_info, + .register_ops = &qed_register_eth_ops, + .check_mac = &qed_check_mac, + .vport_start = &qed_start_vport, + .vport_stop = &qed_stop_vport, + .vport_update = &qed_update_vport, + .q_rx_start = &qed_start_rxq, + .q_rx_stop = &qed_stop_rxq, + .q_tx_start = &qed_start_txq, + .q_tx_stop = &qed_stop_txq, + .filter_config_rx_mode = &qed_configure_filter_rx_mode, + .filter_config_ucast = &qed_configure_filter_ucast, + .filter_config_mcast = &qed_configure_filter_mcast, + .fastpath_stop = &qed_fastpath_stop, + .eth_cqe_completion = &qed_fp_cqe_completion, + .get_vport_stats = &qed_get_vport_stats, + .tunn_config = &qed_tunn_configure, + .ntuple_filter_config = &qed_ntuple_arfs_filter_config, + .configure_arfs_searcher = &qed_configure_arfs_searcher, + .get_coalesce = &qed_get_coalesce, + .req_bulletin_update_mac = &qed_req_bulletin_update_mac, +}; + +const struct qed_eth_ops *qed_get_eth_ops(void) +{ + return &qed_eth_ops_pass; +} +EXPORT_SYMBOL(qed_get_eth_ops); + +void qed_put_eth_ops(void) +{ + /* TODO - reference count for module? */ +} +EXPORT_SYMBOL(qed_put_eth_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h new file mode 100644 index 0000000000..2d2f82c785 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -0,0 +1,454 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef _QED_L2_H +#define _QED_L2_H +#include <linux/types.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/qed/qed_eth_if.h> +#include "qed.h" +#include "qed_hw.h" +#include "qed_sp.h" +struct qed_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; + + /* Indirection table consist of rx queue handles */ + void *rss_ind_table[QED_RSS_IND_TABLE_SIZE]; + u32 rss_key[QED_RSS_KEY_SIZE]; +}; + +struct qed_sge_tpa_params { + u8 max_buffers_per_cqe; + + u8 update_tpa_en_flg; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + + u8 update_tpa_param_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + u8 tpa_max_aggs_num; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; +}; + +enum qed_filter_opcode { + QED_FILTER_ADD, + QED_FILTER_REMOVE, + QED_FILTER_MOVE, + QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */ + QED_FILTER_FLUSH, /* Removes all filters */ +}; + +enum qed_filter_ucast_type { + QED_FILTER_MAC, + QED_FILTER_VLAN, + QED_FILTER_MAC_VLAN, + QED_FILTER_INNER_MAC, + QED_FILTER_INNER_VLAN, + QED_FILTER_INNER_PAIR, + QED_FILTER_INNER_MAC_VNI_PAIR, + QED_FILTER_MAC_VNI_PAIR, + QED_FILTER_VNI, +}; + +struct qed_filter_ucast { + enum qed_filter_opcode opcode; + enum qed_filter_ucast_type type; + u8 is_rx_filter; + u8 is_tx_filter; + u8 vport_to_add_to; + u8 vport_to_remove_from; + unsigned char mac[ETH_ALEN]; + u8 assert_on_error; + u16 vlan; + u32 vni; +}; + +struct qed_filter_mcast { + /* MOVE is not supported for multicast */ + enum qed_filter_opcode opcode; + u8 vport_to_add_to; + u8 vport_to_remove_from; + u8 num_mc_addrs; +#define QED_MAX_MC_ADDRS 64 + unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; +}; + +/** + * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue. + * + * @p_hwfn: HW device data. + * @p_rxq: Handler of queue to close + * @eq_completion_only: If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @cqe_completion: If True completion will be receive on CQe. + * + * Return: Int. + */ +int +qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, + void *p_rxq, + bool eq_completion_only, bool cqe_completion); + +/** + * qed_eth_tx_queue_stop(): Closes a Tx queue. + * + * @p_hwfn: HW device data. + * @p_txq: handle to Tx queue needed to be closed. + * + * Return: Int. 
+ */ +int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); + +enum qed_tpa_mode { + QED_TPA_MODE_NONE, + QED_TPA_MODE_UNUSED, + QED_TPA_MODE_GRO, + QED_TPA_MODE_MAX +}; + +struct qed_sp_vport_start_params { + enum qed_tpa_mode tpa_mode; + bool remove_inner_vlan; + bool tx_switching; + bool handle_ptp_pkts; + bool only_untagged; + bool drop_ttl0; + u8 max_buffers_per_cqe; + u32 concrete_fid; + u16 opaque_fid; + u8 vport_id; + u16 mtu; + bool check_mac; + bool check_ethtype; +}; + +int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +struct qed_filter_accept_flags { + u8 update_rx_mode_config; + u8 update_tx_mode_config; + u8 rx_accept_filter; + u8 tx_accept_filter; +#define QED_ACCEPT_NONE 0x01 +#define QED_ACCEPT_UCAST_MATCHED 0x02 +#define QED_ACCEPT_UCAST_UNMATCHED 0x04 +#define QED_ACCEPT_MCAST_MATCHED 0x08 +#define QED_ACCEPT_MCAST_UNMATCHED 0x10 +#define QED_ACCEPT_BCAST 0x20 +#define QED_ACCEPT_ANY_VNI 0x40 +}; + +struct qed_arfs_config_params { + bool tcp; + bool udp; + bool ipv4; + bool ipv6; + enum qed_filter_config_mode mode; +}; + +struct qed_sp_vport_update_params { + u16 opaque_fid; + u8 vport_id; + u8 update_vport_active_rx_flg; + u8 vport_active_rx_flg; + u8 update_vport_active_tx_flg; + u8 vport_active_tx_flg; + u8 update_inner_vlan_removal_flg; + u8 inner_vlan_removal_flg; + u8 silent_vlan_removal_flg; + u8 update_default_vlan_enable_flg; + u8 default_vlan_enable_flg; + u8 update_default_vlan_flg; + u16 default_vlan; + u8 update_tx_switching_flg; + u8 tx_switching_flg; + u8 update_approx_mcast_flg; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + u32 bins[8]; + struct qed_rss_params *rss_params; + struct qed_filter_accept_flags accept_flags; + struct qed_sge_tpa_params *sge_tpa_params; + u8 update_ctl_frame_check; + u8 mac_chk_en; + u8 ethtype_chk_en; +}; + +int qed_sp_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +/** + * qed_sp_vport_stop: This ramrod closes a VPort after all its + * RX and TX queues are terminated. + * An Assert is generated if any queues are left open. + * + * @p_hwfn: HW device data. + * @opaque_fid: Opaque FID + * @vport_id: VPort ID. + * + * Return: Int. + */ +int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); + +int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +/** + * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue. + * It is used for setting the active state + * of the queue and updating the TPA and + * SGE parameters. + * @p_hwfn: HW device data. + * @pp_rxq_handlers: An array of queue handlers to be updated. + * @num_rxqs: number of queues to update. + * @complete_cqe_flg: Post completion to the CQE Ring if set. + * @complete_event_flg: Post completion to the Event Ring if set. + * @comp_mode: Comp mode. + * @p_comp_data: Pointer Comp data. + * + * Return: Int. + * + * Note At the moment - only used by non-linux VFs. + */ + +int +qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, + void **pp_rxq_handlers, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); + +/** + * qed_get_vport_stats(): Fills provided statistics + * struct with statistics. 
+ * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. + */ +void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); + +/** + * qed_get_vport_stats_context(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. + */ +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic); + +void qed_reset_vport_stats(struct qed_dev *cdev); + +/** + * qed_arfs_mode_configure(): Enable or disable rfs mode. + * It must accept at least one of tcp or udp true + * and at least one of ipv4 or ipv6 true to enable + * rfs mode. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_cfg_params: arfs mode configuration parameters. + * + * Return. Void. + */ +void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_arfs_config_params *p_cfg_params); + +/** + * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add + * or remove arfs hw filter + * + * @p_hwfn: HW device data. + * @p_cb: Used for QED_SPQ_MODE_CB,where client would initialize + * it with cookie and callback function address, if not + * using this mode then client must pass NULL. + * @p_params: Pointer to params. + * + * Return: Void. + */ +int +qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, + struct qed_spq_comp_cb *p_cb, + struct qed_ntuple_filter_params *p_params); + +#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8) +#define QED_QUEUE_CID_SELF (0xff) + +/* Almost identical to the qed_queue_start_common_params, + * but here we maintain the SB index in IGU CAM. + */ +struct qed_queue_cid_params { + u8 vport_id; + u16 queue_id; + u8 stats_id; +}; + +/* Additional parameters required for initialization of the queue_cid + * and are relevant only for a PF initializing one for its VFs. + */ +struct qed_queue_cid_vf_params { + /* Should match the VF's relative index */ + u8 vfid; + + /* 0-based queue index. Should reflect the relative qzone the + * VF thinks is associated with it [in its range]. + */ + u8 vf_qid; + + /* Indicates a VF is legacy, making it differ in several things: + * - Producers would be placed in a different place. + * - Makes assumptions regarding the CIDs. + */ + u8 vf_legacy; + + u8 qid_usage_idx; +}; + +struct qed_queue_cid { + /* For stats-id, the `rel' is actually absolute as well */ + struct qed_queue_cid_params rel; + struct qed_queue_cid_params abs; + + /* These have no 'relative' meaning */ + u16 sb_igu_id; + u8 sb_idx; + + u32 cid; + u16 opaque_fid; + + bool b_is_rx; + + /* VFs queues are mapped differently, so we need to know the + * relative queue associated with them [0-based]. + * Notice this is relevant on the *PF* queue-cid of its VF's queues, + * and not on the VF itself. + */ + u8 vfid; + u8 vf_qid; + + /* We need an additional index to differentiate between queues opened + * for same queue-zone, as VFs would have to communicate the info + * to the PF [otherwise PF has no way to differentiate]. 
+ */ + u8 qid_usage_idx; + + u8 vf_legacy; +#define QED_QCID_LEGACY_VF_RX_PROD (BIT(0)) +#define QED_QCID_LEGACY_VF_CID (BIT(1)) + + struct qed_hwfn *p_owner; +}; + +int qed_l2_alloc(struct qed_hwfn *p_hwfn); +void qed_l2_setup(struct qed_hwfn *p_hwfn); +void qed_l2_free(struct qed_hwfn *p_hwfn); + +void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid); + +struct qed_queue_cid * +qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn, + u16 opaque_fid, + struct qed_queue_start_common_params *p_params, + bool b_is_rx, + struct qed_queue_cid_vf_params *p_vf_params); + +int +qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_start_params *p_params); + +/** + * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is + * already prepared + * + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @bd_max_bytes: Max bytes. + * @bd_chain_phys_addr: Chain physcial address. + * @cqe_pbl_addr: PBL address. + * @cqe_pbl_size: PBL size. + * + * Return: Int. + */ +int +qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + +/** + * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is + * already prepared + * + * @p_hwfn: HW device data. + * @p_cid: Pointer CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL size. + * @pq_id: Parameters for choosing the PQ for this Tx queue. + * + * Return: Int. + */ +int +qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id); + +u8 qed_mcast_bin_from_mac(u8 *mac); + +int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); + +int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 coalesce, struct qed_queue_cid *p_cid); + +int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_queue_cid *p_cid, u16 *p_hw_coal); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c new file mode 100644 index 0000000000..ab5ef254a7 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -0,0 +1,2822 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/if_vlan.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/stddef.h> +#include <linux/workqueue.h> +#include <net/ipv6.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/etherdevice.h> +#include <linux/io.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/qed/qed_ll2_if.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_iro_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_ooo.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_rdma.h" + +#define QED_LL2_RX_REGISTERED(ll2) ((ll2)->rx_queue.b_cb_registered) +#define QED_LL2_TX_REGISTERED(ll2) ((ll2)->tx_queue.b_cb_registered) + +#define QED_LL2_TX_SIZE (256) +#define QED_LL2_RX_SIZE (4096) + +#define QED_LL2_INVALID_STATS_ID 0xff + +struct qed_cb_ll2_info { + int rx_cnt; + u32 rx_size; + u8 handle; + + /* Lock protecting LL2 buffer lists in sleepless context */ + spinlock_t lock; + struct list_head list; + + const struct qed_ll2_cb_ops *cbs; + void *cb_cookie; +}; + +struct qed_ll2_buffer { + struct list_head list; + void *data; + dma_addr_t phys_addr; +}; + +static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn, + u8 ll2_queue_type, u8 qid) +{ + u8 stats_id; + + /* For legacy (RAM based) queues, the stats_id will be set as the + * queue_id. Otherwise (context based queue), it will be set to + * the "abs_pf_id" offset from the end of the RAM based queue IDs. + * If the final value exceeds the total counters amount, return + * INVALID value to indicate that the stats for this connection should + * be disabled. 
+ */ + if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) + stats_id = qid; + else + stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id; + + if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS) + return stats_id; + else + return QED_LL2_INVALID_STATS_ID; +} + +static void qed_ll2b_complete_tx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t first_frag_addr, + bool b_last_fragment, + bool b_last_packet) +{ + struct qed_hwfn *p_hwfn = cxt; + struct qed_dev *cdev = p_hwfn->cdev; + struct sk_buff *skb = cookie; + + /* All we need to do is release the mapping */ + dma_unmap_single(&p_hwfn->cdev->pdev->dev, first_frag_addr, + skb_headlen(skb), DMA_TO_DEVICE); + + if (cdev->ll2->cbs && cdev->ll2->cbs->tx_cb) + cdev->ll2->cbs->tx_cb(cdev->ll2->cb_cookie, skb, + b_last_fragment); + + dev_kfree_skb_any(skb); +} + +static int qed_ll2_alloc_buffer(struct qed_dev *cdev, + u8 **data, dma_addr_t *phys_addr) +{ + size_t size = cdev->ll2->rx_size + NET_SKB_PAD + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + *data = kmalloc(size, GFP_ATOMIC); + if (!(*data)) { + DP_INFO(cdev, "Failed to allocate LL2 buffer data\n"); + return -ENOMEM; + } + + *phys_addr = dma_map_single(&cdev->pdev->dev, + ((*data) + NET_SKB_PAD), + cdev->ll2->rx_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&cdev->pdev->dev, *phys_addr)) { + DP_INFO(cdev, "Failed to map LL2 buffer data\n"); + kfree((*data)); + return -ENOMEM; + } + + return 0; +} + +static int qed_ll2_dealloc_buffer(struct qed_dev *cdev, + struct qed_ll2_buffer *buffer) +{ + spin_lock_bh(&cdev->ll2->lock); + + dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, + cdev->ll2->rx_size, DMA_FROM_DEVICE); + kfree(buffer->data); + list_del(&buffer->list); + + cdev->ll2->rx_cnt--; + if (!cdev->ll2->rx_cnt) + DP_INFO(cdev, "All LL2 entries were removed\n"); + + spin_unlock_bh(&cdev->ll2->lock); + + return 0; +} + +static void qed_ll2_kill_buffers(struct qed_dev *cdev) +{ + struct qed_ll2_buffer *buffer, *tmp_buffer; + + list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) + qed_ll2_dealloc_buffer(cdev, buffer); +} + +static void qed_ll2b_complete_rx_packet(void *cxt, + struct qed_ll2_comp_rx_data *data) +{ + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_buffer *buffer = data->cookie; + struct qed_dev *cdev = p_hwfn->cdev; + dma_addr_t new_phys_addr; + struct sk_buff *skb; + bool reuse = false; + int rc = -EINVAL; + u8 *new_data; + + DP_VERBOSE(p_hwfn, + (NETIF_MSG_RX_STATUS | QED_MSG_STORAGE | NETIF_MSG_PKTDATA), + "Got an LL2 Rx completion: [Buffer at phys 0x%llx, offset 0x%02x] Length 0x%04x Parse_flags 0x%04x vlan 0x%04x Opaque data [0x%08x:0x%08x]\n", + (u64)data->rx_buf_addr, + data->u.placement_offset, + data->length.packet_length, + data->parse_flags, + data->vlan, data->opaque_data_0, data->opaque_data_1); + + if ((cdev->dp_module & NETIF_MSG_PKTDATA) && buffer->data) { + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_OFFSET, 16, 1, + buffer->data, data->length.packet_length, false); + } + + /* Determine if data is valid */ + if (data->length.packet_length < ETH_HLEN) + reuse = true; + + /* Allocate a replacement for buffer; Reuse upon failure */ + if (!reuse) + rc = qed_ll2_alloc_buffer(p_hwfn->cdev, &new_data, + &new_phys_addr); + + /* If need to reuse or there's no replacement buffer, repost this */ + if (rc) + goto out_post; + dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, + cdev->ll2->rx_size, DMA_FROM_DEVICE); + + skb = slab_build_skb(buffer->data); + if (!skb) { + DP_INFO(cdev, "Failed to build SKB\n"); + 
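/* Editor's note: illustrative sketch, not part of this patch.
 * The LL2 rx buffers are deliberately laid out so they can later be wrapped
 * into an skb without copying: qed_ll2_alloc_buffer() above over-allocates
 * the kmalloc'ed area and only DMA-maps the middle part the device may
 * write to,
 *
 *	|<- NET_SKB_PAD ->|<--- rx_size (DMA mapped) --->|<- shinfo tail ->|
 *	^data             ^data + NET_SKB_PAD            (SKB_DATA_ALIGN of
 *	                                                  skb_shared_info)
 *
 * which is the layout slab_build_skb() expects here, with the headroom
 * reclaimed via skb_reserve(placement_offset + NET_SKB_PAD) in this
 * completion handler.
 */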
kfree(buffer->data); + goto out_post1; + } + + data->u.placement_offset += NET_SKB_PAD; + skb_reserve(skb, data->u.placement_offset); + skb_put(skb, data->length.packet_length); + skb_checksum_none_assert(skb); + + /* Get parital ethernet information instead of eth_type_trans(), + * Since we don't have an associated net_device. + */ + skb_reset_mac_header(skb); + skb->protocol = eth_hdr(skb)->h_proto; + + /* Pass SKB onward */ + if (cdev->ll2->cbs && cdev->ll2->cbs->rx_cb) { + if (data->vlan) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + data->vlan); + cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb, + data->opaque_data_0, + data->opaque_data_1); + } else { + DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA | + QED_MSG_LL2 | QED_MSG_STORAGE), + "Dropping the packet\n"); + kfree(buffer->data); + } + +out_post1: + /* Update Buffer information and update FW producer */ + buffer->data = new_data; + buffer->phys_addr = new_phys_addr; + +out_post: + rc = qed_ll2_post_rx_buffer(p_hwfn, cdev->ll2->handle, + buffer->phys_addr, 0, buffer, 1); + if (rc) + qed_ll2_dealloc_buffer(cdev, buffer); +} + +static struct qed_ll2_info *__qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, + u8 connection_handle, + bool b_lock, + bool b_only_active) +{ + struct qed_ll2_info *p_ll2_conn, *p_ret = NULL; + + if (connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) + return NULL; + + if (!p_hwfn->p_ll2_info) + return NULL; + + p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; + + if (b_only_active) { + if (b_lock) + mutex_lock(&p_ll2_conn->mutex); + if (p_ll2_conn->b_active) + p_ret = p_ll2_conn; + if (b_lock) + mutex_unlock(&p_ll2_conn->mutex); + } else { + p_ret = p_ll2_conn; + } + + return p_ret; +} + +static struct qed_ll2_info *qed_ll2_handle_sanity(struct qed_hwfn *p_hwfn, + u8 connection_handle) +{ + return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, true); +} + +static struct qed_ll2_info *qed_ll2_handle_sanity_lock(struct qed_hwfn *p_hwfn, + u8 connection_handle) +{ + return __qed_ll2_handle_sanity(p_hwfn, connection_handle, true, true); +} + +static struct qed_ll2_info *qed_ll2_handle_sanity_inactive(struct qed_hwfn + *p_hwfn, + u8 connection_handle) +{ + return __qed_ll2_handle_sanity(p_hwfn, connection_handle, false, false); +} + +static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) +{ + bool b_last_packet = false, b_last_frag = false; + struct qed_ll2_tx_packet *p_pkt = NULL; + struct qed_ll2_info *p_ll2_conn; + struct qed_ll2_tx_queue *p_tx; + unsigned long flags = 0; + dma_addr_t tx_frag; + + p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); + if (!p_ll2_conn) + return; + + p_tx = &p_ll2_conn->tx_queue; + + spin_lock_irqsave(&p_tx->lock, flags); + while (!list_empty(&p_tx->active_descq)) { + p_pkt = list_first_entry(&p_tx->active_descq, + struct qed_ll2_tx_packet, list_entry); + if (!p_pkt) + break; + + list_del(&p_pkt->list_entry); + b_last_packet = list_empty(&p_tx->active_descq); + list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + spin_unlock_irqrestore(&p_tx->lock, flags); + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { + struct qed_ooo_buffer *p_buffer; + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + p_tx->cur_completing_packet = *p_pkt; + p_tx->cur_completing_bd_idx = 1; + b_last_frag = + p_tx->cur_completing_bd_idx == p_pkt->bd_used; + tx_frag = p_pkt->bds_set[0].tx_frag; + 
p_ll2_conn->cbs.tx_release_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + p_pkt->cookie, + tx_frag, + b_last_frag, + b_last_packet); + } + spin_lock_irqsave(&p_tx->lock, flags); + } + spin_unlock_irqrestore(&p_tx->lock, flags); +} + +static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = p_cookie; + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + u16 new_idx = 0, num_bds = 0, num_bds_in_packet = 0; + struct qed_ll2_tx_packet *p_pkt; + bool b_last_frag = false; + unsigned long flags; + int rc = -EINVAL; + + if (!p_ll2_conn) + return rc; + + spin_lock_irqsave(&p_tx->lock, flags); + if (p_tx->b_completing_packet) { + rc = -EBUSY; + goto out; + } + + new_idx = le16_to_cpu(*p_tx->p_fw_cons); + num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); + while (num_bds) { + if (list_empty(&p_tx->active_descq)) + goto out; + + p_pkt = list_first_entry(&p_tx->active_descq, + struct qed_ll2_tx_packet, list_entry); + if (!p_pkt) + goto out; + + p_tx->b_completing_packet = true; + p_tx->cur_completing_packet = *p_pkt; + num_bds_in_packet = p_pkt->bd_used; + list_del(&p_pkt->list_entry); + + if (unlikely(num_bds < num_bds_in_packet)) { + DP_NOTICE(p_hwfn, + "Rest of BDs does not cover whole packet\n"); + goto out; + } + + num_bds -= num_bds_in_packet; + p_tx->bds_idx += num_bds_in_packet; + while (num_bds_in_packet--) + qed_chain_consume(&p_tx->txq_chain); + + p_tx->cur_completing_bd_idx = 1; + b_last_frag = p_tx->cur_completing_bd_idx == p_pkt->bd_used; + list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + + spin_unlock_irqrestore(&p_tx->lock, flags); + + p_ll2_conn->cbs.tx_comp_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + p_pkt->cookie, + p_pkt->bds_set[0].tx_frag, + b_last_frag, !num_bds); + + spin_lock_irqsave(&p_tx->lock, flags); + } + + p_tx->b_completing_packet = false; + rc = 0; +out: + spin_unlock_irqrestore(&p_tx->lock, flags); + return rc; +} + +static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn, + union core_rx_cqe_union *p_cqe, + struct qed_ll2_comp_rx_data *data) +{ + data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_gsi.parse_flags.flags); + data->length.data_length = le16_to_cpu(p_cqe->rx_cqe_gsi.data_length); + data->vlan = le16_to_cpu(p_cqe->rx_cqe_gsi.vlan); + data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi); + data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo); + data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error; + data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id); + + data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp); +} + +static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn, + union core_rx_cqe_union *p_cqe, + struct qed_ll2_comp_rx_data *data) +{ + data->parse_flags = le16_to_cpu(p_cqe->rx_cqe_fp.parse_flags.flags); + data->err_flags = le16_to_cpu(p_cqe->rx_cqe_fp.err_flags.flags); + data->length.packet_length = + le16_to_cpu(p_cqe->rx_cqe_fp.packet_length); + data->vlan = le16_to_cpu(p_cqe->rx_cqe_fp.vlan); + data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[0]); + data->opaque_data_1 = le32_to_cpu(p_cqe->rx_cqe_fp.opaque_data.data[1]); + data->u.placement_offset = p_cqe->rx_cqe_fp.placement_offset; +} + +static int +qed_ll2_handle_slowpath(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn, + union core_rx_cqe_union *p_cqe, + unsigned long *p_lock_flags) +{ + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + struct core_rx_slow_path_cqe *sp_cqe; + + sp_cqe = &p_cqe->rx_cqe_sp; + if (sp_cqe->ramrod_cmd_id != 
CORE_RAMROD_RX_QUEUE_FLUSH) { + DP_NOTICE(p_hwfn, + "LL2 - unexpected Rx CQE slowpath ramrod_cmd_id:%d\n", + sp_cqe->ramrod_cmd_id); + return -EINVAL; + } + + if (!p_ll2_conn->cbs.slowpath_cb) { + DP_NOTICE(p_hwfn, + "LL2 - received RX_QUEUE_FLUSH but no callback was provided\n"); + return -EINVAL; + } + + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); + + p_ll2_conn->cbs.slowpath_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + le32_to_cpu(sp_cqe->opaque_data.data[0]), + le32_to_cpu(sp_cqe->opaque_data.data[1])); + + spin_lock_irqsave(&p_rx->lock, *p_lock_flags); + + return 0; +} + +static int +qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn, + union core_rx_cqe_union *p_cqe, + unsigned long *p_lock_flags, bool b_last_cqe) +{ + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + struct qed_ll2_rx_packet *p_pkt = NULL; + struct qed_ll2_comp_rx_data data; + + if (!list_empty(&p_rx->active_descq)) + p_pkt = list_first_entry(&p_rx->active_descq, + struct qed_ll2_rx_packet, list_entry); + if (unlikely(!p_pkt)) { + DP_NOTICE(p_hwfn, + "[%d] LL2 Rx completion but active_descq is empty\n", + p_ll2_conn->input.conn_type); + + return -EIO; + } + list_del(&p_pkt->list_entry); + + if (p_cqe->rx_cqe_sp.type == CORE_RX_CQE_TYPE_REGULAR) + qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data); + else + qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data); + if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)) + DP_NOTICE(p_hwfn, + "Mismatch between active_descq and the LL2 Rx chain\n"); + + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + + data.connection_handle = p_ll2_conn->my_id; + data.cookie = p_pkt->cookie; + data.rx_buf_addr = p_pkt->rx_buf_addr; + data.b_last_packet = b_last_cqe; + + spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags); + p_ll2_conn->cbs.rx_comp_cb(p_ll2_conn->cbs.cookie, &data); + + spin_lock_irqsave(&p_rx->lock, *p_lock_flags); + + return 0; +} + +static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)cookie; + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + union core_rx_cqe_union *cqe = NULL; + u16 cq_new_idx = 0, cq_old_idx = 0; + unsigned long flags = 0; + int rc = 0; + + if (!p_ll2_conn) + return rc; + + spin_lock_irqsave(&p_rx->lock, flags); + + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) { + spin_unlock_irqrestore(&p_rx->lock, flags); + return 0; + } + + cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + + while (cq_new_idx != cq_old_idx) { + bool b_last_cqe = (cq_new_idx == cq_old_idx); + + cqe = + (union core_rx_cqe_union *) + qed_chain_consume(&p_rx->rcq_chain); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "LL2 [sw. cons %04x, fw. 
at %04x] - Got Packet of type %02x\n", + cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type); + + switch (cqe->rx_cqe_sp.type) { + case CORE_RX_CQE_TYPE_SLOW_PATH: + rc = qed_ll2_handle_slowpath(p_hwfn, p_ll2_conn, + cqe, &flags); + break; + case CORE_RX_CQE_TYPE_GSI_OFFLOAD: + case CORE_RX_CQE_TYPE_REGULAR: + rc = qed_ll2_rxq_handle_completion(p_hwfn, p_ll2_conn, + cqe, &flags, + b_last_cqe); + break; + default: + rc = -EIO; + } + } + + spin_unlock_irqrestore(&p_rx->lock, flags); + return rc; +} + +static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle) +{ + struct qed_ll2_info *p_ll2_conn = NULL; + struct qed_ll2_rx_packet *p_pkt = NULL; + struct qed_ll2_rx_queue *p_rx; + unsigned long flags = 0; + + p_ll2_conn = qed_ll2_handle_sanity_inactive(p_hwfn, connection_handle); + if (!p_ll2_conn) + return; + + p_rx = &p_ll2_conn->rx_queue; + + spin_lock_irqsave(&p_rx->lock, flags); + while (!list_empty(&p_rx->active_descq)) { + p_pkt = list_first_entry(&p_rx->active_descq, + struct qed_ll2_rx_packet, list_entry); + if (!p_pkt) + break; + list_move_tail(&p_pkt->list_entry, &p_rx->free_descq); + spin_unlock_irqrestore(&p_rx->lock, flags); + + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) { + struct qed_ooo_buffer *p_buffer; + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + } else { + dma_addr_t rx_buf_addr = p_pkt->rx_buf_addr; + void *cookie = p_pkt->cookie; + bool b_last; + + b_last = list_empty(&p_rx->active_descq); + p_ll2_conn->cbs.rx_release_cb(p_ll2_conn->cbs.cookie, + p_ll2_conn->my_id, + cookie, + rx_buf_addr, b_last); + } + spin_lock_irqsave(&p_rx->lock, flags); + } + spin_unlock_irqrestore(&p_rx->lock, flags); +} + +static bool +qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn, + struct core_rx_slow_path_cqe *p_cqe) +{ + struct ooo_opaque *ooo_opq; + u32 cid; + + if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH) + return false; + + ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data; + if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES) + return false; + + /* Need to make a flush */ + cid = le32_to_cpu(ooo_opq->cid); + qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid); + + return true; +} + +static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + u16 packet_length = 0, parse_flags = 0, vlan = 0; + struct qed_ll2_rx_packet *p_pkt = NULL; + union core_rx_cqe_union *cqe = NULL; + u16 cq_new_idx = 0, cq_old_idx = 0; + struct qed_ooo_buffer *p_buffer; + struct ooo_opaque *ooo_opq; + u8 placement_offset = 0; + u8 cqe_type; + u32 cid; + + cq_new_idx = le16_to_cpu(*p_rx->p_fw_cons); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + if (cq_new_idx == cq_old_idx) + return 0; + + while (cq_new_idx != cq_old_idx) { + struct core_rx_fast_path_cqe *p_cqe_fp; + + cqe = qed_chain_consume(&p_rx->rcq_chain); + cq_old_idx = qed_chain_get_cons_idx(&p_rx->rcq_chain); + cqe_type = cqe->rx_cqe_sp.type; + + if (cqe_type == CORE_RX_CQE_TYPE_SLOW_PATH) + if (qed_ll2_lb_rxq_handler_slowpath(p_hwfn, + &cqe->rx_cqe_sp)) + continue; + + if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) { + DP_NOTICE(p_hwfn, + "Got a non-regular LB LL2 completion [type 0x%02x]\n", + cqe_type); + return -EINVAL; + } + p_cqe_fp = &cqe->rx_cqe_fp; + + placement_offset = p_cqe_fp->placement_offset; + parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags); + packet_length = 
le16_to_cpu(p_cqe_fp->packet_length); + vlan = le16_to_cpu(p_cqe_fp->vlan); + ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data; + qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq); + cid = le32_to_cpu(ooo_opq->cid); + + /* Process delete isle first */ + if (ooo_opq->drop_size) + qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid, + ooo_opq->drop_isle, + ooo_opq->drop_size); + + if (ooo_opq->ooo_opcode == TCP_EVENT_NOP) + continue; + + /* Now process create/add/join isles */ + if (unlikely(list_empty(&p_rx->active_descq))) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX chain has no submitted buffers\n" + ); + return -EIO; + } + + p_pkt = list_first_entry(&p_rx->active_descq, + struct qed_ll2_rx_packet, list_entry); + + if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT || + ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN || + ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) { + if (unlikely(!p_pkt)) { + DP_NOTICE(p_hwfn, + "LL2 OOO RX packet is not valid\n"); + return -EIO; + } + list_del(&p_pkt->list_entry); + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + p_buffer->packet_length = packet_length; + p_buffer->parse_flags = parse_flags; + p_buffer->vlan = vlan; + p_buffer->placement_offset = placement_offset; + qed_chain_consume(&p_rx->rxq_chain); + list_add_tail(&p_pkt->list_entry, &p_rx->free_descq); + + switch (ooo_opq->ooo_opcode) { + case TCP_EVENT_ADD_NEW_ISLE: + qed_ooo_add_new_isle(p_hwfn, + p_hwfn->p_ooo_info, + cid, + ooo_opq->ooo_isle, + p_buffer); + break; + case TCP_EVENT_ADD_ISLE_RIGHT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + ooo_opq->ooo_isle, + p_buffer, + QED_OOO_RIGHT_BUF); + break; + case TCP_EVENT_ADD_ISLE_LEFT: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + ooo_opq->ooo_isle, + p_buffer, + QED_OOO_LEFT_BUF); + break; + case TCP_EVENT_JOIN: + qed_ooo_add_new_buffer(p_hwfn, + p_hwfn->p_ooo_info, + cid, + ooo_opq->ooo_isle + 1, + p_buffer, + QED_OOO_LEFT_BUF); + qed_ooo_join_isles(p_hwfn, + p_hwfn->p_ooo_info, + cid, ooo_opq->ooo_isle); + break; + case TCP_EVENT_ADD_PEN: + qed_ooo_put_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info, + p_buffer, true); + break; + } + } else { + DP_NOTICE(p_hwfn, + "Unexpected event (%d) TX OOO completion\n", + ooo_opq->ooo_opcode); + } + } + + return 0; +} + +static void +qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ll2_tx_pkt_info tx_pkt; + struct qed_ooo_buffer *p_buffer; + u16 l4_hdr_offset_w; + dma_addr_t first_frag; + u8 bd_flags; + int rc; + + /* Submit Tx buffers here */ + while ((p_buffer = qed_ooo_get_ready_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + l4_hdr_offset_w = 0; + bd_flags = 0; + + first_frag = p_buffer->rx_buffer_phys_addr + + p_buffer->placement_offset; + SET_FIELD(bd_flags, CORE_TX_BD_DATA_FORCE_VLAN_MODE, 1); + SET_FIELD(bd_flags, CORE_TX_BD_DATA_L4_PROTOCOL, 1); + + memset(&tx_pkt, 0, sizeof(tx_pkt)); + tx_pkt.num_of_bds = 1; + tx_pkt.vlan = p_buffer->vlan; + tx_pkt.bd_flags = bd_flags; + tx_pkt.l4_hdr_offset_w = l4_hdr_offset_w; + switch (p_ll2_conn->tx_dest) { + case CORE_TX_DEST_NW: + tx_pkt.tx_dest = QED_LL2_TX_DEST_NW; + break; + case CORE_TX_DEST_LB: + tx_pkt.tx_dest = QED_LL2_TX_DEST_LB; + break; + case CORE_TX_DEST_DROP: + default: + tx_pkt.tx_dest = QED_LL2_TX_DEST_DROP; + break; + } + tx_pkt.first_frag = first_frag; + tx_pkt.first_frag_len = p_buffer->packet_length; + tx_pkt.cookie = p_buffer; + + rc = 
qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, + &tx_pkt, true); + if (rc) { + qed_ooo_put_ready_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer, false); + break; + } + } +} + +static void +qed_ooo_submit_rx_buffers(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + int rc; + + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + rc = qed_ll2_post_rx_buffer(p_hwfn, + p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, + 0, p_buffer, true); + if (rc) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + break; + } + } +} + +static int qed_ll2_lb_rxq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + int rc; + + if (!p_ll2_conn) + return 0; + + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) + return 0; + + rc = qed_ll2_lb_rxq_handler(p_hwfn, p_ll2_conn); + if (rc) + return rc; + + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie) +{ + struct qed_ll2_info *p_ll2_conn = (struct qed_ll2_info *)p_cookie; + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + struct qed_ll2_tx_packet *p_pkt = NULL; + struct qed_ooo_buffer *p_buffer; + bool b_dont_submit_rx = false; + u16 new_idx = 0, num_bds = 0; + int rc; + + if (unlikely(!p_ll2_conn)) + return 0; + + if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn))) + return 0; + + new_idx = le16_to_cpu(*p_tx->p_fw_cons); + num_bds = ((s16)new_idx - (s16)p_tx->bds_idx); + + if (unlikely(!num_bds)) + return 0; + + while (num_bds) { + if (list_empty(&p_tx->active_descq)) + return -EINVAL; + + p_pkt = list_first_entry(&p_tx->active_descq, + struct qed_ll2_tx_packet, list_entry); + if (unlikely(!p_pkt)) + return -EINVAL; + + if (unlikely(p_pkt->bd_used != 1)) { + DP_NOTICE(p_hwfn, + "Unexpectedly many BDs(%d) in TX OOO completion\n", + p_pkt->bd_used); + return -EINVAL; + } + + list_del(&p_pkt->list_entry); + + num_bds--; + p_tx->bds_idx++; + qed_chain_consume(&p_tx->txq_chain); + + p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie; + list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + + if (b_dont_submit_rx) { + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, + p_buffer); + continue; + } + + rc = qed_ll2_post_rx_buffer(p_hwfn, p_ll2_conn->my_id, + p_buffer->rx_buffer_phys_addr, 0, + p_buffer, true); + if (rc != 0) { + qed_ooo_put_free_buffer(p_hwfn, + p_hwfn->p_ooo_info, p_buffer); + b_dont_submit_rx = true; + } + } + + qed_ooo_submit_tx_buffers(p_hwfn, p_ll2_conn); + + return 0; +} + +static void qed_ll2_stop_ooo(struct qed_hwfn *p_hwfn) +{ + u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + + DP_VERBOSE(p_hwfn, (QED_MSG_STORAGE | QED_MSG_LL2), + "Stopping LL2 OOO queue [%02x]\n", *handle); + + qed_ll2_terminate_connection(p_hwfn, *handle); + qed_ll2_release_connection(p_hwfn, *handle); + *handle = QED_LL2_UNUSED_HANDLE; +} + +static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn, + u8 action_on_error) +{ + enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; + struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue; + struct core_rx_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 cqe_pbl_size; + int rc = 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_ll2_conn->cid; + 
init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + CORE_RAMROD_RX_QUEUE_START, + PROTOCOLID_CORE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.core_rx_queue_start; + memset(p_ramrod, 0, sizeof(*p_ramrod)); + p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); + p_ramrod->sb_index = p_rx->rx_sb_index; + p_ramrod->complete_event_flg = 1; + + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); + DMA_REGPAIR_LE(p_ramrod->bd_base, p_rx->rxq_chain.p_phys_addr); + cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain); + p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); + DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, + qed_chain_get_pbl_phys(&p_rx->rcq_chain)); + + p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg; + p_ramrod->inner_vlan_stripping_en = + p_ll2_conn->input.rx_vlan_removal_en; + + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) + p_ramrod->report_outer_vlan = 1; + p_ramrod->queue_id = p_ll2_conn->queue_id; + p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0; + + if (test_bit(QED_MF_LL2_NON_UNICAST, &p_hwfn->cdev->mf_bits) && + p_ramrod->main_func_queue && conn_type != QED_LL2_TYPE_ROCE && + conn_type != QED_LL2_TYPE_IWARP && + (!QED_IS_NVMETCP_PERSONALITY(p_hwfn))) { + p_ramrod->mf_si_bcast_accept_all = 1; + p_ramrod->mf_si_mcast_accept_all = 1; + } else { + p_ramrod->mf_si_bcast_accept_all = 0; + p_ramrod->mf_si_mcast_accept_all = 0; + } + + p_ramrod->action_on_error.error_type = action_on_error; + p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; + p_ramrod->zero_prod_flg = 1; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + enum qed_ll2_conn_type conn_type = p_ll2_conn->input.conn_type; + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + struct core_tx_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u16 pq_id = 0, pbl_size; + int rc = -EINVAL; + + if (!QED_LL2_TX_REGISTERED(p_ll2_conn)) + return 0; + + if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)) + p_ll2_conn->tx_stats_en = 0; + else + p_ll2_conn->tx_stats_en = 1; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_ll2_conn->cid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + CORE_RAMROD_TX_QUEUE_START, + PROTOCOLID_CORE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.core_tx_queue_start; + + p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn)); + p_ramrod->sb_index = p_tx->tx_sb_index; + p_ramrod->mtu = cpu_to_le16(p_ll2_conn->input.mtu); + p_ramrod->stats_en = p_ll2_conn->tx_stats_en; + p_ramrod->stats_id = p_ll2_conn->tx_stats_id; + + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, + qed_chain_get_pbl_phys(&p_tx->txq_chain)); + pbl_size = qed_chain_get_page_cnt(&p_tx->txq_chain); + p_ramrod->pbl_size = cpu_to_le16(pbl_size); + + switch (p_ll2_conn->input.tx_tc) { + case PURE_LB_TC: + pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); + break; + case PKT_LB_TC: + pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OOO); + break; + default: + pq_id = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + break; + } + + p_ramrod->qm_pq_id = cpu_to_le16(pq_id); + + switch (conn_type) { + case 
QED_LL2_TYPE_FCOE: + p_ramrod->conn_type = PROTOCOLID_FCOE; + break; + case QED_LL2_TYPE_TCP_ULP: + p_ramrod->conn_type = PROTOCOLID_TCP_ULP; + break; + case QED_LL2_TYPE_ROCE: + p_ramrod->conn_type = PROTOCOLID_ROCE; + break; + case QED_LL2_TYPE_IWARP: + p_ramrod->conn_type = PROTOCOLID_IWARP; + break; + case QED_LL2_TYPE_OOO: + if (p_hwfn->hw_info.personality == QED_PCI_ISCSI || + p_hwfn->hw_info.personality == QED_PCI_NVMETCP) + p_ramrod->conn_type = PROTOCOLID_TCP_ULP; + else + p_ramrod->conn_type = PROTOCOLID_IWARP; + break; + default: + p_ramrod->conn_type = PROTOCOLID_ETH; + DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type); + } + + p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr, + &p_tx->db_msg, DB_REC_WIDTH_32B, + DB_REC_KERNEL); + return rc; +} + +static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct core_rx_stop_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_ll2_conn->cid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + CORE_RAMROD_RX_QUEUE_STOP, + PROTOCOLID_CORE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.core_rx_queue_stop; + + p_ramrod->complete_event_flg = 1; + p_ramrod->queue_id = p_ll2_conn->queue_id; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_ll2_conn->cid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + CORE_RAMROD_TX_QUEUE_STOP, + PROTOCOLID_CORE, &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int +qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info) +{ + struct qed_chain_init_params params = { + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = p_ll2_info->input.rx_num_desc, + }; + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_ll2_rx_packet *p_descq; + u32 capacity; + int rc = 0; + + if (!p_ll2_info->input.rx_num_desc) + goto out; + + params.mode = QED_CHAIN_MODE_NEXT_PTR; + params.elem_size = sizeof(struct core_rx_bd); + + rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rxq_chain, ¶ms); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n"); + goto out; + } + + capacity = qed_chain_get_capacity(&p_ll2_info->rx_queue.rxq_chain); + p_descq = kcalloc(capacity, sizeof(struct qed_ll2_rx_packet), + GFP_KERNEL); + if (!p_descq) { + rc = -ENOMEM; + DP_NOTICE(p_hwfn, "Failed to allocate ll2 Rx desc\n"); + goto out; + } + p_ll2_info->rx_queue.descq_array = p_descq; + + params.mode = QED_CHAIN_MODE_PBL; + params.elem_size = sizeof(struct core_rx_fast_path_cqe); + + rc = qed_chain_alloc(cdev, &p_ll2_info->rx_queue.rcq_chain, 
¶ms); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n"); + goto out; + } + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n", + p_ll2_info->input.conn_type, p_ll2_info->input.rx_num_desc); + +out: + return rc; +} + +static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info) +{ + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = p_ll2_info->input.tx_num_desc, + .elem_size = sizeof(struct core_tx_bd), + }; + struct qed_ll2_tx_packet *p_descq; + size_t desc_size; + u32 capacity; + int rc = 0; + + if (!p_ll2_info->input.tx_num_desc) + goto out; + + rc = qed_chain_alloc(p_hwfn->cdev, &p_ll2_info->tx_queue.txq_chain, + ¶ms); + if (rc) + goto out; + + capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain); + /* All bds_set elements are flexibily added. */ + desc_size = struct_size(p_descq, bds_set, + p_ll2_info->input.tx_max_bds_per_packet); + + p_descq = kcalloc(capacity, desc_size, GFP_KERNEL); + if (!p_descq) { + rc = -ENOMEM; + goto out; + } + p_ll2_info->tx_queue.descq_mem = p_descq; + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n", + p_ll2_info->input.conn_type, p_ll2_info->input.tx_num_desc); + +out: + if (rc) + DP_NOTICE(p_hwfn, + "Can't allocate memory for Tx LL2 with 0x%08x buffers\n", + p_ll2_info->input.tx_num_desc); + return rc; +} + +static int +qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_info, u16 mtu) +{ + struct qed_ooo_buffer *p_buf = NULL; + void *p_virt; + u16 buf_idx; + int rc = 0; + + if (p_ll2_info->input.conn_type != QED_LL2_TYPE_OOO) + return rc; + + /* Correct number of requested OOO buffers if needed */ + if (!p_ll2_info->input.rx_num_ooo_buffers) { + u16 num_desc = p_ll2_info->input.rx_num_desc; + + if (!num_desc) + return -EINVAL; + p_ll2_info->input.rx_num_ooo_buffers = num_desc * 2; + } + + for (buf_idx = 0; buf_idx < p_ll2_info->input.rx_num_ooo_buffers; + buf_idx++) { + p_buf = kzalloc(sizeof(*p_buf), GFP_KERNEL); + if (!p_buf) { + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_size = mtu + 26 + ETH_CACHE_LINE_SIZE; + p_buf->rx_buffer_size = (p_buf->rx_buffer_size + + ETH_CACHE_LINE_SIZE - 1) & + ~(ETH_CACHE_LINE_SIZE - 1); + p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_buf->rx_buffer_size, + &p_buf->rx_buffer_phys_addr, + GFP_KERNEL); + if (!p_virt) { + kfree(p_buf); + rc = -ENOMEM; + goto out; + } + + p_buf->rx_buffer_virt_addr = p_virt; + qed_ooo_put_free_buffer(p_hwfn, p_hwfn->p_ooo_info, p_buf); + } + + DP_VERBOSE(p_hwfn, QED_MSG_LL2, + "Allocated [%04x] LL2 OOO buffers [each of size 0x%08x]\n", + p_ll2_info->input.rx_num_ooo_buffers, p_buf->rx_buffer_size); + +out: + return rc; +} + +static int +qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs) +{ + if (!cbs || (!cbs->rx_comp_cb || + !cbs->rx_release_cb || + !cbs->tx_comp_cb || !cbs->tx_release_cb || !cbs->cookie)) + return -EINVAL; + + p_ll2_info->cbs.rx_comp_cb = cbs->rx_comp_cb; + p_ll2_info->cbs.rx_release_cb = cbs->rx_release_cb; + p_ll2_info->cbs.tx_comp_cb = cbs->tx_comp_cb; + p_ll2_info->cbs.tx_release_cb = cbs->tx_release_cb; + p_ll2_info->cbs.slowpath_cb = cbs->slowpath_cb; + p_ll2_info->cbs.cookie = cbs->cookie; + + return 0; +} + +static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn, + struct qed_ll2_acquire_data *data, + 
u8 *start_idx, u8 *last_idx) +{ + /* LL2 queues handles will be split as follows: + * First will be the legacy queues, and then the ctx based. + */ + if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { + *start_idx = QED_LL2_LEGACY_CONN_BASE_PF; + *last_idx = *start_idx + + QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF; + } else { + /* QED_LL2_RX_TYPE_CTX */ + *start_idx = QED_LL2_CTX_CONN_BASE_PF; + *last_idx = *start_idx + + QED_MAX_NUM_OF_CTX_LL2_CONNS_PF; + } +} + +static enum core_error_handle +qed_ll2_get_error_choice(enum qed_ll2_error_handle err) +{ + switch (err) { + case QED_LL2_DROP_PACKET: + return LL2_DROP_PACKET; + case QED_LL2_DO_NOTHING: + return LL2_DO_NOTHING; + case QED_LL2_ASSERT: + return LL2_ASSERT; + default: + return LL2_DO_NOTHING; + } +} + +int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data) +{ + struct qed_hwfn *p_hwfn = cxt; + qed_int_comp_cb_t comp_rx_cb, comp_tx_cb; + struct qed_ll2_info *p_ll2_info = NULL; + u8 i, first_idx, last_idx, *p_tx_max; + int rc; + + if (!data->p_connection_handle || !p_hwfn->p_ll2_info) + return -EINVAL; + + _qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx); + + /* Find a free connection to be used */ + for (i = first_idx; i < last_idx; i++) { + mutex_lock(&p_hwfn->p_ll2_info[i].mutex); + if (p_hwfn->p_ll2_info[i].b_active) { + mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); + continue; + } + + p_hwfn->p_ll2_info[i].b_active = true; + p_ll2_info = &p_hwfn->p_ll2_info[i]; + mutex_unlock(&p_hwfn->p_ll2_info[i].mutex); + break; + } + if (!p_ll2_info) + return -EBUSY; + + memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input)); + + switch (data->input.tx_dest) { + case QED_LL2_TX_DEST_NW: + p_ll2_info->tx_dest = CORE_TX_DEST_NW; + break; + case QED_LL2_TX_DEST_LB: + p_ll2_info->tx_dest = CORE_TX_DEST_LB; + break; + case QED_LL2_TX_DEST_DROP: + p_ll2_info->tx_dest = CORE_TX_DEST_DROP; + break; + default: + return -EINVAL; + } + + if (data->input.conn_type == QED_LL2_TYPE_OOO || + data->input.secondary_queue) + p_ll2_info->main_func_queue = false; + else + p_ll2_info->main_func_queue = true; + + /* Correct maximum number of Tx BDs */ + p_tx_max = &p_ll2_info->input.tx_max_bds_per_packet; + if (*p_tx_max == 0) + *p_tx_max = CORE_LL2_TX_MAX_BDS_PER_PACKET; + else + *p_tx_max = min_t(u8, *p_tx_max, + CORE_LL2_TX_MAX_BDS_PER_PACKET); + + rc = qed_ll2_set_cbs(p_ll2_info, data->cbs); + if (rc) { + DP_NOTICE(p_hwfn, "Invalid callback functions\n"); + goto q_allocate_fail; + } + + rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info); + if (rc) + goto q_allocate_fail; + + rc = qed_ll2_acquire_connection_tx(p_hwfn, p_ll2_info); + if (rc) + goto q_allocate_fail; + + rc = qed_ll2_acquire_connection_ooo(p_hwfn, p_ll2_info, + data->input.mtu); + if (rc) + goto q_allocate_fail; + + /* Register callbacks for the Rx/Tx queues */ + if (data->input.conn_type == QED_LL2_TYPE_OOO) { + comp_rx_cb = qed_ll2_lb_rxq_completion; + comp_tx_cb = qed_ll2_lb_txq_completion; + } else { + comp_rx_cb = qed_ll2_rxq_completion; + comp_tx_cb = qed_ll2_txq_completion; + } + + if (data->input.rx_num_desc) { + qed_int_register_cb(p_hwfn, comp_rx_cb, + &p_hwfn->p_ll2_info[i], + &p_ll2_info->rx_queue.rx_sb_index, + &p_ll2_info->rx_queue.p_fw_cons); + p_ll2_info->rx_queue.b_cb_registered = true; + } + + if (data->input.tx_num_desc) { + qed_int_register_cb(p_hwfn, + comp_tx_cb, + &p_hwfn->p_ll2_info[i], + &p_ll2_info->tx_queue.tx_sb_index, + &p_ll2_info->tx_queue.p_fw_cons); + p_ll2_info->tx_queue.b_cb_registered = true; + } + + 
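/* Editor's note: the acquire path above claims the first inactive entry of
 * p_ll2_info[] by taking each entry's own mutex, testing b_active and
 * marking it active before dropping the lock.  The stand-alone sketch
 * below models that "claim the first free slot under its per-slot lock"
 * pattern; all names are hypothetical demo code, not part of the qed
 * driver or of this diff.
 */
#include <pthread.h>
#include <stdbool.h>

struct demo_conn {
	pthread_mutex_t lock;
	bool active;
};

/* Returns the claimed index in [first, last), or -1 if every slot is busy. */
static int demo_claim_free_conn(struct demo_conn *conns, int first, int last)
{
	int i;

	for (i = first; i < last; i++) {
		pthread_mutex_lock(&conns[i].lock);
		if (!conns[i].active) {
			conns[i].active = true;	/* claimed while holding the lock */
			pthread_mutex_unlock(&conns[i].lock);
			return i;
		}
		pthread_mutex_unlock(&conns[i].lock);
	}

	return -1;
}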
*data->p_connection_handle = i; + return rc; + +q_allocate_fail: + qed_ll2_release_connection(p_hwfn, i); + return -ENOMEM; +} + +static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + enum qed_ll2_error_handle error_input; + enum core_error_handle error_mode; + u8 action_on_error = 0; + int rc; + + if (!QED_LL2_RX_REGISTERED(p_ll2_conn)) + return 0; + + DIRECT_REG_WR(p_ll2_conn->rx_queue.set_prod_addr, 0x0); + error_input = p_ll2_conn->input.ai_err_packet_too_big; + error_mode = qed_ll2_get_error_choice(error_input); + SET_FIELD(action_on_error, + CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG, error_mode); + error_input = p_ll2_conn->input.ai_err_no_buf; + error_mode = qed_ll2_get_error_choice(error_input); + SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode); + + rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error); + if (rc) + return rc; + + if (p_ll2_conn->rx_queue.ctx_based) { + rc = qed_db_recovery_add(p_hwfn->cdev, + p_ll2_conn->rx_queue.set_prod_addr, + &p_ll2_conn->rx_queue.db_data, + DB_REC_WIDTH_64B, DB_REC_KERNEL); + } + + return rc; +} + +static void +qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn); +} + +static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn, + u8 handle, + u8 ll2_queue_type) +{ + u8 qid; + + if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY) + return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle; + + /* QED_LL2_RX_TYPE_CTX + * FW distinguishes between the legacy queues (ram based) and the + * ctx based queues by the queue_id. + * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy + * and the queue ids above that are ctx base. + */ + qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] + + MAX_NUM_LL2_RX_RAM_QUEUES; + + /* See comment on the acquire connection for how the ll2 + * queues handles are divided. + */ + qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF); + + return qid; +} + +int qed_ll2_establish_connection(void *cxt, u8 connection_handle) +{ + struct core_conn_context *p_cxt; + struct qed_ll2_tx_packet *p_pkt; + struct qed_ll2_info *p_ll2_conn; + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_rx_queue *p_rx; + struct qed_ll2_tx_queue *p_tx; + struct qed_cxt_info cxt_info; + struct qed_ptt *p_ptt; + int rc = -EINVAL; + u32 i, capacity; + size_t desc_size; + u8 qid, stats_id; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); + if (!p_ll2_conn) { + rc = -EINVAL; + goto out; + } + + p_rx = &p_ll2_conn->rx_queue; + p_tx = &p_ll2_conn->tx_queue; + + qed_chain_reset(&p_rx->rxq_chain); + qed_chain_reset(&p_rx->rcq_chain); + INIT_LIST_HEAD(&p_rx->active_descq); + INIT_LIST_HEAD(&p_rx->free_descq); + INIT_LIST_HEAD(&p_rx->posting_descq); + spin_lock_init(&p_rx->lock); + capacity = qed_chain_get_capacity(&p_rx->rxq_chain); + for (i = 0; i < capacity; i++) + list_add_tail(&p_rx->descq_array[i].list_entry, + &p_rx->free_descq); + *p_rx->p_fw_cons = 0; + + qed_chain_reset(&p_tx->txq_chain); + INIT_LIST_HEAD(&p_tx->active_descq); + INIT_LIST_HEAD(&p_tx->free_descq); + INIT_LIST_HEAD(&p_tx->sending_descq); + spin_lock_init(&p_tx->lock); + capacity = qed_chain_get_capacity(&p_tx->txq_chain); + /* All bds_set elements are flexibily added. 
*/ + desc_size = struct_size(p_pkt, bds_set, + p_ll2_conn->input.tx_max_bds_per_packet); + + for (i = 0; i < capacity; i++) { + p_pkt = p_tx->descq_mem + desc_size * i; + list_add_tail(&p_pkt->list_entry, &p_tx->free_descq); + } + p_tx->cur_completing_bd_idx = 0; + p_tx->bds_idx = 0; + p_tx->b_completing_packet = false; + p_tx->cur_send_packet = NULL; + p_tx->cur_send_frag_num = 0; + p_tx->cur_completing_frag_num = 0; + *p_tx->p_fw_cons = 0; + + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid); + if (rc) + goto out; + cxt_info.iid = p_ll2_conn->cid; + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); + if (rc) { + DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", + p_ll2_conn->cid); + goto out; + } + + p_cxt = cxt_info.p_cxt; + + memset(p_cxt, 0, sizeof(*p_cxt)); + + qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle, + p_ll2_conn->input.rx_conn_type); + stats_id = qed_ll2_handle_to_stats_id(p_hwfn, + p_ll2_conn->input.rx_conn_type, + qid); + p_ll2_conn->queue_id = qid; + p_ll2_conn->tx_stats_id = stats_id; + + /* If there is no valid stats id for this connection, disable stats */ + if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) { + p_ll2_conn->tx_stats_en = 0; + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Disabling stats for queue %d - not enough counters\n", + qid); + } + + DP_VERBOSE(p_hwfn, + QED_MSG_LL2, + "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n", + p_hwfn->rel_pf_id, + p_ll2_conn->input.rx_conn_type, qid, stats_id); + + if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) { + p_rx->set_prod_addr = + (u8 __iomem *)p_hwfn->regview + + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM, + TSTORM_LL2_RX_PRODS, qid); + } else { + /* QED_LL2_RX_TYPE_CTX - using doorbell */ + p_rx->ctx_based = 1; + + p_rx->set_prod_addr = p_hwfn->doorbells + + p_hwfn->dpi_start_offset + + DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE); + + /* prepare db data */ + p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid); + SET_FIELD(p_rx->db_data.params, + CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(p_rx->db_data.params, + CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0); + } + + p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(p_ll2_conn->cid, + DQ_DEMS_LEGACY); + /* prepare db data */ + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_TX_BD_PROD_CMD); + p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); + if (rc) + goto out; + + rc = qed_sp_ll2_tx_queue_start(p_hwfn, p_ll2_conn); + if (rc) + goto out; + + if (!QED_IS_RDMA_PERSONALITY(p_hwfn) && + !QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + qed_wr(p_hwfn, p_ptt, PRS_REG_USE_LIGHT_L2, 1); + + qed_ll2_establish_connection_ooo(p_hwfn, p_ll2_conn); + + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_add_protocol_filter(p_hwfn->cdev, 0, + QED_LLH_FILTER_ETHERTYPE, + ETH_P_FCOE, 0); + qed_llh_add_protocol_filter(p_hwfn->cdev, 0, + QED_LLH_FILTER_ETHERTYPE, + ETH_P_FIP, 0); + } + +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn, + struct qed_ll2_rx_queue *p_rx, + struct qed_ll2_rx_packet *p_curp) +{ + struct qed_ll2_rx_packet *p_posting_packet = NULL; + struct core_ll2_rx_prod rx_prod = { 0, 0 }; + bool 
b_notify_fw = false; + u16 bd_prod, cq_prod; + + /* This handles the flushing of already posted buffers */ + while (!list_empty(&p_rx->posting_descq)) { + p_posting_packet = list_first_entry(&p_rx->posting_descq, + struct qed_ll2_rx_packet, + list_entry); + list_move_tail(&p_posting_packet->list_entry, + &p_rx->active_descq); + b_notify_fw = true; + } + + /* This handles the supplied packet [if there is one] */ + if (p_curp) { + list_add_tail(&p_curp->list_entry, &p_rx->active_descq); + b_notify_fw = true; + } + + if (!b_notify_fw) + return; + + bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain); + cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain); + if (p_rx->ctx_based) { + /* update producer by giving a doorbell */ + p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod); + p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod); + /* Make sure chain element is updated before ringing the + * doorbell + */ + dma_wmb(); + DIRECT_REG_WR64(p_rx->set_prod_addr, + *((u64 *)&p_rx->db_data)); + } else { + rx_prod.bd_prod = cpu_to_le16(bd_prod); + rx_prod.cqe_prod = cpu_to_le16(cq_prod); + + /* Make sure chain element is updated before ringing the + * doorbell + */ + dma_wmb(); + + DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod)); + } +} + +int qed_ll2_post_rx_buffer(void *cxt, + u8 connection_handle, + dma_addr_t addr, + u16 buf_len, void *cookie, u8 notify_fw) +{ + struct qed_hwfn *p_hwfn = cxt; + struct core_rx_bd_with_buff_len *p_curb = NULL; + struct qed_ll2_rx_packet *p_curp = NULL; + struct qed_ll2_info *p_ll2_conn; + struct qed_ll2_rx_queue *p_rx; + unsigned long flags; + void *p_data; + int rc = 0; + + p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); + if (!p_ll2_conn) + return -EINVAL; + p_rx = &p_ll2_conn->rx_queue; + if (!p_rx->set_prod_addr) + return -EIO; + + spin_lock_irqsave(&p_rx->lock, flags); + if (!list_empty(&p_rx->free_descq)) + p_curp = list_first_entry(&p_rx->free_descq, + struct qed_ll2_rx_packet, list_entry); + if (p_curp) { + if (qed_chain_get_elem_left(&p_rx->rxq_chain) && + qed_chain_get_elem_left(&p_rx->rcq_chain)) { + p_data = qed_chain_produce(&p_rx->rxq_chain); + p_curb = (struct core_rx_bd_with_buff_len *)p_data; + qed_chain_produce(&p_rx->rcq_chain); + } + } + + /* If we're lacking entries, let's try to flush buffers to FW */ + if (!p_curp || !p_curb) { + rc = -EBUSY; + p_curp = NULL; + goto out_notify; + } + + /* We have an Rx packet we can fill */ + DMA_REGPAIR_LE(p_curb->addr, addr); + p_curb->buff_length = cpu_to_le16(buf_len); + p_curp->rx_buf_addr = addr; + p_curp->cookie = cookie; + p_curp->rxq_bd = p_curb; + p_curp->buf_length = buf_len; + list_del(&p_curp->list_entry); + + /* Check if we only want to enqueue this packet without informing FW */ + if (!notify_fw) { + list_add_tail(&p_curp->list_entry, &p_rx->posting_descq); + goto out; + } + +out_notify: + qed_ll2_post_rx_buffer_notify_fw(p_hwfn, p_rx, p_curp); +out: + spin_unlock_irqrestore(&p_rx->lock, flags); + return rc; +} + +static void qed_ll2_prepare_tx_packet_set(struct qed_hwfn *p_hwfn, + struct qed_ll2_tx_queue *p_tx, + struct qed_ll2_tx_packet *p_curp, + struct qed_ll2_tx_pkt_info *pkt, + u8 notify_fw) +{ + list_del(&p_curp->list_entry); + p_curp->cookie = pkt->cookie; + p_curp->bd_used = pkt->num_of_bds; + p_curp->notify_fw = notify_fw; + p_tx->cur_send_packet = p_curp; + p_tx->cur_send_frag_num = 0; + + p_curp->bds_set[p_tx->cur_send_frag_num].tx_frag = pkt->first_frag; + p_curp->bds_set[p_tx->cur_send_frag_num].frag_len = pkt->first_frag_len; + p_tx->cur_send_frag_num++; +} + 
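/* Editor's note: the BD-construction routine that follows packs a number of
 * flags (start-BD, number of BDs, RoCE flavor, checksum hints, ...) into a
 * 16-bit bd_data word with SET_FIELD(), i.e. clear the field's bits and OR
 * in the shifted value.  Below is a minimal, self-contained model of that
 * mask/shift helper; the field mask and shift are made up purely for
 * illustration and are not the driver's definitions.
 */
#include <stdint.h>

#define DEMO_NBDS_MASK	0x00F0u	/* hypothetical 4-bit field at bits 4..7 */
#define DEMO_NBDS_SHIFT	4

static inline uint16_t demo_set_field(uint16_t word, uint16_t mask,
				      unsigned int shift, uint16_t val)
{
	word &= (uint16_t)~mask;			/* clear the field */
	word |= (uint16_t)((uint16_t)(val << shift) & mask);	/* insert value */
	return word;
}

/* Example: demo_set_field(0, DEMO_NBDS_MASK, DEMO_NBDS_SHIFT, 3) == 0x0030 */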
+static void +qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2, + struct qed_ll2_tx_packet *p_curp, + struct qed_ll2_tx_pkt_info *pkt) +{ + struct qed_chain *p_tx_chain = &p_ll2->tx_queue.txq_chain; + u16 prod_idx = qed_chain_get_prod_idx(p_tx_chain); + struct core_tx_bd *start_bd = NULL; + enum core_roce_flavor_type roce_flavor; + enum core_tx_dest tx_dest; + u16 bd_data = 0, frag_idx; + u16 bitfield1; + + roce_flavor = (pkt->qed_roce_flavor == QED_LL2_ROCE) ? CORE_ROCE + : CORE_RROCE; + + switch (pkt->tx_dest) { + case QED_LL2_TX_DEST_NW: + tx_dest = CORE_TX_DEST_NW; + break; + case QED_LL2_TX_DEST_LB: + tx_dest = CORE_TX_DEST_LB; + break; + case QED_LL2_TX_DEST_DROP: + tx_dest = CORE_TX_DEST_DROP; + break; + default: + tx_dest = CORE_TX_DEST_LB; + break; + } + + start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); + if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) && + p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) { + start_bd->nw_vlan_or_lb_echo = + cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE); + } else { + start_bd->nw_vlan_or_lb_echo = cpu_to_le16(pkt->vlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) && + p_ll2->input.conn_type == QED_LL2_TYPE_FCOE) + pkt->remove_stag = true; + } + + bitfield1 = le16_to_cpu(start_bd->bitfield1); + SET_FIELD(bitfield1, CORE_TX_BD_L4_HDR_OFFSET_W, pkt->l4_hdr_offset_w); + SET_FIELD(bitfield1, CORE_TX_BD_TX_DST, tx_dest); + start_bd->bitfield1 = cpu_to_le16(bitfield1); + + bd_data |= pkt->bd_flags; + SET_FIELD(bd_data, CORE_TX_BD_DATA_START_BD, 0x1); + SET_FIELD(bd_data, CORE_TX_BD_DATA_NBDS, pkt->num_of_bds); + SET_FIELD(bd_data, CORE_TX_BD_DATA_ROCE_FLAV, roce_flavor); + SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_CSUM, !!(pkt->enable_ip_cksum)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_L4_CSUM, !!(pkt->enable_l4_cksum)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_IP_LEN, !!(pkt->calc_ip_len)); + SET_FIELD(bd_data, CORE_TX_BD_DATA_DISABLE_STAG_INSERTION, + !!(pkt->remove_stag)); + + start_bd->bd_data.as_bitfield = cpu_to_le16(bd_data); + DMA_REGPAIR_LE(start_bd->addr, pkt->first_frag); + start_bd->nbytes = cpu_to_le16(pkt->first_frag_len); + + DP_VERBOSE(p_hwfn, + (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), + "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n", + p_ll2->queue_id, + p_ll2->cid, + p_ll2->input.conn_type, + prod_idx, + pkt->first_frag_len, + pkt->num_of_bds, + le32_to_cpu(start_bd->addr.hi), + le32_to_cpu(start_bd->addr.lo)); + + if (p_ll2->tx_queue.cur_send_frag_num == pkt->num_of_bds) + return; + + /* Need to provide the packet with additional BDs for frags */ + for (frag_idx = p_ll2->tx_queue.cur_send_frag_num; + frag_idx < pkt->num_of_bds; frag_idx++) { + struct core_tx_bd **p_bd = &p_curp->bds_set[frag_idx].txq_bd; + + *p_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain); + (*p_bd)->bd_data.as_bitfield = 0; + (*p_bd)->bitfield1 = 0; + p_curp->bds_set[frag_idx].tx_frag = 0; + p_curp->bds_set[frag_idx].frag_len = 0; + } +} + +/* This should be called while the Txq spinlock is being held */ +static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; + struct qed_ll2_tx_packet *p_pkt = NULL; + u16 bd_prod; + + /* If there are missing BDs, don't do anything now */ + if (p_ll2_conn->tx_queue.cur_send_frag_num != + p_ll2_conn->tx_queue.cur_send_packet->bd_used) + return; + 
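	/* Editor's note: the remainder of this routine moves the completed
	 * packet to the sending list and, if requested, rings the Tx
	 * doorbell; the wmb() there guarantees the BD writes are visible
	 * before the new producer value.  The hypothetical sketch below
	 * shows the same publish-after-write ordering rule with a C11
	 * release store; it is an editor's illustration, not driver code.
	 */
#include <stdatomic.h>
#include <stdint.h>

struct demo_tx_ring {
	uint64_t bd[256];
	_Atomic uint16_t prod;
};

static void demo_publish_bd(struct demo_tx_ring *ring, uint16_t prod,
			    uint64_t bd)
{
	ring->bd[prod & 255] = bd;	/* fill the descriptor first */

	/* Release ordering: the BD contents must be observable before the
	 * consumer can see the advanced producer index -- the driver relies
	 * on wmb()/dma_wmb() ahead of the doorbell write for the same reason.
	 */
	atomic_store_explicit(&ring->prod, (uint16_t)(prod + 1),
			      memory_order_release);
}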
+ /* Push the current packet to the list and clean after it */ + list_add_tail(&p_ll2_conn->tx_queue.cur_send_packet->list_entry, + &p_ll2_conn->tx_queue.sending_descq); + p_ll2_conn->tx_queue.cur_send_packet = NULL; + p_ll2_conn->tx_queue.cur_send_frag_num = 0; + + /* Notify FW of packet only if requested to */ + if (!b_notify) + return; + + bd_prod = qed_chain_get_prod_idx(&p_ll2_conn->tx_queue.txq_chain); + + while (!list_empty(&p_tx->sending_descq)) { + p_pkt = list_first_entry(&p_tx->sending_descq, + struct qed_ll2_tx_packet, list_entry); + if (!p_pkt) + break; + + list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); + } + + p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod); + + /* Make sure the BDs data is updated before ringing the doorbell */ + wmb(); + + DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); + + DP_VERBOSE(p_hwfn, + (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), + "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", + p_ll2_conn->queue_id, + p_ll2_conn->cid, + p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod); +} + +int qed_ll2_prepare_tx_packet(void *cxt, + u8 connection_handle, + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw) +{ + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_tx_packet *p_curp = NULL; + struct qed_ll2_info *p_ll2_conn = NULL; + struct qed_ll2_tx_queue *p_tx; + struct qed_chain *p_tx_chain; + unsigned long flags; + int rc = 0; + + p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); + if (unlikely(!p_ll2_conn)) + return -EINVAL; + p_tx = &p_ll2_conn->tx_queue; + p_tx_chain = &p_tx->txq_chain; + + if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)) + return -EIO; + + spin_lock_irqsave(&p_tx->lock, flags); + if (unlikely(p_tx->cur_send_packet)) { + rc = -EEXIST; + goto out; + } + + /* Get entry, but only if we have tx elements for it */ + if (unlikely(!list_empty(&p_tx->free_descq))) + p_curp = list_first_entry(&p_tx->free_descq, + struct qed_ll2_tx_packet, list_entry); + if (unlikely(p_curp && + qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)) + p_curp = NULL; + + if (unlikely(!p_curp)) { + rc = -EBUSY; + goto out; + } + + /* Prepare packet and BD, and perhaps send a doorbell to FW */ + qed_ll2_prepare_tx_packet_set(p_hwfn, p_tx, p_curp, pkt, notify_fw); + + qed_ll2_prepare_tx_packet_set_bd(p_hwfn, p_ll2_conn, p_curp, pkt); + + qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); + +out: + spin_unlock_irqrestore(&p_tx->lock, flags); + return rc; +} + +int qed_ll2_set_fragment_of_tx_packet(void *cxt, + u8 connection_handle, + dma_addr_t addr, u16 nbytes) +{ + struct qed_ll2_tx_packet *p_cur_send_packet = NULL; + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_info *p_ll2_conn = NULL; + u16 cur_send_frag_num = 0; + struct core_tx_bd *p_bd; + unsigned long flags; + + p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); + if (unlikely(!p_ll2_conn)) + return -EINVAL; + + if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet)) + return -EINVAL; + + p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet; + cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num; + + if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used)) + return -EINVAL; + + /* Fill the BD information, and possibly notify FW */ + p_bd = p_cur_send_packet->bds_set[cur_send_frag_num].txq_bd; + DMA_REGPAIR_LE(p_bd->addr, addr); + p_bd->nbytes = cpu_to_le16(nbytes); + p_cur_send_packet->bds_set[cur_send_frag_num].tx_frag = addr; + p_cur_send_packet->bds_set[cur_send_frag_num].frag_len = nbytes; + + 
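	/* Editor's note: qed_ll2_prepare_tx_packet() supplies the first
	 * fragment and each qed_ll2_set_fragment_of_tx_packet() call fills
	 * one more BD; qed_ll2_tx_packet_notify() only rings the doorbell
	 * once the number of supplied fragments reaches bd_used.  A
	 * minimal model of that "collect N fragments, then submit" flow is
	 * sketched below with hypothetical names (fragment count capped at
	 * 8 purely for the demo); it is not part of the qed driver.
	 */
#include <stdbool.h>
#include <stdint.h>

struct demo_sg_packet {
	uint64_t frag_addr[8];
	uint16_t frag_len[8];
	uint8_t frags_needed;	/* analogous to bd_used */
	uint8_t frags_filled;	/* analogous to cur_send_frag_num */
};

/* Returns true once all fragments are present and the packet may be
 * handed to the hardware (i.e. doorbelled).
 */
static bool demo_add_fragment(struct demo_sg_packet *pkt,
			      uint64_t addr, uint16_t len)
{
	if (pkt->frags_filled >= pkt->frags_needed)
		return true;	/* already complete; nothing to add */

	pkt->frag_addr[pkt->frags_filled] = addr;
	pkt->frag_len[pkt->frags_filled] = len;
	pkt->frags_filled++;

	return pkt->frags_filled == pkt->frags_needed;
}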
p_ll2_conn->tx_queue.cur_send_frag_num++; + + spin_lock_irqsave(&p_ll2_conn->tx_queue.lock, flags); + qed_ll2_tx_packet_notify(p_hwfn, p_ll2_conn); + spin_unlock_irqrestore(&p_ll2_conn->tx_queue.lock, flags); + + return 0; +} + +int qed_ll2_terminate_connection(void *cxt, u8 connection_handle) +{ + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_info *p_ll2_conn = NULL; + int rc = -EINVAL; + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + p_ll2_conn = qed_ll2_handle_sanity_lock(p_hwfn, connection_handle); + if (!p_ll2_conn) { + rc = -EINVAL; + goto out; + } + + /* Stop Tx & Rx of connection, if needed */ + if (QED_LL2_TX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->tx_queue.b_cb_registered = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ + rc = qed_sp_ll2_tx_queue_stop(p_hwfn, p_ll2_conn); + if (rc) + goto out; + + qed_ll2_txq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->tx_queue.tx_sb_index); + } + + if (QED_LL2_RX_REGISTERED(p_ll2_conn)) { + p_ll2_conn->rx_queue.b_cb_registered = false; + smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */ + + if (p_ll2_conn->rx_queue.ctx_based) + qed_db_recovery_del(p_hwfn->cdev, + p_ll2_conn->rx_queue.set_prod_addr, + &p_ll2_conn->rx_queue.db_data); + + rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn); + if (rc) + goto out; + + qed_ll2_rxq_flush(p_hwfn, connection_handle); + qed_int_unregister_cb(p_hwfn, p_ll2_conn->rx_queue.rx_sb_index); + } + + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO) + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + + if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_FCOE) { + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + qed_llh_remove_protocol_filter(p_hwfn->cdev, 0, + QED_LLH_FILTER_ETHERTYPE, + ETH_P_FCOE, 0); + qed_llh_remove_protocol_filter(p_hwfn->cdev, 0, + QED_LLH_FILTER_ETHERTYPE, + ETH_P_FIP, 0); + } + +out: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_info *p_ll2_conn) +{ + struct qed_ooo_buffer *p_buffer; + + if (p_ll2_conn->input.conn_type != QED_LL2_TYPE_OOO) + return; + + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + while ((p_buffer = qed_ooo_get_free_buffer(p_hwfn, + p_hwfn->p_ooo_info))) { + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } +} + +void qed_ll2_release_connection(void *cxt, u8 connection_handle) +{ + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_info *p_ll2_conn = NULL; + + p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle); + if (!p_ll2_conn) + return; + + kfree(p_ll2_conn->tx_queue.descq_mem); + qed_chain_free(p_hwfn->cdev, &p_ll2_conn->tx_queue.txq_chain); + + kfree(p_ll2_conn->rx_queue.descq_array); + qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rxq_chain); + qed_chain_free(p_hwfn->cdev, &p_ll2_conn->rx_queue.rcq_chain); + + qed_cxt_release_cid(p_hwfn, p_ll2_conn->cid); + + qed_ll2_release_connection_ooo(p_hwfn, p_ll2_conn); + + mutex_lock(&p_ll2_conn->mutex); + p_ll2_conn->b_active = false; + mutex_unlock(&p_ll2_conn->mutex); +} + +int qed_ll2_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_ll2_info *p_ll2_connections; + u8 i; + + /* Allocate LL2's set struct */ + p_ll2_connections = kcalloc(QED_MAX_NUM_OF_LL2_CONNECTIONS, + sizeof(struct qed_ll2_info), GFP_KERNEL); + if (!p_ll2_connections) { + DP_NOTICE(p_hwfn, 
"Failed to allocate `struct qed_ll2'\n"); + return -ENOMEM; + } + + for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) + p_ll2_connections[i].my_id = i; + + p_hwfn->p_ll2_info = p_ll2_connections; + return 0; +} + +void qed_ll2_setup(struct qed_hwfn *p_hwfn) +{ + int i; + + for (i = 0; i < QED_MAX_NUM_OF_LL2_CONNECTIONS; i++) + mutex_init(&p_hwfn->p_ll2_info[i].mutex); +} + +void qed_ll2_free(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->p_ll2_info) + return; + + kfree(p_hwfn->p_ll2_info); + p_hwfn->p_ll2_info = NULL; +} + +static void _qed_ll2_get_port_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_ll2_stats *p_stats) +{ + struct core_ll2_port_stats port_stats; + + memset(&port_stats, 0, sizeof(port_stats)); + qed_memcpy_from(p_hwfn, p_ptt, &port_stats, + BAR0_MAP_REG_TSDM_RAM + + TSTORM_LL2_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)), + sizeof(port_stats)); + + p_stats->gsi_invalid_hdr += HILO_64_REGPAIR(port_stats.gsi_invalid_hdr); + p_stats->gsi_invalid_pkt_length += + HILO_64_REGPAIR(port_stats.gsi_invalid_pkt_length); + p_stats->gsi_unsupported_pkt_typ += + HILO_64_REGPAIR(port_stats.gsi_unsupported_pkt_typ); + p_stats->gsi_crcchksm_error += + HILO_64_REGPAIR(port_stats.gsi_crcchksm_error); +} + +static void _qed_ll2_get_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_ll2_info *p_ll2_conn, + struct qed_ll2_stats *p_stats) +{ + struct core_ll2_tstorm_per_queue_stat tstats; + u8 qid = p_ll2_conn->queue_id; + u32 tstats_addr; + + memset(&tstats, 0, sizeof(tstats)); + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(qid); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, sizeof(tstats)); + + p_stats->packet_too_big_discard += + HILO_64_REGPAIR(tstats.packet_too_big_discard); + p_stats->no_buff_discard += HILO_64_REGPAIR(tstats.no_buff_discard); +} + +static void _qed_ll2_get_ustats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_ll2_info *p_ll2_conn, + struct qed_ll2_stats *p_stats) +{ + struct core_ll2_ustorm_per_queue_stat ustats; + u8 qid = p_ll2_conn->queue_id; + u32 ustats_addr; + + memset(&ustats, 0, sizeof(ustats)); + ustats_addr = BAR0_MAP_REG_USDM_RAM + + CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(qid); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, sizeof(ustats)); + + p_stats->rcv_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->rcv_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->rcv_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->rcv_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->rcv_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->rcv_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); +} + +static void _qed_ll2_get_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_ll2_info *p_ll2_conn, + struct qed_ll2_stats *p_stats) +{ + struct core_ll2_pstorm_per_queue_stat pstats; + u8 stats_id = p_ll2_conn->tx_stats_id; + u32 pstats_addr; + + memset(&pstats, 0, sizeof(pstats)); + pstats_addr = BAR0_MAP_REG_PSDM_RAM + + CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(stats_id); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, sizeof(pstats)); + + p_stats->sent_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->sent_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->sent_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->sent_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->sent_mcast_pkts += 
HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->sent_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); +} + +static int __qed_ll2_get_stats(void *cxt, u8 connection_handle, + struct qed_ll2_stats *p_stats) +{ + struct qed_hwfn *p_hwfn = cxt; + struct qed_ll2_info *p_ll2_conn = NULL; + struct qed_ptt *p_ptt; + + if ((connection_handle >= QED_MAX_NUM_OF_LL2_CONNECTIONS) || + !p_hwfn->p_ll2_info) + return -EINVAL; + + p_ll2_conn = &p_hwfn->p_ll2_info[connection_handle]; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + return -EINVAL; + } + + if (p_ll2_conn->input.gsi_enable) + _qed_ll2_get_port_stats(p_hwfn, p_ptt, p_stats); + + _qed_ll2_get_tstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); + + _qed_ll2_get_ustats(p_hwfn, p_ptt, p_ll2_conn, p_stats); + + if (p_ll2_conn->tx_stats_en) + _qed_ll2_get_pstats(p_hwfn, p_ptt, p_ll2_conn, p_stats); + + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +int qed_ll2_get_stats(void *cxt, + u8 connection_handle, struct qed_ll2_stats *p_stats) +{ + memset(p_stats, 0, sizeof(*p_stats)); + return __qed_ll2_get_stats(cxt, connection_handle, p_stats); +} + +static void qed_ll2b_release_rx_packet(void *cxt, + u8 connection_handle, + void *cookie, + dma_addr_t rx_buf_addr, + bool b_last_packet) +{ + struct qed_hwfn *p_hwfn = cxt; + + qed_ll2_dealloc_buffer(p_hwfn->cdev, cookie); +} + +static void qed_ll2_register_cb_ops(struct qed_dev *cdev, + const struct qed_ll2_cb_ops *ops, + void *cookie) +{ + cdev->ll2->cbs = ops; + cdev->ll2->cb_cookie = cookie; +} + +static struct qed_ll2_cbs ll2_cbs = { + .rx_comp_cb = &qed_ll2b_complete_rx_packet, + .rx_release_cb = &qed_ll2b_release_rx_packet, + .tx_comp_cb = &qed_ll2b_complete_tx_packet, + .tx_release_cb = &qed_ll2b_complete_tx_packet, +}; + +static void qed_ll2_set_conn_data(struct qed_hwfn *p_hwfn, + struct qed_ll2_acquire_data *data, + struct qed_ll2_params *params, + enum qed_ll2_conn_type conn_type, + u8 *handle, bool lb) +{ + memset(data, 0, sizeof(*data)); + + data->input.conn_type = conn_type; + data->input.mtu = params->mtu; + data->input.rx_num_desc = QED_LL2_RX_SIZE; + data->input.rx_drop_ttl0_flg = params->drop_ttl0_packets; + data->input.rx_vlan_removal_en = params->rx_vlan_stripping; + data->input.tx_num_desc = QED_LL2_TX_SIZE; + data->p_connection_handle = handle; + data->cbs = &ll2_cbs; + ll2_cbs.cookie = p_hwfn; + + if (lb) { + data->input.tx_tc = PKT_LB_TC; + data->input.tx_dest = QED_LL2_TX_DEST_LB; + } else { + data->input.tx_tc = 0; + data->input.tx_dest = QED_LL2_TX_DEST_NW; + } +} + +static int qed_ll2_start_ooo(struct qed_hwfn *p_hwfn, + struct qed_ll2_params *params) +{ + u8 *handle = &p_hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id; + struct qed_ll2_acquire_data data; + int rc; + + qed_ll2_set_conn_data(p_hwfn, &data, params, + QED_LL2_TYPE_OOO, handle, true); + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) { + DP_INFO(p_hwfn, "Failed to acquire LL2 OOO connection\n"); + goto out; + } + + rc = qed_ll2_establish_connection(p_hwfn, *handle); + if (rc) { + DP_INFO(p_hwfn, "Failed to establish LL2 OOO connection\n"); + goto fail; + } + + return 0; + +fail: + qed_ll2_release_connection(p_hwfn, *handle); +out: + *handle = QED_LL2_UNUSED_HANDLE; + return rc; +} + +static bool qed_ll2_is_storage_eng1(struct qed_dev *cdev) +{ + return (QED_IS_FCOE_PERSONALITY(QED_LEADING_HWFN(cdev)) || + QED_IS_ISCSI_PERSONALITY(QED_LEADING_HWFN(cdev)) || + QED_IS_NVMETCP_PERSONALITY(QED_LEADING_HWFN(cdev))) && + (QED_AFFIN_HWFN(cdev) != 
QED_LEADING_HWFN(cdev)); +} + +static int __qed_ll2_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + int rc; + + rc = qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle); + if (rc) + DP_INFO(cdev, "Failed to terminate LL2 connection\n"); + + qed_ll2_release_connection(p_hwfn, cdev->ll2->handle); + + return rc; +} + +static int qed_ll2_stop(struct qed_dev *cdev) +{ + bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); + struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); + int rc = 0, rc2 = 0; + + if (cdev->ll2->handle == QED_LL2_UNUSED_HANDLE) + return 0; + if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address); + + qed_llh_remove_mac_filter(cdev, 0, cdev->ll2_mac_address); + eth_zero_addr(cdev->ll2_mac_address); + + if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + qed_ll2_stop_ooo(p_hwfn); + + /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ + if (b_is_storage_eng1) { + rc2 = __qed_ll2_stop(QED_LEADING_HWFN(cdev)); + if (rc2) + DP_NOTICE(QED_LEADING_HWFN(cdev), + "Failed to stop LL2 on engine 0\n"); + } + + rc = __qed_ll2_stop(p_hwfn); + if (rc) + DP_NOTICE(p_hwfn, "Failed to stop LL2\n"); + + qed_ll2_kill_buffers(cdev); + + cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; + + return rc | rc2; +} + +static int __qed_ll2_start(struct qed_hwfn *p_hwfn, + struct qed_ll2_params *params) +{ + struct qed_ll2_buffer *buffer, *tmp_buffer; + struct qed_dev *cdev = p_hwfn->cdev; + enum qed_ll2_conn_type conn_type; + struct qed_ll2_acquire_data data; + int rc, rx_cnt; + + switch (p_hwfn->hw_info.personality) { + case QED_PCI_FCOE: + conn_type = QED_LL2_TYPE_FCOE; + break; + case QED_PCI_ISCSI: + case QED_PCI_NVMETCP: + conn_type = QED_LL2_TYPE_TCP_ULP; + break; + case QED_PCI_ETH_ROCE: + conn_type = QED_LL2_TYPE_ROCE; + break; + default: + + conn_type = QED_LL2_TYPE_TEST; + } + + qed_ll2_set_conn_data(p_hwfn, &data, params, conn_type, + &cdev->ll2->handle, false); + + rc = qed_ll2_acquire_connection(p_hwfn, &data); + if (rc) { + DP_INFO(p_hwfn, "Failed to acquire LL2 connection\n"); + return rc; + } + + rc = qed_ll2_establish_connection(p_hwfn, cdev->ll2->handle); + if (rc) { + DP_INFO(p_hwfn, "Failed to establish LL2 connection\n"); + goto release_conn; + } + + /* Post all Rx buffers to FW */ + spin_lock_bh(&cdev->ll2->lock); + rx_cnt = cdev->ll2->rx_cnt; + list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) { + rc = qed_ll2_post_rx_buffer(p_hwfn, + cdev->ll2->handle, + buffer->phys_addr, 0, buffer, 1); + if (rc) { + DP_INFO(p_hwfn, + "Failed to post an Rx buffer; Deleting it\n"); + dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr, + cdev->ll2->rx_size, DMA_FROM_DEVICE); + kfree(buffer->data); + list_del(&buffer->list); + kfree(buffer); + } else { + rx_cnt++; + } + } + spin_unlock_bh(&cdev->ll2->lock); + + if (rx_cnt == cdev->ll2->rx_cnt) { + DP_NOTICE(p_hwfn, "Failed passing even a single Rx buffer\n"); + goto terminate_conn; + } + cdev->ll2->rx_cnt = rx_cnt; + + return 0; + +terminate_conn: + qed_ll2_terminate_connection(p_hwfn, cdev->ll2->handle); +release_conn: + qed_ll2_release_connection(p_hwfn, cdev->ll2->handle); + return rc; +} + +static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params) +{ + bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); + struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); + struct qed_ll2_buffer *buffer; + int rx_num_desc, i, rc; + + if (!is_valid_ether_addr(params->ll2_mac_address)) { + DP_NOTICE(cdev, 
"Invalid Ethernet address\n"); + return -EINVAL; + } + + WARN_ON(!cdev->ll2->cbs); + + /* Initialize LL2 locks & lists */ + INIT_LIST_HEAD(&cdev->ll2->list); + spin_lock_init(&cdev->ll2->lock); + + cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN + + L1_CACHE_BYTES + params->mtu; + + /* Allocate memory for LL2. + * In CMT mode, in case of a storage PF which is affintized to engine 1, + * LL2 is started also on engine 0 and thus we need twofold buffers. + */ + rx_num_desc = QED_LL2_RX_SIZE * (b_is_storage_eng1 ? 2 : 1); + DP_INFO(cdev, "Allocating %d LL2 buffers of size %08x bytes\n", + rx_num_desc, cdev->ll2->rx_size); + for (i = 0; i < rx_num_desc; i++) { + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); + if (!buffer) { + DP_INFO(cdev, "Failed to allocate LL2 buffers\n"); + rc = -ENOMEM; + goto err0; + } + + rc = qed_ll2_alloc_buffer(cdev, (u8 **)&buffer->data, + &buffer->phys_addr); + if (rc) { + kfree(buffer); + goto err0; + } + + list_add_tail(&buffer->list, &cdev->ll2->list); + } + + rc = __qed_ll2_start(p_hwfn, params); + if (rc) { + DP_NOTICE(cdev, "Failed to start LL2\n"); + goto err0; + } + + /* In CMT mode, always need to start LL2 on engine 0 for a storage PF, + * since broadcast/mutlicast packets are routed to engine 0. + */ + if (b_is_storage_eng1) { + rc = __qed_ll2_start(QED_LEADING_HWFN(cdev), params); + if (rc) { + DP_NOTICE(QED_LEADING_HWFN(cdev), + "Failed to start LL2 on engine 0\n"); + goto err1; + } + } + + if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { + DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n"); + rc = qed_ll2_start_ooo(p_hwfn, params); + if (rc) { + DP_NOTICE(cdev, "Failed to start OOO LL2\n"); + goto err2; + } + } + + if (!QED_IS_NVMETCP_PERSONALITY(p_hwfn)) { + rc = qed_llh_add_mac_filter(cdev, 0, params->ll2_mac_address); + if (rc) { + DP_NOTICE(cdev, "Failed to add an LLH filter\n"); + goto err3; + } + } + + ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address); + + return 0; + +err3: + if (QED_IS_ISCSI_PERSONALITY(p_hwfn) || QED_IS_NVMETCP_PERSONALITY(p_hwfn)) + qed_ll2_stop_ooo(p_hwfn); +err2: + if (b_is_storage_eng1) + __qed_ll2_stop(QED_LEADING_HWFN(cdev)); +err1: + __qed_ll2_stop(p_hwfn); +err0: + qed_ll2_kill_buffers(cdev); + cdev->ll2->handle = QED_LL2_UNUSED_HANDLE; + return rc; +} + +static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, + unsigned long xmit_flags) +{ + struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); + struct qed_ll2_tx_pkt_info pkt; + const skb_frag_t *frag; + u8 flags = 0, nr_frags; + int rc = -EINVAL, i; + dma_addr_t mapping; + u16 vlan = 0; + + if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { + DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); + return -EINVAL; + } + + /* Cache number of fragments from SKB since SKB may be freed by + * the completion routine after calling qed_ll2_prepare_tx_packet() + */ + nr_frags = skb_shinfo(skb)->nr_frags; + + if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) { + DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", + 1 + nr_frags); + return -EINVAL; + } + + mapping = dma_map_single(&cdev->pdev->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { + DP_NOTICE(cdev, "SKB mapping failed\n"); + return -EINVAL; + } + + /* Request HW to calculate IP csum */ + if (!((vlan_get_protocol(skb) == htons(ETH_P_IPV6)) && + ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) + flags |= BIT(CORE_TX_BD_DATA_IP_CSUM_SHIFT); + + if (skb_vlan_tag_present(skb)) { 
+ vlan = skb_vlan_tag_get(skb); + flags |= BIT(CORE_TX_BD_DATA_VLAN_INSERTION_SHIFT); + } + + memset(&pkt, 0, sizeof(pkt)); + pkt.num_of_bds = 1 + nr_frags; + pkt.vlan = vlan; + pkt.bd_flags = flags; + pkt.tx_dest = QED_LL2_TX_DEST_NW; + pkt.first_frag = mapping; + pkt.first_frag_len = skb->len; + pkt.cookie = skb; + if (test_bit(QED_MF_UFP_SPECIFIC, &cdev->mf_bits) && + test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) + pkt.remove_stag = true; + + /* qed_ll2_prepare_tx_packet() may actually send the packet if + * there are no fragments in the skb and subsequently the completion + * routine may run and free the SKB, so no dereferencing the SKB + * beyond this point unless skb has any fragments. + */ + rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle, + &pkt, 1); + if (unlikely(rc)) + goto err; + + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + + mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { + DP_NOTICE(cdev, + "Unable to map frag - dropping packet\n"); + rc = -ENOMEM; + goto err; + } + + rc = qed_ll2_set_fragment_of_tx_packet(p_hwfn, + cdev->ll2->handle, + mapping, + skb_frag_size(frag)); + + /* if failed not much to do here, partial packet has been posted + * we can't free memory, will need to wait for completion + */ + if (unlikely(rc)) + goto err2; + } + + return 0; + +err: + dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE); +err2: + return rc; +} + +static int qed_ll2_stats(struct qed_dev *cdev, struct qed_ll2_stats *stats) +{ + bool b_is_storage_eng1 = qed_ll2_is_storage_eng1(cdev); + struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); + int rc; + + if (!cdev->ll2) + return -EINVAL; + + rc = qed_ll2_get_stats(p_hwfn, cdev->ll2->handle, stats); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to get LL2 stats\n"); + return rc; + } + + /* In CMT mode, LL2 is always started on engine 0 for a storage PF */ + if (b_is_storage_eng1) { + rc = __qed_ll2_get_stats(QED_LEADING_HWFN(cdev), + cdev->ll2->handle, stats); + if (rc) { + DP_NOTICE(QED_LEADING_HWFN(cdev), + "Failed to get LL2 stats on engine 0\n"); + return rc; + } + } + + return 0; +} + +const struct qed_ll2_ops qed_ll2_ops_pass = { + .start = &qed_ll2_start, + .stop = &qed_ll2_stop, + .start_xmit = &qed_ll2_start_xmit, + .register_cb_ops = &qed_ll2_register_cb_ops, + .get_stats = &qed_ll2_stats, +}; + +int qed_ll2_alloc_if(struct qed_dev *cdev) +{ + cdev->ll2 = kzalloc(sizeof(*cdev->ll2), GFP_KERNEL); + return cdev->ll2 ? 0 : -ENOMEM; +} + +void qed_ll2_dealloc_if(struct qed_dev *cdev) +{ + kfree(cdev->ll2); + cdev->ll2 = NULL; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h new file mode 100644 index 0000000000..a174c6fc62 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -0,0 +1,260 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef _QED_LL2_H +#define _QED_LL2_H + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_chain.h> +#include <linux/qed/qed_ll2_if.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_sp.h" + +#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4) +/* LL2 queues handles will be split as follows: + * first will be legacy queues, and then the ctx based queues. + */ +#define QED_MAX_NUM_OF_LL2_CONNS_PF (4) +#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3) + +#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \ + (QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF) + +#define QED_LL2_LEGACY_CONN_BASE_PF 0 +#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF + +struct qed_ll2_rx_packet { + struct list_head list_entry; + struct core_rx_bd_with_buff_len *rxq_bd; + dma_addr_t rx_buf_addr; + u16 buf_length; + void *cookie; + u8 placement_offset; + u16 parse_flags; + u16 packet_length; + u16 vlan; + u32 opaque_data[2]; +}; + +struct qed_ll2_tx_packet { + struct list_head list_entry; + u16 bd_used; + bool notify_fw; + void *cookie; + /* Flexible Array of bds_set determined by max_bds_per_packet */ + struct { + struct core_tx_bd *txq_bd; + dma_addr_t tx_frag; + u16 frag_len; + } bds_set[]; +}; + +struct qed_ll2_rx_queue { + /* Lock protecting the Rx queue manipulation */ + spinlock_t lock; + struct qed_chain rxq_chain; + struct qed_chain rcq_chain; + u8 rx_sb_index; + u8 ctx_based; + bool b_cb_registered; + __le16 *p_fw_cons; + struct list_head active_descq; + struct list_head free_descq; + struct list_head posting_descq; + struct qed_ll2_rx_packet *descq_array; + void __iomem *set_prod_addr; + struct core_pwm_prod_update_data db_data; +}; + +struct qed_ll2_tx_queue { + /* Lock protecting the Tx queue manipulation */ + spinlock_t lock; + struct qed_chain txq_chain; + u8 tx_sb_index; + bool b_cb_registered; + __le16 *p_fw_cons; + struct list_head active_descq; + struct list_head free_descq; + struct list_head sending_descq; + u16 cur_completing_bd_idx; + void __iomem *doorbell_addr; + struct core_db_data db_msg; + u16 bds_idx; + u16 cur_send_frag_num; + u16 cur_completing_frag_num; + bool b_completing_packet; + void *descq_mem; /* memory for variable sized qed_ll2_tx_packet*/ + struct qed_ll2_tx_packet *cur_send_packet; + struct qed_ll2_tx_packet cur_completing_packet; +}; + +struct qed_ll2_info { + /* Lock protecting the state of LL2 */ + struct mutex mutex; + + struct qed_ll2_acquire_data_inputs input; + u32 cid; + u8 my_id; + u8 queue_id; + u8 tx_stats_id; + bool b_active; + enum core_tx_dest tx_dest; + u8 tx_stats_en; + bool main_func_queue; + struct qed_ll2_cbs cbs; + struct qed_ll2_rx_queue rx_queue; + struct qed_ll2_tx_queue tx_queue; +}; + +extern const struct qed_ll2_ops qed_ll2_ops_pass; + +/** + * qed_ll2_acquire_connection(): Allocate resources, + * starts rx & tx (if relevant) queues pair. + * Provides connecion handler as output + * parameter. + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @data: Describes connection parameters. + * + * Return: Int. + */ +int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data); + +/** + * qed_ll2_establish_connection(): start previously allocated LL2 queues pair + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * + * Return: 0 on success, failure otherwise. 
+ */ +int qed_ll2_establish_connection(void *cxt, u8 connection_handle); + +/** + * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue. + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: RX (physical address) buffers to submit. + * @buf_len: Buffer Len. + * @cookie: Cookie. + * @notify_fw: Produce corresponding Rx BD immediately. + * + * Return: 0 on success, failure otherwise. + */ +int qed_ll2_post_rx_buffer(void *cxt, + u8 connection_handle, + dma_addr_t addr, + u16 buf_len, void *cookie, u8 notify_fw); + +/** + * qed_ll2_prepare_tx_packet(): Request for start Tx BD + * to prepare Tx packet submission to FW. + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: Connection handle. + * @pkt: Info regarding the tx packet. + * @notify_fw: Issue doorbell to fw for this packet. + * + * Return: 0 on success, failure otherwise. + */ +int qed_ll2_prepare_tx_packet(void *cxt, + u8 connection_handle, + struct qed_ll2_tx_pkt_info *pkt, + bool notify_fw); + +/** + * qed_ll2_release_connection(): Releases resources allocated for LL2 + * connection. + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * + * Return: Void. + */ +void qed_ll2_release_connection(void *cxt, u8 connection_handle); + +/** + * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill + * Tx BD of BDs requested by + * qed_ll2_prepare_tx_packet + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: Address. + * @nbytes: Number of bytes. + * + * Return: 0 on success, failure otherwise. + */ +int qed_ll2_set_fragment_of_tx_packet(void *cxt, + u8 connection_handle, + dma_addr_t addr, u16 nbytes); + +/** + * qed_ll2_terminate_connection(): Stops Tx/Rx queues + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * + * Return: 0 on success, failure otherwise. + */ +int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); + +/** + * qed_ll2_get_stats(): Get LL2 queue's statistics + * + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @p_stats: Pointer Status. + * + * Return: 0 on success, failure otherwise. + */ +int qed_ll2_get_stats(void *cxt, + u8 connection_handle, struct qed_ll2_stats *p_stats); + +/** + * qed_ll2_alloc(): Allocates LL2 connections set. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_ll2_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_ll2_setup(): Inits LL2 connections set. + * + * @p_hwfn: HW device data. + * + * Return: Void. + * + */ +void qed_ll2_setup(struct qed_hwfn *p_hwfn); + +/** + * qed_ll2_free(): Releases LL2 connections set + * + * @p_hwfn: HW device data. + * + * Return: Void. 
+ * + */ +void qed_ll2_free(struct qed_hwfn *p_hwfn); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c new file mode 100644 index 0000000000..c278f88930 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -0,0 +1,3206 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/stddef.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/string.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/workqueue.h> +#include <linux/ethtool.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> +#include <linux/crash_dump.h> +#include <linux/crc32.h> +#include <linux/qed/qed_if.h> +#include <linux/qed/qed_ll2_if.h> +#include <net/devlink.h> +#include <linux/phylink.h> + +#include "qed.h" +#include "qed_sriov.h" +#include "qed_sp.h" +#include "qed_dev_api.h" +#include "qed_ll2.h" +#include "qed_fcoe.h" +#include "qed_iscsi.h" + +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_hw.h" +#include "qed_selftest.h" +#include "qed_debug.h" +#include "qed_devlink.h" + +#define QED_ROCE_QPS (8192) +#define QED_ROCE_DPIS (8) +#define QED_RDMA_SRQS QED_ROCE_QPS +#define QED_NVM_CFG_GET_FLAGS 0xA +#define QED_NVM_CFG_GET_PF_FLAGS 0x1A +#define QED_NVM_CFG_MAX_ATTRS 50 + +static char version[] = + "QLogic FastLinQ 4xxxx Core Module qed\n"; + +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); +MODULE_LICENSE("GPL"); + +#define FW_FILE_VERSION \ + __stringify(FW_MAJOR_VERSION) "." \ + __stringify(FW_MINOR_VERSION) "." \ + __stringify(FW_REVISION_VERSION) "." 
\ + __stringify(FW_ENGINEERING_VERSION) + +#define QED_FW_FILE_NAME \ + "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" + +MODULE_FIRMWARE(QED_FW_FILE_NAME); + +/* MFW speed capabilities maps */ + +struct qed_mfw_speed_map { + u32 mfw_val; + __ETHTOOL_DECLARE_LINK_MODE_MASK(caps); + + const u32 *cap_arr; + u32 arr_size; +}; + +#define QED_MFW_SPEED_MAP(type, arr) \ +{ \ + .mfw_val = (type), \ + .cap_arr = (arr), \ + .arr_size = ARRAY_SIZE(arr), \ +} + +static const u32 qed_mfw_ext_1g[] __initconst = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 qed_mfw_ext_10g[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 qed_mfw_ext_25g[] __initconst = { + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 qed_mfw_ext_40g[] __initconst = { + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, +}; + +static const u32 qed_mfw_ext_50g_base_r[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_50000baseDR_Full_BIT, +}; + +static const u32 qed_mfw_ext_50g_base_r2[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_100g_base_r2[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT, +}; + +static const u32 qed_mfw_ext_100g_base_r4[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = { + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R, + qed_mfw_ext_50g_base_r), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2, + qed_mfw_ext_50g_base_r2), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2, + qed_mfw_ext_100g_base_r2), + QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4, + qed_mfw_ext_100g_base_r4), +}; + +static const u32 qed_mfw_legacy_1g[] __initconst = { + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, +}; + +static const u32 qed_mfw_legacy_10g[] __initconst = { + ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + 
ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, +}; + +static const u32 qed_mfw_legacy_20g[] __initconst = { + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, +}; + +static const u32 qed_mfw_legacy_25g[] __initconst = { + ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, +}; + +static const u32 qed_mfw_legacy_40g[] __initconst = { + ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, +}; + +static const u32 qed_mfw_legacy_50g[] __initconst = { + ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, +}; + +static const u32 qed_mfw_legacy_bb_100g[] __initconst = { + ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, +}; + +static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = { + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G, + qed_mfw_legacy_1g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G, + qed_mfw_legacy_10g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G, + qed_mfw_legacy_20g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G, + qed_mfw_legacy_25g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G, + qed_mfw_legacy_40g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G, + qed_mfw_legacy_50g), + QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G, + qed_mfw_legacy_bb_100g), +}; + +static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map) +{ + linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps); + + map->cap_arr = NULL; + map->arr_size = 0; +} + +static void __init qed_mfw_speed_maps_init(void) +{ + u32 i; + + for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) + qed_mfw_speed_map_populate(qed_mfw_ext_maps + i); + + for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) + qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i); +} + +static int __init qed_init(void) +{ + pr_info("%s", version); + + qed_mfw_speed_maps_init(); + + return 0; +} +module_init(qed_init); + +static void __exit qed_exit(void) +{ + /* To prevent marking this module as "permanent" */ +} +module_exit(qed_exit); + +static void qed_free_pci(struct qed_dev *cdev) +{ + struct pci_dev *pdev = cdev->pdev; + + if (cdev->doorbells && cdev->db_size) + iounmap(cdev->doorbells); + if (cdev->regview) + iounmap(cdev->regview); + if (atomic_read(&pdev->enable_cnt) == 1) + pci_release_regions(pdev); + + pci_disable_device(pdev); +} + +#define PCI_REVISION_ID_ERROR_VAL 0xff + +/* Performs PCI initializations as well as initializing PCI-related parameters + * in the device structrue. Returns 0 in case of success. 
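 * Maps BAR0 (the register view) and, when present, BAR2 (the doorbell
 * space), and configures a 64-bit DMA mask.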
+ */ +static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) +{ + u8 rev_id; + int rc; + + cdev->pdev = pdev; + + rc = pci_enable_device(pdev); + if (rc) { + DP_NOTICE(cdev, "Cannot enable PCI device\n"); + goto err0; + } + + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { + DP_NOTICE(cdev, "No memory region found in bar #0\n"); + rc = -EIO; + goto err1; + } + + if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { + DP_NOTICE(cdev, "No memory region found in bar #2\n"); + rc = -EIO; + goto err1; + } + + if (atomic_read(&pdev->enable_cnt) == 1) { + rc = pci_request_regions(pdev, "qed"); + if (rc) { + DP_NOTICE(cdev, + "Failed to request PCI memory resources\n"); + goto err1; + } + pci_set_master(pdev); + pci_save_state(pdev); + } + + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + if (rev_id == PCI_REVISION_ID_ERROR_VAL) { + DP_NOTICE(cdev, + "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n", + rev_id); + rc = -ENODEV; + goto err2; + } + if (!pci_is_pcie(pdev)) { + DP_NOTICE(cdev, "The bus is not PCI Express\n"); + rc = -EIO; + goto err2; + } + + cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (IS_PF(cdev) && !cdev->pci_params.pm_cap) + DP_NOTICE(cdev, "Cannot find power management capability\n"); + + rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64)); + if (rc) { + DP_NOTICE(cdev, "Can't request DMA addresses\n"); + rc = -EIO; + goto err2; + } + + cdev->pci_params.mem_start = pci_resource_start(pdev, 0); + cdev->pci_params.mem_end = pci_resource_end(pdev, 0); + cdev->pci_params.irq = pdev->irq; + + cdev->regview = pci_ioremap_bar(pdev, 0); + if (!cdev->regview) { + DP_NOTICE(cdev, "Cannot map register space, aborting\n"); + rc = -ENOMEM; + goto err2; + } + + cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); + cdev->db_size = pci_resource_len(cdev->pdev, 2); + if (!cdev->db_size) { + if (IS_PF(cdev)) { + DP_NOTICE(cdev, "No Doorbell bar available\n"); + return -EINVAL; + } else { + return 0; + } + } + + cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); + + if (!cdev->doorbells) { + DP_NOTICE(cdev, "Cannot map doorbell space\n"); + return -ENOMEM; + } + + return 0; + +err2: + pci_release_regions(pdev); +err1: + pci_disable_device(pdev); +err0: + return rc; +} + +int qed_fill_dev_info(struct qed_dev *cdev, + struct qed_dev_info *dev_info) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_hw_info *hw_info = &p_hwfn->hw_info; + struct qed_tunnel_info *tun = &cdev->tunnel; + struct qed_ptt *ptt; + + memset(dev_info, 0, sizeof(struct qed_dev_info)); + + if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && + tun->vxlan.b_mode_enabled) + dev_info->vxlan_enable = true; + + if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && + tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && + tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) + dev_info->gre_enable = true; + + if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && + tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && + tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) + dev_info->geneve_enable = true; + + dev_info->num_hwfns = cdev->num_hwfns; + dev_info->pci_mem_start = cdev->pci_params.mem_start; + dev_info->pci_mem_end = cdev->pci_params.mem_end; + dev_info->pci_irq = cdev->pci_params.irq; + dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); + dev_info->dev_type = cdev->type; + ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); + + if (IS_PF(cdev)) 
{ + dev_info->fw_major = FW_MAJOR_VERSION; + dev_info->fw_minor = FW_MINOR_VERSION; + dev_info->fw_rev = FW_REVISION_VERSION; + dev_info->fw_eng = FW_ENGINEERING_VERSION; + dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, + &cdev->mf_bits); + if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) + dev_info->b_arfs_capable = true; + dev_info->tx_switching = true; + + if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) + dev_info->wol_support = true; + + dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); + dev_info->esl = qed_mcp_is_esl_supported(p_hwfn); + dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; + } else { + qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, + &dev_info->fw_minor, &dev_info->fw_rev, + &dev_info->fw_eng); + } + + if (IS_PF(cdev)) { + ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (ptt) { + qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, + &dev_info->mfw_rev, NULL); + + qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, + &dev_info->mbi_version); + + qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, + &dev_info->flash_size); + + qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); + } + } else { + qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, + &dev_info->mfw_rev, NULL); + } + + dev_info->mtu = hw_info->mtu; + cdev->common_dev_info = *dev_info; + + return 0; +} + +static void qed_free_cdev(struct qed_dev *cdev) +{ + kfree((void *)cdev); +} + +static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) +{ + struct qed_dev *cdev; + + cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); + if (!cdev) + return cdev; + + qed_init_struct(cdev); + + return cdev; +} + +/* Sets the requested power state */ +static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) +{ + if (!cdev) + return -ENODEV; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); + return 0; +} + +/* probing */ +static struct qed_dev *qed_probe(struct pci_dev *pdev, + struct qed_probe_params *params) +{ + struct qed_dev *cdev; + int rc; + + cdev = qed_alloc_cdev(pdev); + if (!cdev) + goto err0; + + cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; + cdev->protocol = params->protocol; + + if (params->is_vf) + cdev->b_is_vf = true; + + qed_init_dp(cdev, params->dp_module, params->dp_level); + + cdev->recov_in_prog = params->recov_in_prog; + + rc = qed_init_pci(cdev, pdev); + if (rc) { + DP_ERR(cdev, "init pci failed\n"); + goto err1; + } + DP_INFO(cdev, "PCI init completed successfully\n"); + + rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); + if (rc) { + DP_ERR(cdev, "hw prepare failed\n"); + goto err2; + } + + DP_INFO(cdev, "%s completed successfully\n", __func__); + + return cdev; + +err2: + qed_free_pci(cdev); +err1: + qed_free_cdev(cdev); +err0: + return NULL; +} + +static void qed_remove(struct qed_dev *cdev) +{ + if (!cdev) + return; + + qed_hw_remove(cdev); + + qed_free_pci(cdev); + + qed_set_power_state(cdev, PCI_D3hot); + + qed_free_cdev(cdev); +} + +static void qed_disable_msix(struct qed_dev *cdev) +{ + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + pci_disable_msix(cdev->pdev); + kfree(cdev->int_params.msix_table); + } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { + pci_disable_msi(cdev->pdev); + } + + memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); +} + +static int qed_enable_msix(struct qed_dev *cdev, + struct qed_int_params *int_params) +{ + int i, rc, cnt; + + cnt = int_params->in.num_vectors; + + for (i = 0; i < cnt; i++) + int_params->msix_table[i].entry = i; + + rc = 
pci_enable_msix_range(cdev->pdev, int_params->msix_table, + int_params->in.min_msix_cnt, cnt); + if (rc < cnt && rc >= int_params->in.min_msix_cnt && + (rc % cdev->num_hwfns)) { + pci_disable_msix(cdev->pdev); + + /* If fastpath is initialized, we need at least one interrupt + * per hwfn [and the slow path interrupts]. New requested number + * should be a multiple of the number of hwfns. + */ + cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; + DP_NOTICE(cdev, + "Trying to enable MSI-X with less vectors (%d out of %d)\n", + cnt, int_params->in.num_vectors); + rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, + cnt); + if (!rc) + rc = cnt; + } + + /* For VFs, we should return with an error in case we didn't get the + * exact number of msix vectors as we requested. + * Not doing that will lead to a crash when starting queues for + * this VF. + */ + if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { + /* MSI-x configuration was achieved */ + int_params->out.int_mode = QED_INT_MODE_MSIX; + int_params->out.num_vectors = rc; + rc = 0; + } else { + DP_NOTICE(cdev, + "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", + cnt, rc); + } + + return rc; +} + +/* This function outputs the int mode and the number of enabled msix vector */ +static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) +{ + struct qed_int_params *int_params = &cdev->int_params; + struct msix_entry *tbl; + int rc = 0, cnt; + + switch (int_params->in.int_mode) { + case QED_INT_MODE_MSIX: + /* Allocate MSIX table */ + cnt = int_params->in.num_vectors; + int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); + if (!int_params->msix_table) { + rc = -ENOMEM; + goto out; + } + + /* Enable MSIX */ + rc = qed_enable_msix(cdev, int_params); + if (!rc) + goto out; + + DP_NOTICE(cdev, "Failed to enable MSI-X\n"); + kfree(int_params->msix_table); + if (force_mode) + goto out; + fallthrough; + + case QED_INT_MODE_MSI: + if (cdev->num_hwfns == 1) { + rc = pci_enable_msi(cdev->pdev); + if (!rc) { + int_params->out.int_mode = QED_INT_MODE_MSI; + goto out; + } + + DP_NOTICE(cdev, "Failed to enable MSI\n"); + if (force_mode) + goto out; + } + fallthrough; + + case QED_INT_MODE_INTA: + int_params->out.int_mode = QED_INT_MODE_INTA; + rc = 0; + goto out; + default: + DP_NOTICE(cdev, "Unknown int_mode value %d\n", + int_params->in.int_mode); + rc = -EINVAL; + } + +out: + if (!rc) + DP_INFO(cdev, "Using %s interrupts\n", + int_params->out.int_mode == QED_INT_MODE_INTA ? + "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
+ "MSI" : "MSIX"); + cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; + + return rc; +} + +static void qed_simd_handler_config(struct qed_dev *cdev, void *token, + int index, void(*handler)(void *)) +{ + struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; + int relative_idx = index / cdev->num_hwfns; + + hwfn->simd_proto_handler[relative_idx].func = handler; + hwfn->simd_proto_handler[relative_idx].token = token; +} + +static void qed_simd_handler_clean(struct qed_dev *cdev, int index) +{ + struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; + int relative_idx = index / cdev->num_hwfns; + + memset(&hwfn->simd_proto_handler[relative_idx], 0, + sizeof(struct qed_simd_fp_handler)); +} + +static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) +{ + tasklet_schedule((struct tasklet_struct *)tasklet); + return IRQ_HANDLED; +} + +static irqreturn_t qed_single_int(int irq, void *dev_instance) +{ + struct qed_dev *cdev = (struct qed_dev *)dev_instance; + struct qed_hwfn *hwfn; + irqreturn_t rc = IRQ_NONE; + u64 status; + int i, j; + + for (i = 0; i < cdev->num_hwfns; i++) { + status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); + + if (!status) + continue; + + hwfn = &cdev->hwfns[i]; + + /* Slowpath interrupt */ + if (unlikely(status & 0x1)) { + tasklet_schedule(&hwfn->sp_dpc); + status &= ~0x1; + rc = IRQ_HANDLED; + } + + /* Fastpath interrupts */ + for (j = 0; j < 64; j++) { + if ((0x2ULL << j) & status) { + struct qed_simd_fp_handler *p_handler = + &hwfn->simd_proto_handler[j]; + + if (p_handler->func) + p_handler->func(p_handler->token); + else + DP_NOTICE(hwfn, + "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", + j, status); + + status &= ~(0x2ULL << j); + rc = IRQ_HANDLED; + } + } + + if (unlikely(status)) + DP_VERBOSE(hwfn, NETIF_MSG_INTR, + "got an unknown interrupt status 0x%llx\n", + status); + } + + return rc; +} + +int qed_slowpath_irq_req(struct qed_hwfn *hwfn) +{ + struct qed_dev *cdev = hwfn->cdev; + u32 int_mode; + int rc = 0; + u8 id; + + int_mode = cdev->int_params.out.int_mode; + if (int_mode == QED_INT_MODE_MSIX) { + id = hwfn->my_id; + snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", + id, cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + rc = request_irq(cdev->int_params.msix_table[id].vector, + qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc); + } else { + unsigned long flags = 0; + + snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", + cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), + PCI_FUNC(cdev->pdev->devfn)); + + if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) + flags |= IRQF_SHARED; + + rc = request_irq(cdev->pdev->irq, qed_single_int, + flags, cdev->name, cdev); + } + + if (rc) + DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); + else + DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), + "Requested slowpath %s\n", + (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); + + return rc; +} + +static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) +{ + /* Calling the disable function will make sure that any + * currently-running function is completed. The following call to the + * enable function makes this sequence a flush-like operation. 
+ */ + if (p_hwfn->b_sp_dpc_enabled) { + tasklet_disable(&p_hwfn->sp_dpc); + tasklet_enable(&p_hwfn->sp_dpc); + } +} + +void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u8 id = p_hwfn->my_id; + u32 int_mode; + + int_mode = cdev->int_params.out.int_mode; + if (int_mode == QED_INT_MODE_MSIX) + synchronize_irq(cdev->int_params.msix_table[id].vector); + else + synchronize_irq(cdev->pdev->irq); + + qed_slowpath_tasklet_flush(p_hwfn); +} + +static void qed_slowpath_irq_free(struct qed_dev *cdev) +{ + int i; + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].b_int_requested) + break; + free_irq(cdev->int_params.msix_table[i].vector, + &cdev->hwfns[i].sp_dpc); + } + } else { + if (QED_LEADING_HWFN(cdev)->b_int_requested) + free_irq(cdev->pdev->irq, cdev); + } + qed_int_disable_post_isr_release(cdev); +} + +static int qed_nic_stop(struct qed_dev *cdev) +{ + int i, rc; + + rc = qed_hw_stop(cdev); + + for (i = 0; i < cdev->num_hwfns; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (p_hwfn->b_sp_dpc_enabled) { + tasklet_disable(&p_hwfn->sp_dpc); + p_hwfn->b_sp_dpc_enabled = false; + DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, + "Disabled sp tasklet [hwfn %d] at %p\n", + i, &p_hwfn->sp_dpc); + } + } + + qed_dbg_pf_exit(cdev); + + return rc; +} + +static int qed_nic_setup(struct qed_dev *cdev) +{ + int rc, i; + + /* Determine if interface is going to require LL2 */ + if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { + for (i = 0; i < cdev->num_hwfns; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->using_ll2 = true; + } + } + + rc = qed_resc_alloc(cdev); + if (rc) + return rc; + + DP_INFO(cdev, "Allocated qed resources\n"); + + qed_resc_setup(cdev); + + return rc; +} + +static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) +{ + int limit = 0; + + /* Mark the fastpath as free/used */ + cdev->int_params.fp_initialized = cnt ? true : false; + + if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) + limit = cdev->num_hwfns * 63; + else if (cdev->int_params.fp_msix_cnt) + limit = cdev->int_params.fp_msix_cnt; + + if (!limit) + return -ENOMEM; + + return min_t(int, cnt, limit); +} + +static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) +{ + memset(info, 0, sizeof(struct qed_int_info)); + + if (!cdev->int_params.fp_initialized) { + DP_INFO(cdev, + "Protocol driver requested interrupt information, but its support is not yet configured\n"); + return -EINVAL; + } + + /* Need to expose only MSI-X information; Single IRQ is handled solely + * by qed. 
+ */ + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + int msix_base = cdev->int_params.fp_msix_base; + + info->msix_cnt = cdev->int_params.fp_msix_cnt; + info->msix = &cdev->int_params.msix_table[msix_base]; + } + + return 0; +} + +static int qed_slowpath_setup_int(struct qed_dev *cdev, + enum qed_int_mode int_mode) +{ + struct qed_sb_cnt_info sb_cnt_info; + int num_l2_queues = 0; + int rc; + int i; + + if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { + DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); + return -EINVAL; + } + + memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); + cdev->int_params.in.int_mode = int_mode; + for_each_hwfn(cdev, i) { + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); + cdev->int_params.in.num_vectors += sb_cnt_info.cnt; + cdev->int_params.in.num_vectors++; /* slowpath */ + } + + /* We want a minimum of one slowpath and one fastpath vector per hwfn */ + cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; + + if (is_kdump_kernel()) { + DP_INFO(cdev, + "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", + cdev->int_params.in.min_msix_cnt); + cdev->int_params.in.num_vectors = + cdev->int_params.in.min_msix_cnt; + } + + rc = qed_set_int_mode(cdev, false); + if (rc) { + DP_ERR(cdev, "%s ERR\n", __func__); + return rc; + } + + cdev->int_params.fp_msix_base = cdev->num_hwfns; + cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - + cdev->num_hwfns; + + if (!IS_ENABLED(CONFIG_QED_RDMA) || + !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) + return 0; + + for_each_hwfn(cdev, i) + num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); + + DP_VERBOSE(cdev, QED_MSG_RDMA, + "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", + cdev->int_params.fp_msix_cnt, num_l2_queues); + + if (cdev->int_params.fp_msix_cnt > num_l2_queues) { + cdev->int_params.rdma_msix_cnt = + (cdev->int_params.fp_msix_cnt - num_l2_queues) + / cdev->num_hwfns; + cdev->int_params.rdma_msix_base = + cdev->int_params.fp_msix_base + num_l2_queues; + cdev->int_params.fp_msix_cnt = num_l2_queues; + } else { + cdev->int_params.rdma_msix_cnt = 0; + } + + DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", + cdev->int_params.rdma_msix_cnt, + cdev->int_params.rdma_msix_base); + + return 0; +} + +static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) +{ + int rc; + + memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); + cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; + + qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), + &cdev->int_params.in.num_vectors); + if (cdev->num_hwfns > 1) { + u8 vectors = 0; + + qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); + cdev->int_params.in.num_vectors += vectors; + } + + /* We want a minimum of one fastpath vector per vf hwfn */ + cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; + + rc = qed_set_int_mode(cdev, true); + if (rc) + return rc; + + cdev->int_params.fp_msix_base = 0; + cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; + + return 0; +} + +u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, + u8 *input_buf, u32 max_size, u8 *unzip_buf) +{ + int rc; + + p_hwfn->stream->next_in = input_buf; + p_hwfn->stream->avail_in = input_len; + p_hwfn->stream->next_out = unzip_buf; + p_hwfn->stream->avail_out = max_size; + + rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); + + if (rc != Z_OK) { + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", + rc); + return 0; + } + + 
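	/* Single-shot decompression: with Z_FINISH the whole input must be
	 * available up front and unzip_buf must be large enough for the
	 * inflated image; the caller gets the unzipped length in dwords.
	 */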
rc = zlib_inflate(p_hwfn->stream, Z_FINISH); + zlib_inflateEnd(p_hwfn->stream); + + if (rc != Z_OK && rc != Z_STREAM_END) { + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", + p_hwfn->stream->msg, rc); + return 0; + } + + return p_hwfn->stream->total_out / 4; +} + +static int qed_alloc_stream_mem(struct qed_dev *cdev) +{ + int i; + void *workspace; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); + if (!p_hwfn->stream) + return -ENOMEM; + + workspace = vzalloc(zlib_inflate_workspacesize()); + if (!workspace) + return -ENOMEM; + p_hwfn->stream->workspace = workspace; + } + + return 0; +} + +static void qed_free_stream_mem(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (!p_hwfn->stream) + return; + + vfree(p_hwfn->stream->workspace); + kfree(p_hwfn->stream); + } +} + +static void qed_update_pf_params(struct qed_dev *cdev, + struct qed_pf_params *params) +{ + int i; + + if (IS_ENABLED(CONFIG_QED_RDMA)) { + params->rdma_pf_params.num_qps = QED_ROCE_QPS; + params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; + params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; + /* divide by 3 the MRs to avoid MF ILT overflow */ + params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; + } + + if (cdev->num_hwfns > 1 || IS_VF(cdev)) + params->eth_pf_params.num_arfs_filters = 0; + + /* In case we might support RDMA, don't allow qede to be greedy + * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] + * per hwfn. + */ + if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { + u16 *num_cons; + + num_cons = ¶ms->eth_pf_params.num_cons; + *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); + } + + for (i = 0; i < cdev->num_hwfns; i++) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + p_hwfn->pf_params = *params; + } +} + +#define QED_PERIODIC_DB_REC_COUNT 10 +#define QED_PERIODIC_DB_REC_INTERVAL_MS 100 +#define QED_PERIODIC_DB_REC_INTERVAL \ + msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) + +static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, + enum qed_slowpath_wq_flag wq_flag, + unsigned long delay) +{ + if (!hwfn->slowpath_wq_active) + return -EINVAL; + + /* Memory barrier for setting atomic bit */ + smp_mb__before_atomic(); + set_bit(wq_flag, &hwfn->slowpath_task_flags); + /* Memory barrier after setting atomic bit */ + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); + + return 0; +} + +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) +{ + /* Reset periodic Doorbell Recovery counter */ + p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; + + /* Don't schedule periodic Doorbell Recovery if already scheduled */ + if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &p_hwfn->slowpath_task_flags)) + return; + + qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); +} + +static void qed_slowpath_wq_stop(struct qed_dev *cdev) +{ + int i; + + if (IS_VF(cdev)) + return; + + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].slowpath_wq) + continue; + + /* Stop queuing new delayed works */ + cdev->hwfns[i].slowpath_wq_active = false; + + cancel_delayed_work(&cdev->hwfns[i].slowpath_task); + destroy_workqueue(cdev->hwfns[i].slowpath_wq); + } +} + +static void qed_slowpath_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + slowpath_task.work); + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + if 
(!ptt) { + if (hwfn->slowpath_wq_active) + queue_delayed_work(hwfn->slowpath_wq, + &hwfn->slowpath_task, 0); + + return; + } + + if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, + &hwfn->slowpath_task_flags)) + qed_mfw_process_tlv_req(hwfn, ptt); + + if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &hwfn->slowpath_task_flags)) { + /* skip qed_db_rec_handler during recovery/unload */ + if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) + goto out; + + qed_db_rec_handler(hwfn, ptt); + if (hwfn->periodic_db_rec_count--) + qed_slowpath_delayed_work(hwfn, + QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); + } + +out: + qed_ptt_release(hwfn, ptt); +} + +static int qed_slowpath_wq_start(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn; + char name[NAME_SIZE]; + int i; + + if (IS_VF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + hwfn = &cdev->hwfns[i]; + + snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", + cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); + + hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); + if (!hwfn->slowpath_wq) { + DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); + hwfn->slowpath_wq_active = true; + } + + return 0; +} + +static int qed_slowpath_start(struct qed_dev *cdev, + struct qed_slowpath_params *params) +{ + struct qed_drv_load_params drv_load_params; + struct qed_hw_init_params hw_init_params; + struct qed_mcp_drv_version drv_version; + struct qed_tunnel_info tunn_info; + const u8 *data = NULL; + struct qed_hwfn *hwfn; + struct qed_ptt *p_ptt; + int rc = -EINVAL; + + if (qed_iov_wq_start(cdev)) + goto err; + + if (qed_slowpath_wq_start(cdev)) + goto err; + + if (IS_PF(cdev)) { + rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, + &cdev->pdev->dev); + if (rc) { + DP_NOTICE(cdev, + "Failed to find fw file - /lib/firmware/%s\n", + QED_FW_FILE_NAME); + goto err; + } + + if (cdev->num_hwfns == 1) { + p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); + if (p_ptt) { + QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; + } else { + DP_NOTICE(cdev, + "Failed to acquire PTT for aRFS\n"); + rc = -EINVAL; + goto err; + } + } + } + + cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; + rc = qed_nic_setup(cdev); + if (rc) + goto err; + + if (IS_PF(cdev)) + rc = qed_slowpath_setup_int(cdev, params->int_mode); + else + rc = qed_slowpath_vf_setup_int(cdev); + if (rc) + goto err1; + + if (IS_PF(cdev)) { + /* Allocate stream for unzipping */ + rc = qed_alloc_stream_mem(cdev); + if (rc) + goto err2; + + /* First Dword used to differentiate between various sources */ + data = cdev->firmware->data + sizeof(u32); + + qed_dbg_pf_init(cdev); + } + + /* Start the slowpath */ + memset(&hw_init_params, 0, sizeof(hw_init_params)); + memset(&tunn_info, 0, sizeof(tunn_info)); + tunn_info.vxlan.b_mode_enabled = true; + tunn_info.l2_gre.b_mode_enabled = true; + tunn_info.ip_gre.b_mode_enabled = true; + tunn_info.l2_geneve.b_mode_enabled = true; + tunn_info.ip_geneve.b_mode_enabled = true; + tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; + hw_init_params.p_tunn = &tunn_info; + hw_init_params.b_hw_start = true; + hw_init_params.int_mode = cdev->int_params.out.int_mode; + hw_init_params.allow_npar_tx_switch = true; + 
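	/* Hand the zipped init-values image (skipping the leading identifier
	 * dword) to the HW init flow; this stays NULL for VFs, which do not
	 * load the firmware themselves.
	 */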
hw_init_params.bin_fw_data = data; + + memset(&drv_load_params, 0, sizeof(drv_load_params)); + drv_load_params.is_crash_kernel = is_kdump_kernel(); + drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; + drv_load_params.avoid_eng_reset = false; + drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; + hw_init_params.p_drv_load_params = &drv_load_params; + + rc = qed_hw_init(cdev, &hw_init_params); + if (rc) + goto err2; + + DP_INFO(cdev, + "HW initialization and function start completed successfully\n"); + + if (IS_PF(cdev)) { + cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | + BIT(QED_MODE_L2GENEVE_TUNN) | + BIT(QED_MODE_IPGENEVE_TUNN) | + BIT(QED_MODE_L2GRE_TUNN) | + BIT(QED_MODE_IPGRE_TUNN)); + } + + /* Allocate LL2 interface if needed */ + if (QED_LEADING_HWFN(cdev)->using_ll2) { + rc = qed_ll2_alloc_if(cdev); + if (rc) + goto err3; + } + if (IS_PF(cdev)) { + hwfn = QED_LEADING_HWFN(cdev); + drv_version.version = (params->drv_major << 24) | + (params->drv_minor << 16) | + (params->drv_rev << 8) | + (params->drv_eng); + strscpy(drv_version.name, params->name, + MCP_DRV_VER_STR_SIZE - 4); + rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, + &drv_version); + if (rc) { + DP_NOTICE(cdev, "Failed sending drv version command\n"); + goto err4; + } + } + + qed_reset_vport_stats(cdev); + + return 0; + +err4: + qed_ll2_dealloc_if(cdev); +err3: + qed_hw_stop(cdev); +err2: + qed_hw_timers_stop_all(cdev); + if (IS_PF(cdev)) + qed_slowpath_irq_free(cdev); + qed_free_stream_mem(cdev); + qed_disable_msix(cdev); +err1: + qed_resc_free(cdev); +err: + if (IS_PF(cdev)) + release_firmware(cdev->firmware); + + if (IS_PF(cdev) && (cdev->num_hwfns == 1) && + QED_LEADING_HWFN(cdev)->p_arfs_ptt) + qed_ptt_release(QED_LEADING_HWFN(cdev), + QED_LEADING_HWFN(cdev)->p_arfs_ptt); + + qed_iov_wq_stop(cdev, false); + + qed_slowpath_wq_stop(cdev); + + return rc; +} + +static int qed_slowpath_stop(struct qed_dev *cdev) +{ + if (!cdev) + return -ENODEV; + + qed_slowpath_wq_stop(cdev); + + qed_ll2_dealloc_if(cdev); + + if (IS_PF(cdev)) { + if (cdev->num_hwfns == 1) + qed_ptt_release(QED_LEADING_HWFN(cdev), + QED_LEADING_HWFN(cdev)->p_arfs_ptt); + qed_free_stream_mem(cdev); + if (IS_QED_ETH_IF(cdev)) + qed_sriov_disable(cdev, true); + } + + qed_nic_stop(cdev); + + if (IS_PF(cdev)) + qed_slowpath_irq_free(cdev); + + qed_disable_msix(cdev); + + qed_resc_free(cdev); + + qed_iov_wq_stop(cdev, true); + + if (IS_PF(cdev)) + release_firmware(cdev->firmware); + + return 0; +} + +static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) +{ + int i; + + memcpy(cdev->name, name, NAME_SIZE); + for_each_hwfn(cdev, i) + snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); +} + +static u32 qed_sb_init(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, u16 sb_id, + enum qed_sb_type type) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + u16 rel_sb_id; + u32 rc; + + /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ + if (type == QED_SB_TYPE_L2_QUEUE) { + p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; + rel_sb_id = sb_id / cdev->num_hwfns; + } else { + p_hwfn = QED_AFFIN_HWFN(cdev); + rel_sb_id = sb_id; + } + + DP_VERBOSE(cdev, NETIF_MSG_INTR, + "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", + IS_LEAD_HWFN(p_hwfn) ? 
0 : 1, rel_sb_id, sb_id); + + if (IS_PF(p_hwfn->cdev)) { + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, + sb_phy_addr, rel_sb_id); + qed_ptt_release(p_hwfn, p_ptt); + } else { + rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, + sb_phy_addr, rel_sb_id); + } + + return rc; +} + +static u32 qed_sb_release(struct qed_dev *cdev, + struct qed_sb_info *sb_info, + u16 sb_id, + enum qed_sb_type type) +{ + struct qed_hwfn *p_hwfn; + u16 rel_sb_id; + u32 rc; + + /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ + if (type == QED_SB_TYPE_L2_QUEUE) { + p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; + rel_sb_id = sb_id / cdev->num_hwfns; + } else { + p_hwfn = QED_AFFIN_HWFN(cdev); + rel_sb_id = sb_id; + } + + DP_VERBOSE(cdev, NETIF_MSG_INTR, + "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", + IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); + + rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); + + return rc; +} + +static bool qed_can_link_change(struct qed_dev *cdev) +{ + return true; +} + +static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params, + const struct qed_link_params *params) +{ + struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed; + const struct qed_mfw_speed_map *map; + u32 i; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) + ext_speed->autoneg = !!params->autoneg; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { + ext_speed->advertised_speeds = 0; + + for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) { + map = qed_mfw_ext_maps + i; + + if (linkmode_intersects(params->adv_speeds, map->caps)) + ext_speed->advertised_speeds |= map->mfw_val; + } + } + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) { + switch (params->forced_speed) { + case SPEED_1000: + ext_speed->forced_speed = QED_EXT_SPEED_1G; + break; + case SPEED_10000: + ext_speed->forced_speed = QED_EXT_SPEED_10G; + break; + case SPEED_20000: + ext_speed->forced_speed = QED_EXT_SPEED_20G; + break; + case SPEED_25000: + ext_speed->forced_speed = QED_EXT_SPEED_25G; + break; + case SPEED_40000: + ext_speed->forced_speed = QED_EXT_SPEED_40G; + break; + case SPEED_50000: + ext_speed->forced_speed = QED_EXT_SPEED_50G_R | + QED_EXT_SPEED_50G_R2; + break; + case SPEED_100000: + ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 | + QED_EXT_SPEED_100G_R4 | + QED_EXT_SPEED_100G_P4; + break; + default: + break; + } + } + + if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)) + return; + + switch (params->forced_speed) { + case SPEED_25000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 | + ETH_EXT_FEC_25G_BASE_R | + ETH_EXT_FEC_25G_NONE; + break; + default: + break; + } + + break; + case SPEED_40000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R | + ETH_EXT_FEC_40G_NONE; + break; + default: + break; + } + + break; + case SPEED_50000: + switch (params->fec) { + 
case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 | + ETH_EXT_FEC_50G_BASE_R | + ETH_EXT_FEC_50G_NONE; + break; + default: + break; + } + + break; + case SPEED_100000: + switch (params->fec) { + case FEC_FORCE_MODE_NONE: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE; + break; + case FEC_FORCE_MODE_FIRECODE: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R; + break; + case FEC_FORCE_MODE_RS: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528; + break; + case FEC_FORCE_MODE_AUTO: + link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 | + ETH_EXT_FEC_100G_BASE_R | + ETH_EXT_FEC_100G_NONE; + break; + default: + break; + } + + break; + default: + break; + } +} + +static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) +{ + struct qed_mcp_link_params *link_params; + struct qed_mcp_link_speed_params *speed; + const struct qed_mfw_speed_map *map; + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int rc; + u32 i; + + if (!cdev) + return -ENODEV; + + /* The link should be set only once per PF */ + hwfn = &cdev->hwfns[0]; + + /* When VF wants to set link, force it to read the bulletin instead. + * This mimics the PF behavior, where a noitification [both immediate + * and possible later] would be generated when changing properties. + */ + if (IS_VF(cdev)) { + qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); + return 0; + } + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + link_params = qed_mcp_get_link_params(hwfn); + if (!link_params) + return -ENODATA; + + speed = &link_params->speed; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) + speed->autoneg = !!params->autoneg; + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { + speed->advertised_speeds = 0; + + for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) { + map = qed_mfw_legacy_maps + i; + + if (linkmode_intersects(params->adv_speeds, map->caps)) + speed->advertised_speeds |= map->mfw_val; + } + } + + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) + speed->forced_speed = params->forced_speed; + + if (qed_mcp_is_ext_speed_supported(hwfn)) + qed_set_ext_speed_params(link_params, params); + + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { + if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + link_params->pause.autoneg = true; + else + link_params->pause.autoneg = false; + if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) + link_params->pause.forced_rx = true; + else + link_params->pause.forced_rx = false; + if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) + link_params->pause.forced_tx = true; + else + link_params->pause.forced_tx = false; + } + + if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { + switch (params->loopback_mode) { + case QED_LINK_LOOPBACK_INT_PHY: + link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; + break; + case QED_LINK_LOOPBACK_EXT_PHY: + link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; + break; + case QED_LINK_LOOPBACK_EXT: + link_params->loopback_mode = ETH_LOOPBACK_EXT; + break; + case QED_LINK_LOOPBACK_MAC: + link_params->loopback_mode = ETH_LOOPBACK_MAC; + break; + case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123: + link_params->loopback_mode = + 
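/* A minimal usage sketch for the .set_link callback (which
 * qed_common_ops_pass routes to qed_set_link()); the caller below is
 * hypothetical, with cdev, ops and rc assumed to be the protocol
 * driver's qed device handle, its struct qed_common_ops pointer and an
 * int. Only the fields whose QED_LINK_OVERRIDE_* bit is set in
 * override_flags are applied; all other link properties keep their
 * current configuration.
 *
 *	struct qed_link_params lp = { };
 *
 *	lp.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
 *			    QED_LINK_OVERRIDE_SPEED_FORCED_SPEED |
 *			    QED_LINK_OVERRIDE_FEC_CONFIG;
 *	lp.autoneg = false;
 *	lp.forced_speed = SPEED_25000;
 *	lp.fec = FEC_FORCE_MODE_RS;
 *	lp.link_up = true;
 *
 *	rc = ops->set_link(cdev, &lp);
 */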
ETH_LOOPBACK_CNIG_AH_ONLY_0123; + break; + case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301: + link_params->loopback_mode = + ETH_LOOPBACK_CNIG_AH_ONLY_2301; + break; + case QED_LINK_LOOPBACK_PCS_AH_ONLY: + link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY; + break; + case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY: + link_params->loopback_mode = + ETH_LOOPBACK_REVERSE_MAC_AH_ONLY; + break; + case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY: + link_params->loopback_mode = + ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY; + break; + default: + link_params->loopback_mode = ETH_LOOPBACK_NONE; + break; + } + } + + if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) + memcpy(&link_params->eee, ¶ms->eee, + sizeof(link_params->eee)); + + if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG) + link_params->fec = params->fec; + + rc = qed_mcp_set_link(hwfn, ptt, params->link_up); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_get_port_type(u32 media_type) +{ + int port_type; + + switch (media_type) { + case MEDIA_SFPP_10G_FIBER: + case MEDIA_SFP_1G_FIBER: + case MEDIA_XFP_FIBER: + case MEDIA_MODULE_FIBER: + port_type = PORT_FIBRE; + break; + case MEDIA_DA_TWINAX: + port_type = PORT_DA; + break; + case MEDIA_BASE_T: + port_type = PORT_TP; + break; + case MEDIA_KR: + case MEDIA_NOT_PRESENT: + port_type = PORT_NONE; + break; + case MEDIA_UNSPECIFIED: + default: + port_type = PORT_OTHER; + break; + } + return port_type; +} + +static int qed_get_link_data(struct qed_hwfn *hwfn, + struct qed_mcp_link_params *params, + struct qed_mcp_link_state *link, + struct qed_mcp_link_capabilities *link_caps) +{ + void *p; + + if (!IS_PF(hwfn->cdev)) { + qed_vf_get_link_params(hwfn, params); + qed_vf_get_link_state(hwfn, link); + qed_vf_get_link_caps(hwfn, link_caps); + + return 0; + } + + p = qed_mcp_get_link_params(hwfn); + if (!p) + return -ENXIO; + memcpy(params, p, sizeof(*params)); + + p = qed_mcp_get_link_state(hwfn); + if (!p) + return -ENXIO; + memcpy(link, p, sizeof(*link)); + + p = qed_mcp_get_link_capabilities(hwfn); + if (!p) + return -ENXIO; + memcpy(link_caps, p, sizeof(*link_caps)); + + return 0; +} + +static void qed_fill_link_capability(struct qed_hwfn *hwfn, + struct qed_ptt *ptt, u32 capability, + unsigned long *if_caps) +{ + u32 media_type, tcvr_state, tcvr_type; + u32 speed_mask, board_cfg; + + if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) + media_type = MEDIA_UNSPECIFIED; + + if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) + tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; + + if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) + speed_mask = 0xFFFFFFFF; + + if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) + board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + + DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, + "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", + media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); + + switch (media_type) { + case MEDIA_DA_TWINAX: + phylink_set(if_caps, FIBRE); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + phylink_set(if_caps, 20000baseKR2_Full); + + /* For DAC media multiple speed capabilities are supported */ + capability |= speed_mask; + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + phylink_set(if_caps, 1000baseKX_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + phylink_set(if_caps, 10000baseCR_Full); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_40G_CR4: + 
case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + phylink_set(if_caps, 40000baseCR4_Full); + break; + default: + break; + } + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + phylink_set(if_caps, 25000baseCR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + phylink_set(if_caps, 50000baseCR2_Full); + + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + phylink_set(if_caps, 100000baseCR4_Full); + break; + default: + break; + } + + break; + case MEDIA_BASE_T: + phylink_set(if_caps, TP); + + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + phylink_set(if_caps, 1000baseT_Full); + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + phylink_set(if_caps, 10000baseT_Full); + } + + if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { + phylink_set(if_caps, FIBRE); + + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_1000BASET: + phylink_set(if_caps, 1000baseT_Full); + break; + case ETH_TRANSCEIVER_TYPE_10G_BASET: + phylink_set(if_caps, 10000baseT_Full); + break; + default: + break; + } + } + + break; + case MEDIA_SFP_1G_FIBER: + case MEDIA_SFPP_10G_FIBER: + case MEDIA_XFP_FIBER: + case MEDIA_MODULE_FIBER: + phylink_set(if_caps, FIBRE); + capability |= speed_mask; + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: + phylink_set(if_caps, 1000baseKX_Full); + break; + default: + break; + } + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: + phylink_set(if_caps, 10000baseSR_Full); + break; + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: + phylink_set(if_caps, 10000baseLR_Full); + break; + case ETH_TRANSCEIVER_TYPE_10G_LRM: + phylink_set(if_caps, 10000baseLRM_Full); + break; + case ETH_TRANSCEIVER_TYPE_10G_ER: + phylink_set(if_caps, 10000baseR_FEC); + break; + default: + break; + } + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + phylink_set(if_caps, 20000baseKR2_Full); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: + phylink_set(if_caps, 25000baseSR_Full); + break; + default: + break; + } + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + phylink_set(if_caps, 40000baseLR4_Full); + break; + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + phylink_set(if_caps, 40000baseSR4_Full); + break; + default: + break; + } + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + phylink_set(if_caps, 50000baseKR2_Full); + + if 
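/* For example, a multi-rate 10G/25G SR module
 * (ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR) on a port whose merged
 * capability mask enables both 10G and 25G ends up advertising
 * 10000baseSR_Full as well as 25000baseSR_Full through the switches
 * above.
 */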
(capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + switch (tcvr_type) { + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + phylink_set(if_caps, 100000baseSR4_Full); + break; + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + phylink_set(if_caps, 100000baseLR4_ER4_Full); + break; + default: + break; + } + + break; + case MEDIA_KR: + phylink_set(if_caps, Backplane); + + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) + phylink_set(if_caps, 20000baseKR2_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) + phylink_set(if_caps, 1000baseKX_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) + phylink_set(if_caps, 10000baseKR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) + phylink_set(if_caps, 25000baseKR_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) + phylink_set(if_caps, 40000baseKR4_Full); + if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) + phylink_set(if_caps, 50000baseKR2_Full); + if (capability & + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) + phylink_set(if_caps, 100000baseKR4_Full); + + break; + case MEDIA_UNSPECIFIED: + case MEDIA_NOT_PRESENT: + default: + DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, + "Unknown media and transceiver type;\n"); + break; + } +} + +static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask) +{ + *speed_mask = 0; + + if (caps & + (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD)) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + if (caps & QED_LINK_PARTNER_SPEED_10G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + if (caps & QED_LINK_PARTNER_SPEED_20G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; + if (caps & QED_LINK_PARTNER_SPEED_25G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + if (caps & QED_LINK_PARTNER_SPEED_40G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + if (caps & QED_LINK_PARTNER_SPEED_50G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; + if (caps & QED_LINK_PARTNER_SPEED_100G) + *speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; +} + +static void qed_fill_link(struct qed_hwfn *hwfn, + struct qed_ptt *ptt, + struct qed_link_output *if_link) +{ + struct qed_mcp_link_capabilities link_caps; + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; + u32 media_type, speed_mask; + + memset(if_link, 0, sizeof(*if_link)); + + /* Prepare source inputs */ + if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { + dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); + return; + } + + /* Set the link parameters to pass to protocol driver */ + if (link.link_up) + if_link->link_up = true; + + if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) { + if (link_caps.default_ext_autoneg) + phylink_set(if_link->supported_caps, Autoneg); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); + + if (params.ext_speed.autoneg) + phylink_set(if_link->advertised_caps, Autoneg); + else + phylink_clear(if_link->advertised_caps, Autoneg); + + qed_fill_link_capability(hwfn, ptt, + params.ext_speed.advertised_speeds, + if_link->advertised_caps); + } else { + if (link_caps.default_speed_autoneg) + phylink_set(if_link->supported_caps, Autoneg); + + linkmode_copy(if_link->advertised_caps, if_link->supported_caps); + + if (params.speed.autoneg) + phylink_set(if_link->advertised_caps, Autoneg); + else + 
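/* Note that qed_lp_caps_to_speed_mask() above translates the link
 * partner advertisement (QED_LINK_PARTNER_SPEED_*) into the same
 * NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_* format, which lets
 * qed_fill_link_capability() be reused further below to build
 * if_link->lp_caps as well.
 */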
phylink_clear(if_link->advertised_caps, Autoneg); + } + + if (params.pause.autoneg || + (params.pause.forced_rx && params.pause.forced_tx)) + phylink_set(if_link->supported_caps, Asym_Pause); + if (params.pause.autoneg || params.pause.forced_rx || + params.pause.forced_tx) + phylink_set(if_link->supported_caps, Pause); + + if_link->sup_fec = link_caps.fec_default; + if_link->active_fec = params.fec; + + /* Fill link advertised capability */ + qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, + if_link->advertised_caps); + + /* Fill link supported capability */ + qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, + if_link->supported_caps); + + /* Fill partner advertised capability */ + qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); + qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); + + if (link.link_up) + if_link->speed = link.speed; + + /* TODO - fill duplex properly */ + if_link->duplex = DUPLEX_FULL; + qed_mcp_get_media_type(hwfn, ptt, &media_type); + if_link->port = qed_get_port_type(media_type); + + if_link->autoneg = params.speed.autoneg; + + if (params.pause.autoneg) + if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + if (params.pause.forced_rx) + if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; + if (params.pause.forced_tx) + if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; + + if (link.an_complete) + phylink_set(if_link->lp_caps, Autoneg); + if (link.partner_adv_pause) + phylink_set(if_link->lp_caps, Pause); + if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || + link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) + phylink_set(if_link->lp_caps, Asym_Pause); + + if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { + if_link->eee_supported = false; + } else { + if_link->eee_supported = true; + if_link->eee_active = link.eee_active; + if_link->sup_caps = link_caps.eee_speed_caps; + /* MFW clears adv_caps on eee disable; use configured value */ + if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : + params.eee.adv_caps; + if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; + if_link->eee.enable = params.eee.enable; + if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; + if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; + } +} + +static void qed_get_current_link(struct qed_dev *cdev, + struct qed_link_output *if_link) +{ + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int i; + + hwfn = &cdev->hwfns[0]; + if (IS_PF(cdev)) { + ptt = qed_ptt_acquire(hwfn); + if (ptt) { + qed_fill_link(hwfn, ptt, if_link); + qed_ptt_release(hwfn, ptt); + } else { + DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); + } + } else { + qed_fill_link(hwfn, NULL, if_link); + } + + for_each_hwfn(cdev, i) + qed_inform_vf_link_state(&cdev->hwfns[i]); +} + +void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) +{ + void *cookie = hwfn->cdev->ops_cookie; + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + struct qed_link_output if_link; + + qed_fill_link(hwfn, ptt, &if_link); + qed_inform_vf_link_state(hwfn); + + if (IS_LEAD_HWFN(hwfn) && cookie) + op->link_update(cookie, &if_link); +} + +void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) +{ + void *cookie = hwfn->cdev->ops_cookie; + struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; + + if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) + op->bw_update(cookie); +} + +static int qed_drain(struct qed_dev *cdev) +{ + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int i, rc; + + if (IS_VF(cdev)) + return 0; + + for_each_hwfn(cdev, i) { + hwfn = &cdev->hwfns[i]; + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); + return -EBUSY; + } + rc = qed_mcp_drain(hwfn, ptt); + qed_ptt_release(hwfn, ptt); + if (rc) + return rc; + } + + return 0; +} + +static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, + struct qed_nvm_image_att *nvm_image, + u32 *crc) +{ + u8 *buf = NULL; + int rc; + + /* Allocate a buffer for holding the nvram image */ + buf = kzalloc(nvm_image->length, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + /* Read image into buffer */ + rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, + buf, nvm_image->length); + if (rc) { + DP_ERR(cdev, "Failed reading image from nvm\n"); + goto out; + } + + /* Convert the buffer into big-endian format (excluding the + * closing 4 bytes of CRC). + */ + cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, + DIV_ROUND_UP(nvm_image->length - 4, 4)); + + /* Calc CRC for the "actual" image buffer, i.e. not including + * the last 4 CRC bytes. + */ + *crc = ~crc32(~0U, buf, nvm_image->length - 4); + *crc = (__force u32)cpu_to_be32p(crc); + +out: + kfree(buf); + + return rc; +} + +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x4 [command index] | + * 4B | image_type | Options | Number of register settings | + * 8B | Value | + * 12B | Mask | + * 16B | Offset | + * \----------------------------------------------------------------------/ + * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
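 * Each set rewrites the 32-bit word at 'Offset' within the image as
 * new = (Value & Mask) | (old & ~Mask), i.e. only the bits selected by
 * Mask are taken from Value.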
+ * Options - 0'b - Calculate & Update CRC for image + */ +static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, + bool *check_resp) +{ + struct qed_nvm_image_att nvm_image; + struct qed_hwfn *p_hwfn; + bool is_crc = false; + u32 image_type; + int rc = 0, i; + u16 len; + + *data += 4; + image_type = **data; + p_hwfn = QED_LEADING_HWFN(cdev); + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) + if (image_type == p_hwfn->nvm_info.image_att[i].image_type) + break; + if (i == p_hwfn->nvm_info.num_images) { + DP_ERR(cdev, "Failed to find nvram image of type %08x\n", + image_type); + return -ENOENT; + } + + nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; + nvm_image.length = p_hwfn->nvm_info.image_att[i].len; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", + **data, image_type, nvm_image.start_addr, + nvm_image.start_addr + nvm_image.length - 1); + (*data)++; + is_crc = !!(**data & BIT(0)); + (*data)++; + len = *((u16 *)*data); + *data += 2; + if (is_crc) { + u32 crc = 0; + + rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); + if (rc) { + DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); + goto exit; + } + + rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, + (nvm_image.start_addr + + nvm_image.length - 4), (u8 *)&crc, 4); + if (rc) + DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", + nvm_image.start_addr + nvm_image.length - 4, rc); + goto exit; + } + + /* Iterate over the values for setting */ + while (len) { + u32 offset, mask, value, cur_value; + u8 buf[4]; + + value = *((u32 *)*data); + *data += 4; + mask = *((u32 *)*data); + *data += 4; + offset = *((u32 *)*data); + *data += 4; + + rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, + 4); + if (rc) { + DP_ERR(cdev, "Failed reading from %08x\n", + nvm_image.start_addr + offset); + goto exit; + } + + cur_value = le32_to_cpu(*((__le32 *)buf)); + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", + nvm_image.start_addr + offset, cur_value, + (cur_value & ~mask) | (value & mask), value, mask); + value = (value & mask) | (cur_value & ~mask); + rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, + nvm_image.start_addr + offset, + (u8 *)&value, 4); + if (rc) { + DP_ERR(cdev, "Failed writing to %08x\n", + nvm_image.start_addr + offset); + goto exit; + } + + len--; + } +exit: + return rc; +} + +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x3 [command index] | + * 4B | b'0: check_response? | b'1-31 reserved | + * 8B | File-type | reserved | + * 12B | Image length in bytes | + * \----------------------------------------------------------------------/ + * Start a new file of the provided type + */ +static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, + const u8 **data, bool *check_resp) +{ + u32 file_type, file_size = 0; + int rc; + + *data += 4; + *check_resp = !!(**data & BIT(0)); + *data += 4; + file_type = **data; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "About to start a new file of type %02x\n", file_type); + if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { + *data += 4; + file_size = *((u32 *)(*data)); + } + + rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, + (u8 *)(&file_size), 4); + *data += 4; + + return rc; +} + +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x2 [command index] | + * 4B | Length in bytes | + * 8B | b'0: check_response? 
| b'1-31 reserved | + * 12B | Offset in bytes | + * 16B | Data ... | + * \----------------------------------------------------------------------/ + * Write data as part of a file that was previously started. Data should be + * of length equal to that provided in the message + */ +static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, + const u8 **data, bool *check_resp) +{ + u32 offset, len; + int rc; + + *data += 4; + len = *((u32 *)(*data)); + *data += 4; + *check_resp = !!(**data & BIT(0)); + *data += 4; + offset = *((u32 *)(*data)); + *data += 4; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "About to write File-data: %08x bytes to offset %08x\n", + len, offset); + + rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, + (char *)(*data), len); + *data += len; + + return rc; +} + +/* Binary file format [General header] - + * /----------------------------------------------------------------------\ + * 0B | QED_NVM_SIGNATURE | + * 4B | Length in bytes | + * 8B | Highest command in this batchfile | Reserved | + * \----------------------------------------------------------------------/ + */ +static int qed_nvm_flash_image_validate(struct qed_dev *cdev, + const struct firmware *image, + const u8 **data) +{ + u32 signature, len; + + /* Check minimum size */ + if (image->size < 12) { + DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); + return -EINVAL; + } + + /* Check signature */ + signature = *((u32 *)(*data)); + if (signature != QED_NVM_SIGNATURE) { + DP_ERR(cdev, "Wrong signature '%08x'\n", signature); + return -EINVAL; + } + + *data += 4; + /* Validate internal size equals the image-size */ + len = *((u32 *)(*data)); + if (len != image->size) { + DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", + len, (u32)image->size); + return -EINVAL; + } + + *data += 4; + /* Make sure driver familiar with all commands necessary for this */ + if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { + DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", + *((u16 *)(*data))); + return -EINVAL; + } + + *data += 4; + + return 0; +} + +/* Binary file format - + * /----------------------------------------------------------------------\ + * 0B | 0x5 [command index] | + * 4B | Number of config attributes | Reserved | + * 4B | Config ID | Entity ID | Length | + * 4B | Value | + * | | + * \----------------------------------------------------------------------/ + * There can be several cfg_id-entity_id-Length-Value sets as specified by + * 'Number of config attributes'. + * + * The API parses config attributes from the user provided buffer and flashes + * them to the respective NVM path using Management FW inerface. + */ +static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 entity_id, len, buf[32]; + bool need_nvm_init = true; + struct qed_ptt *ptt; + u16 cfg_id, count; + int rc = 0, i; + u32 flags; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + /* NVM CFG ID attribute header */ + *data += 4; + count = *((u16 *)*data); + *data += 4; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read config ids: num_attrs = %0d\n", count); + /* NVM CFG ID attributes. Start loop index from 1 to avoid additional + * arithmetic operations in the implementation. 
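 * Attributes are pushed to the MFW in batches of QED_NVM_CFG_MAX_ATTRS:
 * the first attribute of a batch carries QED_NVM_CFG_OPTION_INIT, and
 * the last attribute of a batch (or the final attribute overall) adds
 * QED_NVM_CFG_OPTION_COMMIT | QED_NVM_CFG_OPTION_FREE so that the
 * staged values are committed to flash and the MFW resources are
 * released.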
+ */ + for (i = 1; i <= count; i++) { + cfg_id = *((u16 *)*data); + *data += 2; + entity_id = **data; + (*data)++; + len = **data; + (*data)++; + memcpy(buf, *data, len); + *data += len; + + flags = 0; + if (need_nvm_init) { + flags |= QED_NVM_CFG_OPTION_INIT; + need_nvm_init = false; + } + + /* Commit to flash and free the resources */ + if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) { + flags |= QED_NVM_CFG_OPTION_COMMIT | + QED_NVM_CFG_OPTION_FREE; + need_nvm_init = true; + } + + if (entity_id) + flags |= QED_NVM_CFG_OPTION_ENTITY_SEL; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "cfg_id = %d entity = %d len = %d\n", cfg_id, + entity_id, len); + rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags, + buf, len); + if (rc) { + DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id); + break; + } + } + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +#define QED_MAX_NVM_BUF_LEN 32 +static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + u8 buf[QED_MAX_NVM_BUF_LEN]; + struct qed_ptt *ptt; + u32 len; + int rc; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return QED_MAX_NVM_BUF_LEN; + + rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf, + &len); + if (rc || !len) { + DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); + len = QED_MAX_NVM_BUF_LEN; + } + + qed_ptt_release(hwfn, ptt); + + return len; +} + +static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data, + u32 cmd, u32 entity_id) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + u32 flags, len; + int rc = 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Read config cmd = %d entity id %d\n", cmd, entity_id); + flags = entity_id ? 
QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; + rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); + if (rc) + DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_nvm_flash(struct qed_dev *cdev, const char *name) +{ + const struct firmware *image; + const u8 *data, *data_end; + u32 cmd_type; + int rc; + + rc = request_firmware(&image, name, &cdev->pdev->dev); + if (rc) { + DP_ERR(cdev, "Failed to find '%s'\n", name); + return rc; + } + + DP_VERBOSE(cdev, NETIF_MSG_DRV, + "Flashing '%s' - firmware's data at %p, size is %08x\n", + name, image->data, (u32)image->size); + data = image->data; + data_end = data + image->size; + + rc = qed_nvm_flash_image_validate(cdev, image, &data); + if (rc) + goto exit; + + while (data < data_end) { + bool check_resp = false; + + /* Parse the actual command */ + cmd_type = *((u32 *)data); + switch (cmd_type) { + case QED_NVM_FLASH_CMD_FILE_DATA: + rc = qed_nvm_flash_image_file_data(cdev, &data, + &check_resp); + break; + case QED_NVM_FLASH_CMD_FILE_START: + rc = qed_nvm_flash_image_file_start(cdev, &data, + &check_resp); + break; + case QED_NVM_FLASH_CMD_NVM_CHANGE: + rc = qed_nvm_flash_image_access(cdev, &data, + &check_resp); + break; + case QED_NVM_FLASH_CMD_NVM_CFG_ID: + rc = qed_nvm_flash_cfg_write(cdev, &data); + break; + default: + DP_ERR(cdev, "Unknown command %08x\n", cmd_type); + rc = -EINVAL; + goto exit; + } + + if (rc) { + DP_ERR(cdev, "Command %08x failed\n", cmd_type); + goto exit; + } + + /* Check response if needed */ + if (check_resp) { + u32 mcp_response = 0; + + if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { + DP_ERR(cdev, "Failed getting MCP response\n"); + rc = -EINVAL; + goto exit; + } + + switch (mcp_response & FW_MSG_CODE_MASK) { + case FW_MSG_CODE_OK: + case FW_MSG_CODE_NVM_OK: + case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: + case FW_MSG_CODE_PHY_OK: + break; + default: + DP_ERR(cdev, "MFW returns error: %08x\n", + mcp_response); + rc = -EINVAL; + goto exit; + } + } + } + +exit: + release_firmware(image); + + return rc; +} + +static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, + u8 *buf, u16 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + + return qed_mcp_get_nvm_image(hwfn, type, buf, len); +} + +void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) +{ + struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; + void *cookie = p_hwfn->cdev->ops_cookie; + + if (ops && ops->schedule_recovery_handler) + ops->schedule_recovery_handler(cookie); +} + +static const char * const qed_hw_err_type_descr[] = { + [QED_HW_ERR_FAN_FAIL] = "Fan Failure", + [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", + [QED_HW_ERR_HW_ATTN] = "HW Attention", + [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", + [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", + [QED_HW_ERR_FW_ASSERT] = "FW Assertion", + [QED_HW_ERR_LAST] = "Unknown", +}; + +void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, + enum qed_hw_err_type err_type) +{ + struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; + void *cookie = p_hwfn->cdev->ops_cookie; + const char *err_str; + + if (err_type > QED_HW_ERR_LAST) + err_type = QED_HW_ERR_LAST; + err_str = qed_hw_err_type_descr[err_type]; + + DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); + + /* Call the HW error handler of the protocol driver. + * If it is not available - perform a minimal handling of preventing + * HW attentions from being reasserted. 
+ */ + if (ops && ops->schedule_hw_err_handler) + ops->schedule_hw_err_handler(cookie, err_type); + else + qed_int_attn_clr_enable(p_hwfn->cdev, true); +} + +static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, + void *handle) +{ + return qed_set_queue_coalesce(rx_coal, tx_coal, handle); +} + +static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_set_led(hwfn, ptt, mode); + + qed_ptt_release(hwfn, ptt); + + return status; +} + +int qed_recovery_process(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EAGAIN; + + rc = qed_start_recovery_process(p_hwfn, p_ptt); + + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +static int qed_update_wol(struct qed_dev *cdev, bool enabled) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED + : QED_OV_WOL_DISABLED); + if (rc) + goto out; + rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return rc; +} + +static int qed_update_drv_state(struct qed_dev *cdev, bool active) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
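/* qed_update_wol(), qed_update_drv_state(), qed_update_mac() and the
 * neighbouring helpers all follow the same PTT window idiom - sketched
 * here with qed_mcp_<operation> standing in for the respective MFW
 * call:
 *
 *	ptt = qed_ptt_acquire(hwfn);
 *	if (!ptt)
 *		return -EAGAIN;
 *	rc = qed_mcp_<operation>(hwfn, ptt, ...);
 *	qed_ptt_release(hwfn, ptt);
 *	return rc;
 *
 * The IS_VF() early returns skip the MFW mailbox entirely, since these
 * are PF-only management operations.
 */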
+ QED_OV_DRIVER_STATE_ACTIVE : + QED_OV_DRIVER_STATE_DISABLED); + + qed_ptt_release(hwfn, ptt); + + return status; +} + +static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_mac(hwfn, ptt, mac); + if (status) + goto out; + + status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return status; +} + +static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int status = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); + if (status) + goto out; + + status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); + +out: + qed_ptt_release(hwfn, ptt); + return status; +} + +static int +qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, + u16 qid, struct qed_sb_info_dbg *sb_dbg) +{ + struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; + struct qed_ptt *ptt; + int rc; + + if (IS_VF(cdev)) + return -EINVAL; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Can't acquire PTT\n"); + return -EAGAIN; + } + + memset(sb_dbg, 0, sizeof(*sb_dbg)); + rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); + + qed_ptt_release(hwfn, ptt); + return rc; +} + +static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, + u8 dev_addr, u32 offset, u32 len) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, + offset, len, buf); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_dbg_grc_config(hwfn, cfg_id, val); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) 
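/* Format a driver-generated report string and, on a PF, forward it to
 * the MFW through the raw debug-data mailbox. This is best effort
 * only: the message is dropped on a VF or when no PTT can be acquired.
 */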
+{ + char buf[QED_MFW_REPORT_STR_SIZE]; + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + va_list vl; + + va_start(vl, fmt); + vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl); + va_end(vl); + + if (IS_PF(cdev)) { + p_hwfn = QED_LEADING_HWFN(cdev); + p_ptt = qed_ptt_acquire(p_hwfn); + if (p_ptt) { + qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf)); + qed_ptt_release(p_hwfn, p_ptt); + } + } +} + +static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) +{ + return QED_AFFIN_HWFN_IDX(cdev); +} + +static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *ptt; + int rc = 0; + + *esl_active = false; + + if (IS_VF(cdev)) + return 0; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) + return -EAGAIN; + + rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active); + + qed_ptt_release(hwfn, ptt); + + return rc; +} + +static struct qed_selftest_ops qed_selftest_ops_pass = { + .selftest_memory = &qed_selftest_memory, + .selftest_interrupt = &qed_selftest_interrupt, + .selftest_register = &qed_selftest_register, + .selftest_clock = &qed_selftest_clock, + .selftest_nvram = &qed_selftest_nvram, +}; + +const struct qed_common_ops qed_common_ops_pass = { + .selftest = &qed_selftest_ops_pass, + .probe = &qed_probe, + .remove = &qed_remove, + .set_power_state = &qed_set_power_state, + .set_name = &qed_set_name, + .update_pf_params = &qed_update_pf_params, + .slowpath_start = &qed_slowpath_start, + .slowpath_stop = &qed_slowpath_stop, + .set_fp_int = &qed_set_int_fp, + .get_fp_int = &qed_get_int_fp, + .sb_init = &qed_sb_init, + .sb_release = &qed_sb_release, + .simd_handler_config = &qed_simd_handler_config, + .simd_handler_clean = &qed_simd_handler_clean, + .dbg_grc = &qed_dbg_grc, + .dbg_grc_size = &qed_dbg_grc_size, + .can_link_change = &qed_can_link_change, + .set_link = &qed_set_link, + .get_link = &qed_get_current_link, + .drain = &qed_drain, + .update_msglvl = &qed_init_dp, + .devlink_register = qed_devlink_register, + .devlink_unregister = qed_devlink_unregister, + .report_fatal_error = qed_report_fatal_error, + .dbg_all_data = &qed_dbg_all_data, + .dbg_all_data_size = &qed_dbg_all_data_size, + .chain_alloc = &qed_chain_alloc, + .chain_free = &qed_chain_free, + .nvm_flash = &qed_nvm_flash, + .nvm_get_image = &qed_nvm_get_image, + .set_coalesce = &qed_set_coalesce, + .set_led = &qed_set_led, + .recovery_process = &qed_recovery_process, + .recovery_prolog = &qed_recovery_prolog, + .attn_clr_enable = &qed_int_attn_clr_enable, + .update_drv_state = &qed_update_drv_state, + .update_mac = &qed_update_mac, + .update_mtu = &qed_update_mtu, + .update_wol = &qed_update_wol, + .db_recovery_add = &qed_db_recovery_add, + .db_recovery_del = &qed_db_recovery_del, + .read_module_eeprom = &qed_read_module_eeprom, + .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, + .read_nvm_cfg = &qed_nvm_flash_cfg_read, + .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, + .set_grc_config = &qed_set_grc_config, + .mfw_report = &qed_mfw_report, + .get_sb_info = &qed_get_sb_info, + .get_esl_status = &qed_get_esl_status, +}; + +void qed_get_protocol_stats(struct qed_dev *cdev, + enum qed_mcp_protocol_type type, + union qed_mcp_protocol_stats *stats) +{ + struct qed_eth_stats eth_stats; + + memset(stats, 0, sizeof(*stats)); + + switch (type) { + case QED_MCP_LAN_STATS: + qed_get_vport_stats_context(cdev, ð_stats, true); + stats->lan_stats.ucast_rx_pkts = + eth_stats.common.rx_ucast_pkts; + stats->lan_stats.ucast_tx_pkts = + eth_stats.common.tx_ucast_pkts; + 
stats->lan_stats.fcs_err = -1; + break; + case QED_MCP_FCOE_STATS: + qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true); + break; + case QED_MCP_ISCSI_STATS: + qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true); + break; + default: + DP_VERBOSE(cdev, QED_MSG_SP, + "Invalid protocol type = %d\n", type); + return; + } +} + +int qed_mfw_tlv_req(struct qed_hwfn *hwfn) +{ + DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, + "Scheduling slowpath task [Flag: %d]\n", + QED_SLOWPATH_MFW_TLV_REQ); + /* Memory barrier for setting atomic bit */ + smp_mb__before_atomic(); + set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags); + /* Memory barrier after setting atomic bit */ + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + + return 0; +} + +static void +qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv) +{ + struct qed_common_cb_ops *op = cdev->protocol_ops.common; + struct qed_eth_stats_common *p_common; + struct qed_generic_tlvs gen_tlvs; + struct qed_eth_stats stats; + int i; + + memset(&gen_tlvs, 0, sizeof(gen_tlvs)); + op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs); + + if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM) + tlv->flags.ipv4_csum_offload = true; + if (gen_tlvs.feat_flags & QED_TLV_LSO) + tlv->flags.lso_supported = true; + tlv->flags.b_set = true; + + for (i = 0; i < QED_TLV_MAC_COUNT; i++) { + if (is_valid_ether_addr(gen_tlvs.mac[i])) { + ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]); + tlv->mac_set[i] = true; + } + } + + qed_get_vport_stats(cdev, &stats); + p_common = &stats.common; + tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts + + p_common->rx_bcast_pkts; + tlv->rx_frames_set = true; + tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes + + p_common->rx_bcast_bytes; + tlv->rx_bytes_set = true; + tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts + + p_common->tx_bcast_pkts; + tlv->tx_frames_set = true; + tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes + + p_common->tx_bcast_bytes; + tlv->rx_bytes_set = true; +} + +int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type, + union qed_mfw_tlv_data *tlv_buf) +{ + struct qed_dev *cdev = hwfn->cdev; + struct qed_common_cb_ops *ops; + + ops = cdev->protocol_ops.common; + if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) { + DP_NOTICE(hwfn, "Can't collect TLV management info\n"); + return -EINVAL; + } + + switch (type) { + case QED_MFW_TLV_GENERIC: + qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic); + break; + case QED_MFW_TLV_ETH: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth); + break; + case QED_MFW_TLV_FCOE: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe); + break; + case QED_MFW_TLV_ISCSI: + ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi); + break; + default: + break; + } + + return 0; +} + +unsigned long qed_get_epoch_time(void) +{ + return ktime_get_real_seconds(); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c new file mode 100644 index 0000000000..16e6bd4661 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -0,0 +1,4244 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/etherdevice.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_hsi.h" +#include "qed_mfw_hsi.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sriov.h" + +#define GRCBASE_MCP 0xe00000 + +#define QED_MCP_RESP_ITER_US 10 + +#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ +#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ + +#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ + qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \ + _val) + +#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ + qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset))) + +#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ + DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ + offsetof(struct public_drv_mb, _field), _val) + +#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ + DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ + offsetof(struct public_drv_mb, _field)) + +#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ + DRV_ID_PDA_COMP_VER_SHIFT) + +#define MCP_BYTES_PER_MBIT_SHIFT 17 + +bool qed_mcp_is_init(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) + return false; + return true; +} + +void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PORT); + u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr); + + p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, + MFW_PORT(p_hwfn)); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "port_addr = 0x%x, port_id 0x%02x\n", + p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); +} + +void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); + u32 tmp, i; + + if (!p_hwfn->mcp_info->public_base) + return; + + for (i = 0; i < length; i++) { + tmp = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->mfw_mb_addr + + (i << 2) + sizeof(u32)); + + /* The MB data is actually BE; Need to force it to cpu */ + ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = + be32_to_cpu((__force __be32)tmp); + } +} + +struct qed_mcp_cmd_elem { + struct list_head list; + struct qed_mcp_mb_params *p_mb_params; + u16 expected_seq_num; + bool b_is_completed; +}; + +/* Must be called while cmd_lock is acquired */ +static struct qed_mcp_cmd_elem * +qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn, + struct qed_mcp_mb_params *p_mb_params, + u16 expected_seq_num) +{ + struct qed_mcp_cmd_elem *p_cmd_elem = NULL; + + p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC); + if (!p_cmd_elem) + goto out; + + p_cmd_elem->p_mb_params = p_mb_params; + p_cmd_elem->expected_seq_num = expected_seq_num; + list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list); +out: + return p_cmd_elem; +} + +/* Must be called while cmd_lock is acquired */ +static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn, + struct qed_mcp_cmd_elem *p_cmd_elem) +{ + list_del(&p_cmd_elem->list); + kfree(p_cmd_elem); +} + +/* Must be called while cmd_lock is acquired */ +static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn, + u16 seq_num) +{ + struct qed_mcp_cmd_elem *p_cmd_elem = NULL; + + list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) { + if 
(p_cmd_elem->expected_seq_num == seq_num) + return p_cmd_elem; + } + + return NULL; +} + +int qed_mcp_free(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->mcp_info) { + struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp; + + kfree(p_hwfn->mcp_info->mfw_mb_cur); + kfree(p_hwfn->mcp_info->mfw_mb_shadow); + + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + list_for_each_entry_safe(p_cmd_elem, + p_tmp, + &p_hwfn->mcp_info->cmd_list, list) { + qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + } + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + } + + kfree(p_hwfn->mcp_info); + p_hwfn->mcp_info = NULL; + + return 0; +} + +/* Maximum of 1 sec to wait for the SHMEM ready indication */ +#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20 +#define QED_MCP_SHMEM_RDY_ITER_MS 50 + +static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_info *p_info = p_hwfn->mcp_info; + u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES; + u8 msec = QED_MCP_SHMEM_RDY_ITER_MS; + u32 drv_mb_offsize, mfw_mb_offsize; + u32 mcp_pf_id = MCP_PF_ID(p_hwfn); + + p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); + if (!p_info->public_base) { + DP_NOTICE(p_hwfn, + "The address of the MCP scratch-pad is not configured\n"); + return -EINVAL; + } + + p_info->public_base |= GRCBASE_MCP; + + /* Get the MFW MB address and number of supported messages */ + mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_MFW_MB)); + p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); + p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr + + offsetof(struct public_mfw_mb, + sup_msgs)); + + /* The driver can notify that there was an MCP reset, and might read the + * SHMEM values before the MFW has completed initializing them. + * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a + * data ready indication. 
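 * The field reads as zero until the MFW has populated the section, so
 * the loop below polls it for up to QED_MCP_SHMEM_RDY_MAX_RETRIES
 * iterations of QED_MCP_SHMEM_RDY_ITER_MS (20 * 50 ms, i.e. about one
 * second) before bailing out with -EBUSY.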
+ */ + while (!p_info->mfw_mb_length && --cnt) { + msleep(msec); + p_info->mfw_mb_length = + (u16)qed_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr + + offsetof(struct public_mfw_mb, sup_msgs)); + } + + if (!cnt) { + DP_NOTICE(p_hwfn, + "Failed to get the SHMEM ready notification after %d msec\n", + QED_MCP_SHMEM_RDY_MAX_RETRIES * msec); + return -EBUSY; + } + + /* Calculate the driver and MFW mailbox address */ + drv_mb_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_DRV_MB)); + p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", + drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); + + /* Get the current driver mailbox sequence before sending + * the first command + */ + p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + + /* Get current FW pulse sequence */ + p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & + DRV_PULSE_SEQ_MASK; + + p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + + return 0; +} + +int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_info *p_info; + u32 size; + + /* Allocate mcp_info structure */ + p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL); + if (!p_hwfn->mcp_info) + goto err; + p_info = p_hwfn->mcp_info; + + /* Initialize the MFW spinlock */ + spin_lock_init(&p_info->cmd_lock); + spin_lock_init(&p_info->link_lock); + spin_lock_init(&p_info->unload_lock); + + INIT_LIST_HEAD(&p_info->cmd_list); + + if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) { + DP_NOTICE(p_hwfn, "MCP is not initialized\n"); + /* Do not free mcp_info here, since public_base indicate that + * the MCP is not initialized + */ + return 0; + } + + size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); + p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); + p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL); + if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow) + goto err; + + return 0; + +err: + qed_mcp_free(p_hwfn); + return -ENOMEM; +} + +static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + + /* Use MCP history register to check if MCP reset occurred between init + * time and now. + */ + if (p_hwfn->mcp_info->mcp_hist != generic_por_0) { + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n", + p_hwfn->mcp_info->mcp_hist, generic_por_0); + + qed_load_mcp_offsets(p_hwfn, p_ptt); + qed_mcp_cmd_port_init(p_hwfn, p_ptt); + } +} + +int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0; + int rc = 0; + + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, + "The MFW is not responsive. 
Avoid sending MCP_RESET mailbox command.\n"); + return -EBUSY; + } + + /* Ensure that only a single thread is accessing the mailbox */ + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + + org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + + /* Set drv command along with the updated sequence */ + qed_mcp_reread_offsets(p_hwfn, p_ptt); + seq = ++p_hwfn->mcp_info->drv_mb_seq; + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq)); + + do { + /* Wait for MFW response */ + udelay(delay); + /* Give the FW up to 500 second (50*1000*10usec) */ + } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt, + MISCS_REG_GENERIC_POR_0)) && + (cnt++ < QED_MCP_RESET_RETRIES)); + + if (org_mcp_reset_seq != + qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "MCP was reset after %d usec\n", cnt * delay); + } else { + DP_ERR(p_hwfn, "Failed to reset MCP\n"); + rc = -EAGAIN; + } + + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + return rc; +} + +/* Must be called while cmd_lock is acquired */ +static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn) +{ + struct qed_mcp_cmd_elem *p_cmd_elem; + + /* There is at most one pending command at a certain time, and if it + * exists - it is placed at the HEAD of the list. + */ + if (!list_empty(&p_hwfn->mcp_info->cmd_list)) { + p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list, + struct qed_mcp_cmd_elem, list); + return !p_cmd_elem->b_is_completed; + } + + return false; +} + +/* Must be called while cmd_lock is acquired */ +static int +qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params *p_mb_params; + struct qed_mcp_cmd_elem *p_cmd_elem; + u32 mcp_resp; + u16 seq_num; + + mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); + seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK); + + /* Return if no new non-handled response has been received */ + if (seq_num != p_hwfn->mcp_info->drv_mb_seq) + return -EAGAIN; + + p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num); + if (!p_cmd_elem) { + DP_ERR(p_hwfn, + "Failed to find a pending mailbox cmd that expects sequence number %d\n", + seq_num); + return -EINVAL; + } + + p_mb_params = p_cmd_elem->p_mb_params; + + /* Get the MFW response along with the sequence number */ + p_mb_params->mcp_resp = mcp_resp; + + /* Get the MFW param */ + p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); + + /* Get the union data */ + if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) { + u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, + union_data); + qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, + union_data_addr, p_mb_params->data_dst_size); + } + + p_cmd_elem->b_is_completed = true; + + return 0; +} + +/* Must be called while cmd_lock is acquired */ +static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_mb_params *p_mb_params, + u16 seq_num) +{ + union drv_union_data union_data; + u32 union_data_addr; + + /* Set the union data */ + union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, union_data); + memset(&union_data, 0, sizeof(union_data)); + if (p_mb_params->p_data_src && p_mb_params->data_src_size) + memcpy(&union_data, p_mb_params->p_data_src, + p_mb_params->data_src_size); + qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data, + sizeof(union_data)); + + /* Set the drv param */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param); + + /* Set the drv 
command along with the sequence number */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num)); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "MFW mailbox: command 0x%08x param 0x%08x\n", + (p_mb_params->cmd | seq_num), p_mb_params->param); +} + +static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd) +{ + p_hwfn->mcp_info->b_block_cmd = block_cmd; + + DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n", + block_cmd ? "Block" : "Unblock"); +} + +static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2; + u32 delay = QED_MCP_RESP_ITER_US; + + cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); + cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + udelay(delay); + cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + udelay(delay); + cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER); + + DP_NOTICE(p_hwfn, + "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n", + cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2); +} + +static int +_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_mb_params *p_mb_params, + u32 max_retries, u32 usecs) +{ + u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000); + struct qed_mcp_cmd_elem *p_cmd_elem; + u16 seq_num; + int rc = 0; + + /* Wait until the mailbox is non-occupied */ + do { + /* Exit the loop if there is no pending command, or if the + * pending command is completed during this iteration. + * The spinlock stays locked until the command is sent. + */ + + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + + if (!qed_mcp_has_pending_cmd(p_hwfn)) + break; + + rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); + if (!rc) + break; + else if (rc != -EAGAIN) + goto err; + + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) + msleep(msecs); + else + udelay(usecs); + } while (++cnt < max_retries); + + if (cnt >= max_retries) { + DP_NOTICE(p_hwfn, + "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return -EAGAIN; + } + + /* Send the mailbox command */ + qed_mcp_reread_offsets(p_hwfn, p_ptt); + seq_num = ++p_hwfn->mcp_info->drv_mb_seq; + p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num); + if (!p_cmd_elem) { + rc = -ENOMEM; + goto err; + } + + __qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num); + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + /* Wait for the MFW response */ + do { + /* Exit the loop if the command is already completed, or if the + * command is completed during this iteration. + * The spinlock stays locked until the list element is removed. 
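 * With the default budget this amounts to QED_DRV_MB_MAX_RETRIES
 * (500 * 1000) polls of QED_MCP_RESP_ITER_US (10 us) each, i.e. about
 * 5 seconds; when QED_MB_FLAG_CAN_SLEEP is set, qed_mcp_cmd_and_union()
 * has already scaled this to ~500 msleep(10) iterations with the same
 * overall budget.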
+ */ + + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) + msleep(msecs); + else + udelay(usecs); + + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + + if (p_cmd_elem->b_is_completed) + break; + + rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt); + if (!rc) + break; + else if (rc != -EAGAIN) + goto err; + + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + } while (++cnt < max_retries); + + if (cnt >= max_retries) { + DP_NOTICE(p_hwfn, + "The MFW failed to respond to command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + qed_mcp_print_cpu_info(p_hwfn, p_ptt); + + spin_lock_bh(&p_hwfn->mcp_info->cmd_lock); + qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK)) + qed_mcp_cmd_set_blocking(p_hwfn, true); + + qed_hw_err_notify(p_hwfn, p_ptt, + QED_HW_ERR_MFW_RESP_FAIL, NULL); + return -EAGAIN; + } + + qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem); + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n", + p_mb_params->mcp_resp, + p_mb_params->mcp_param, + (cnt * usecs) / 1000, (cnt * usecs) % 1000); + + /* Clear the sequence number from the MFW response */ + p_mb_params->mcp_resp &= FW_MSG_CODE_MASK; + + return 0; + +err: + spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock); + return rc; +} + +static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_mb_params *p_mb_params) +{ + size_t union_data_size = sizeof(union drv_union_data); + u32 max_retries = QED_DRV_MB_MAX_RETRIES; + u32 usecs = QED_MCP_RESP_ITER_US; + + /* MCP not initialized */ + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); + return -EBUSY; + } + + if (p_hwfn->mcp_info->b_block_cmd) { + DP_NOTICE(p_hwfn, + "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n", + p_mb_params->cmd, p_mb_params->param); + return -EBUSY; + } + + if (p_mb_params->data_src_size > union_data_size || + p_mb_params->data_dst_size > union_data_size) { + DP_ERR(p_hwfn, + "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n", + p_mb_params->data_src_size, + p_mb_params->data_dst_size, union_data_size); + return -EINVAL; + } + + if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) { + max_retries = DIV_ROUND_UP(max_retries, 1000); + usecs *= 1000; + } + + return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries, + usecs); +} + +static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + bool can_sleep) +{ + struct qed_mcp_mb_params mb_params; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + mb_params.flags = can_sleep ? 
QED_MB_FLAG_CAN_SLEEP : 0; + + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + return 0; +} + +int qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, + o_mcp_resp, o_mcp_param, true)); +} + +int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param, + o_mcp_resp, o_mcp_param, false)); +} + +static int +qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf) +{ + struct qed_mcp_mb_params mb_params; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + mb_params.p_data_src = i_buf; + mb_params.data_src_size = (u8)i_txn_size; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + /* nvm_info needs to be updated */ + p_hwfn->nvm_info.valid = false; + + return 0; +} + +int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep) +{ + struct qed_mcp_mb_params mb_params; + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + mb_params.p_data_dst = raw_data; + + /* Use the maximal value since the actual one is part of the response */ + mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN; + if (b_can_sleep) + mb_params.flags = QED_MB_FLAG_CAN_SLEEP; + + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; + + *o_txn_size = *o_mcp_param; + memcpy(o_buf, raw_data, *o_txn_size); + + return 0; +} + +static bool +qed_mcp_can_force_load(u8 drv_role, + u8 exist_drv_role, + enum qed_override_force_load override_force_load) +{ + bool can_force_load = false; + + switch (override_force_load) { + case QED_OVERRIDE_FORCE_LOAD_ALWAYS: + can_force_load = true; + break; + case QED_OVERRIDE_FORCE_LOAD_NEVER: + can_force_load = false; + break; + default: + can_force_load = (drv_role == DRV_ROLE_OS && + exist_drv_role == DRV_ROLE_PREBOOT) || + (drv_role == DRV_ROLE_KDUMP && + exist_drv_role == DRV_ROLE_OS); + break; + } + + return can_force_load; +} + +static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0, + &resp, ¶m); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to send cancel load request, rc = %d\n", rc); + + return rc; +} + +#define BITMAP_IDX_FOR_CONFIG_QEDE BIT(0) +#define BITMAP_IDX_FOR_CONFIG_QED_SRIOV BIT(1) +#define BITMAP_IDX_FOR_CONFIG_QEDR BIT(2) +#define BITMAP_IDX_FOR_CONFIG_QEDF BIT(4) +#define BITMAP_IDX_FOR_CONFIG_QEDI BIT(5) +#define BITMAP_IDX_FOR_CONFIG_QED_LL2 BIT(6) + +static u32 qed_get_config_bitmap(void) +{ + u32 config_bitmap = 0x0; + + if (IS_ENABLED(CONFIG_QEDE)) + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE; + + if (IS_ENABLED(CONFIG_QED_SRIOV)) + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV; + + if (IS_ENABLED(CONFIG_QED_RDMA)) + 
config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR; + + if (IS_ENABLED(CONFIG_QED_FCOE)) + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF; + + if (IS_ENABLED(CONFIG_QED_ISCSI)) + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI; + + if (IS_ENABLED(CONFIG_QED_LL2)) + config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_LL2; + + return config_bitmap; +} + +struct qed_load_req_in_params { + u8 hsi_ver; +#define QED_LOAD_REQ_HSI_VER_DEFAULT 0 +#define QED_LOAD_REQ_HSI_VER_1 1 + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u8 drv_role; + u8 timeout_val; + u8 force_cmd; + bool avoid_eng_reset; +}; + +struct qed_load_req_out_params { + u32 load_code; + u32 exist_drv_ver_0; + u32 exist_drv_ver_1; + u32 exist_fw_ver; + u8 exist_drv_role; + u8 mfw_hsi_ver; + bool drv_exists; +}; + +static int +__qed_mcp_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_load_req_in_params *p_in_params, + struct qed_load_req_out_params *p_out_params) +{ + struct qed_mcp_mb_params mb_params; + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + u32 hsi_ver; + int rc; + + memset(&load_req, 0, sizeof(load_req)); + load_req.drv_ver_0 = p_in_params->drv_ver_0; + load_req.drv_ver_1 = p_in_params->drv_ver_1; + load_req.fw_ver = p_in_params->fw_ver; + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role); + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO, + p_in_params->timeout_val); + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE, + p_in_params->force_cmd); + QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0, + p_in_params->avoid_eng_reset); + + hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ? + DRV_ID_MCP_HSI_VER_CURRENT : + (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; + mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type; + mb_params.p_data_src = &load_req; + mb_params.data_src_size = sizeof(load_req); + mb_params.p_data_dst = &load_rsp; + mb_params.data_dst_size = sizeof(load_rsp); + mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n", + mb_params.param, + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW), + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE), + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER), + QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER)); + + if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n", + load_req.drv_ver_0, + load_req.drv_ver_1, + load_req.fw_ver, + load_req.misc0, + QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE), + QED_MFW_GET_FIELD(load_req.misc0, + LOAD_REQ_LOCK_TO), + QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE), + QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0)); + } + + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Load Response: resp 0x%08x\n", mb_params.mcp_resp); + p_out_params->load_code = mb_params.mcp_resp; + + if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 && + p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, 
flags0 0x%x]\n", + load_rsp.drv_ver_0, + load_rsp.drv_ver_1, + load_rsp.fw_ver, + load_rsp.misc0, + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE), + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI), + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0)); + + p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0; + p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1; + p_out_params->exist_fw_ver = load_rsp.fw_ver; + p_out_params->exist_drv_role = + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE); + p_out_params->mfw_hsi_ver = + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI); + p_out_params->drv_exists = + QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) & + LOAD_RSP_FLAGS0_DRV_EXISTS; + } + + return 0; +} + +static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn, + enum qed_drv_role drv_role, + u8 *p_mfw_drv_role) +{ + switch (drv_role) { + case QED_DRV_ROLE_OS: + *p_mfw_drv_role = DRV_ROLE_OS; + break; + case QED_DRV_ROLE_KDUMP: + *p_mfw_drv_role = DRV_ROLE_KDUMP; + break; + default: + DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role); + return -EINVAL; + } + + return 0; +} + +enum qed_load_req_force { + QED_LOAD_REQ_FORCE_NONE, + QED_LOAD_REQ_FORCE_PF, + QED_LOAD_REQ_FORCE_ALL, +}; + +static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn, + enum qed_load_req_force force_cmd, + u8 *p_mfw_force_cmd) +{ + switch (force_cmd) { + case QED_LOAD_REQ_FORCE_NONE: + *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE; + break; + case QED_LOAD_REQ_FORCE_PF: + *p_mfw_force_cmd = LOAD_REQ_FORCE_PF; + break; + case QED_LOAD_REQ_FORCE_ALL: + *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL; + break; + } +} + +int qed_mcp_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_load_req_params *p_params) +{ + struct qed_load_req_out_params out_params; + struct qed_load_req_in_params in_params; + u8 mfw_drv_role, mfw_force_cmd; + int rc; + + memset(&in_params, 0, sizeof(in_params)); + in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT; + in_params.drv_ver_1 = qed_get_config_bitmap(); + in_params.fw_ver = STORM_FW_VERSION; + rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role); + if (rc) + return rc; + + in_params.drv_role = mfw_drv_role; + in_params.timeout_val = p_params->timeout_val; + qed_get_mfw_force_cmd(p_hwfn, + QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd); + + in_params.force_cmd = mfw_force_cmd; + in_params.avoid_eng_reset = p_params->avoid_eng_reset; + + memset(&out_params, 0, sizeof(out_params)); + rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); + if (rc) + return rc; + + /* First handle cases where another load request should/might be sent: + * - MFW expects the old interface [HSI version = 1] + * - MFW responds that a force load request is required + */ + if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) { + DP_INFO(p_hwfn, + "MFW refused a load request due to HSI > 1. 
Resending with HSI = 1\n"); + + in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1; + memset(&out_params, 0, sizeof(out_params)); + rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params); + if (rc) + return rc; + } else if (out_params.load_code == + FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) { + if (qed_mcp_can_force_load(in_params.drv_role, + out_params.exist_drv_role, + p_params->override_force_load)) { + DP_INFO(p_hwfn, + "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n", + in_params.drv_role, in_params.fw_ver, + in_params.drv_ver_0, in_params.drv_ver_1, + out_params.exist_drv_role, + out_params.exist_fw_ver, + out_params.exist_drv_ver_0, + out_params.exist_drv_ver_1); + + qed_get_mfw_force_cmd(p_hwfn, + QED_LOAD_REQ_FORCE_ALL, + &mfw_force_cmd); + + in_params.force_cmd = mfw_force_cmd; + memset(&out_params, 0, sizeof(out_params)); + rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc) + return rc; + } else { + DP_NOTICE(p_hwfn, + "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n", + in_params.drv_role, in_params.fw_ver, + in_params.drv_ver_0, in_params.drv_ver_1, + out_params.exist_drv_role, + out_params.exist_fw_ver, + out_params.exist_drv_ver_0, + out_params.exist_drv_ver_1); + DP_NOTICE(p_hwfn, + "Avoid sending a force load request to prevent disruption of active PFs\n"); + + qed_mcp_cancel_load_req(p_hwfn, p_ptt); + return -EBUSY; + } + } + + /* Now handle the other types of responses. + * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not + * expected here after the additional revised load requests were sent. + */ + switch (out_params.load_code) { + case FW_MSG_CODE_DRV_LOAD_ENGINE: + case FW_MSG_CODE_DRV_LOAD_PORT: + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 && + out_params.drv_exists) { + /* The role and fw/driver version match, but the PF is + * already loaded and has not been unloaded gracefully. + */ + DP_NOTICE(p_hwfn, + "PF is already loaded\n"); + return -EINVAL; + } + break; + default: + DP_NOTICE(p_hwfn, + "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n", + out_params.load_code); + return -EBUSY; + } + + p_params->load_code = out_params.load_code; + + return 0; +} + +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp, + ¶m); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a LOAD_DONE command, rc = %d\n", rc); + return rc; + } + + /* Check if there is a DID mismatch between nvm-cfg/efuse */ + if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR) + DP_NOTICE(p_hwfn, + "warning: device configuration is not supported on this board type. 
The device may not function as expected.\n"); + + return 0; +} + +#define MFW_COMPLETION_MAX_ITER 5000 +#define MFW_COMPLETION_INTERVAL_MS 1 + +int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params mb_params; + u32 cnt = MFW_COMPLETION_MAX_ITER; + u32 wol_param; + int rc; + + switch (p_hwfn->cdev->wol_config) { + case QED_OV_WOL_DISABLED: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED; + break; + case QED_OV_WOL_ENABLED: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED; + break; + default: + DP_NOTICE(p_hwfn, + "Unknown WoL configuration %02x\n", + p_hwfn->cdev->wol_config); + fallthrough; + case QED_OV_WOL_DEFAULT: + wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP; + } + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ; + mb_params.param = wol_param; + mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK; + + spin_lock_bh(&p_hwfn->mcp_info->unload_lock); + set_bit(QED_MCP_BYPASS_PROC_BIT, + &p_hwfn->mcp_info->mcp_handling_status); + spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); + + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + + while (test_bit(QED_MCP_IN_PROCESSING_BIT, + &p_hwfn->mcp_info->mcp_handling_status) && --cnt) + msleep(MFW_COMPLETION_INTERVAL_MS); + + if (!cnt) + DP_NOTICE(p_hwfn, + "Failed to wait MFW event completion after %d msec\n", + MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS); + + return rc; +} + +int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params mb_params; + struct mcp_mac wol_mac; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE; + + /* Set the primary MAC if WoL is enabled */ + if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) { + u8 *p_mac = p_hwfn->cdev->wol_mac; + + memset(&wol_mac, 0, sizeof(wol_mac)); + wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1]; + wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 | + p_mac[4] << 8 | p_mac[5]; + + DP_VERBOSE(p_hwfn, + (QED_MSG_SP | NETIF_MSG_IFDOWN), + "Setting WoL MAC: %pM --> [%08x,%08x]\n", + p_mac, wol_mac.mac_upper, wol_mac.mac_lower); + + mb_params.p_data_src = &wol_mac; + mb_params.data_src_size = sizeof(wol_mac); + } + + return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); +} + +static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 path_addr = SECTION_ADDR(mfw_path_offsize, + QED_PATH_ID(p_hwfn)); + u32 disabled_vfs[VF_MAX_STATIC / 32]; + int i; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Reading Disabled VF information from [offset %08x], path_addr %08x\n", + mfw_path_offsize, path_addr); + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) { + disabled_vfs[i] = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, + mcp_vf_disabled) + + sizeof(u32) * i); + DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), + "FLR-ed VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); + } + + if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs)) + qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG); +} + +int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *vfs_to_ack) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_func_offsize, + MCP_PF_ID(p_hwfn)); + struct qed_mcp_mb_params mb_params; + int rc; + 
int i; + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV), + "Acking VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE; + mb_params.p_data_src = vfs_to_ack; + mb_params.data_src_size = VF_MAX_STATIC / 8; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n"); + return -EBUSY; + } + + /* Clear the ACK bits */ + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + qed_wr(p_hwfn, p_ptt, + func_addr + + offsetof(struct public_func, drv_ack_vf_disabled) + + i * sizeof(u32), 0); + + return rc; +} + +static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 transceiver_state; + + transceiver_state = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + + DP_VERBOSE(p_hwfn, + (NETIF_MSG_HW | QED_MSG_SP), + "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", + transceiver_state, + (u32)(p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, transceiver_data))); + + transceiver_state = GET_FIELD(transceiver_state, + ETH_TRANSCEIVER_STATE); + + if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) + DP_NOTICE(p_hwfn, "Transceiver is present.\n"); + else + DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); +} + +static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link) +{ + u32 eee_status, val; + + p_link->eee_adv_caps = 0; + p_link->eee_lp_adv_caps = 0; + eee_status = qed_rd(p_hwfn, + p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, eee_status)); + p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT); + val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_adv_caps |= QED_EEE_10G_ADV; + val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET; + if (val & EEE_1G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV; + if (val & EEE_10G_ADV) + p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV; +} + +static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct public_func *p_data, int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr); + u32 func_addr; + u32 i, size; + + func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + memset(p_data, 0, sizeof(*p_data)); + + size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + return size; +} + +static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct qed_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MIN_BW); + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. 
Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config, + FUNC_MF_CFG_MAX_BW); + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + +static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, bool b_reset) +{ + struct qed_mcp_link_state *p_link; + u8 max_bw, min_bw; + u32 status = 0; + + /* Prevent SW/attentions from doing this at the same time */ + spin_lock_bh(&p_hwfn->mcp_info->link_lock); + + p_link = &p_hwfn->mcp_info->link_output; + memset(p_link, 0, sizeof(*p_link)); + if (!b_reset) { + status = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, link_status)); + DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP), + "Received link update [0x%08x] from mfw [Addr 0x%x]\n", + status, + (u32)(p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, link_status))); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Resetting link indications\n"); + goto out; + } + + if (p_hwfn->b_drv_link_init) { + /* Link indication with modern MFW arrives as per-PF + * indication. + */ + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_VLINK) { + struct public_func shmem_info; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID(p_hwfn)); + p_link->link_up = !!(shmem_info.status & + FUNC_STATUS_VIRTUAL_LINK_UP); + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Virtual link_up = %d\n", p_link->link_up); + } else { + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Physical link_up = %d\n", p_link->link_up); + } + } else { + p_link->link_up = false; + } + + p_link->full_duplex = true; + switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { + case LINK_STATUS_SPEED_AND_DUPLEX_100G: + p_link->speed = 100000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_50G: + p_link->speed = 50000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_40G: + p_link->speed = 40000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_25G: + p_link->speed = 25000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_20G: + p_link->speed = 20000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_10G: + p_link->speed = 10000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: + p_link->full_duplex = false; + fallthrough; + case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: + p_link->speed = 1000; + break; + default: + p_link->speed = 0; + p_link->link_up = 0; + } + + if (p_link->link_up && p_link->speed) + p_link->line_speed = p_link->speed; + else + p_link->line_speed = 0; + + max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; + min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; + + /* Max bandwidth configuration */ + __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw); + + /* Min bandwidth configuration */ + __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw); + qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt, + p_link->min_pf_rate); + + p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); + p_link->an_complete = !!(status & + LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); + p_link->parallel_detection = !!(status & + LINK_STATUS_PARALLEL_DETECTION_USED); + p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); + + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? 
+ QED_LINK_PARTNER_SPEED_1G_FD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? + QED_LINK_PARTNER_SPEED_1G_HD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_10G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_20G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_25G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_40G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_50G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? + QED_LINK_PARTNER_SPEED_100G : 0; + + p_link->partner_tx_flow_ctrl_en = + !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); + p_link->partner_rx_flow_ctrl_en = + !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + + switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { + case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: + p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE; + break; + default: + p_link->partner_adv_pause = 0; + } + + p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + + if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) + qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link); + + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + switch (status & LINK_STATUS_FEC_MODE_MASK) { + case LINK_STATUS_FEC_MODE_NONE: + p_link->fec_active = QED_FEC_MODE_NONE; + break; + case LINK_STATUS_FEC_MODE_FIRECODE_CL74: + p_link->fec_active = QED_FEC_MODE_FIRECODE; + break; + case LINK_STATUS_FEC_MODE_RS_CL91: + p_link->fec_active = QED_FEC_MODE_RS; + break; + default: + p_link->fec_active = QED_FEC_MODE_AUTO; + } + } else { + p_link->fec_active = QED_FEC_MODE_UNSUPPORTED; + } + + qed_link_update(p_hwfn, p_ptt); +out: + spin_unlock_bh(&p_hwfn->mcp_info->link_lock); +} + +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up) +{ + struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; + struct qed_mcp_mb_params mb_params; + struct eth_phy_cfg phy_cfg; + u32 cmd, fec_bit = 0; + u32 val, ext_speed; + int rc = 0; + + /* Set the shmem configuration according to params */ + memset(&phy_cfg, 0, sizeof(phy_cfg)); + cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; + if (!params->speed.autoneg) + phy_cfg.speed = params->speed.forced_speed; + phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0; + phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0; + phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0; + phy_cfg.adv_speed = params->speed.advertised_speeds; + phy_cfg.loopback_mode = params->loopback_mode; + + /* There are MFWs that share this capability regardless of whether + * this is feasible or not. And given that at the very least adv_caps + * would be set internally by qed, we want to make sure LFA would + * still work. 
+ */ + if ((p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) { + phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED; + if (params->eee.tx_lpi_enable) + phy_cfg.eee_cfg |= EEE_CFG_TX_LPI; + if (params->eee.adv_caps & QED_EEE_1G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G; + if (params->eee.adv_caps & QED_EEE_10G_ADV) + phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G; + phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer << + EEE_TX_TIMER_USEC_OFFSET) & + EEE_TX_TIMER_USEC_MASK; + } + + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) { + if (params->fec & QED_FEC_MODE_NONE) + fec_bit |= FEC_FORCE_MODE_NONE; + else if (params->fec & QED_FEC_MODE_FIRECODE) + fec_bit |= FEC_FORCE_MODE_FIRECODE; + else if (params->fec & QED_FEC_MODE_RS) + fec_bit |= FEC_FORCE_MODE_RS; + else if (params->fec & QED_FEC_MODE_AUTO) + fec_bit |= FEC_FORCE_MODE_AUTO; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit); + } + + if (p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) { + ext_speed = 0; + if (params->ext_speed.autoneg) + ext_speed |= ETH_EXT_SPEED_NONE; + + val = params->ext_speed.forced_speed; + if (val & QED_EXT_SPEED_1G) + ext_speed |= ETH_EXT_SPEED_1G; + if (val & QED_EXT_SPEED_10G) + ext_speed |= ETH_EXT_SPEED_10G; + if (val & QED_EXT_SPEED_25G) + ext_speed |= ETH_EXT_SPEED_25G; + if (val & QED_EXT_SPEED_40G) + ext_speed |= ETH_EXT_SPEED_40G; + if (val & QED_EXT_SPEED_50G_R) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R; + if (val & QED_EXT_SPEED_50G_R2) + ext_speed |= ETH_EXT_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R2) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_100G_R4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_100G_P4) + ext_speed |= ETH_EXT_SPEED_100G_BASE_P4; + + SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED, + ext_speed); + + ext_speed = 0; + + val = params->ext_speed.advertised_speeds; + if (val & QED_EXT_SPEED_MASK_1G) + ext_speed |= ETH_EXT_ADV_SPEED_1G; + if (val & QED_EXT_SPEED_MASK_10G) + ext_speed |= ETH_EXT_ADV_SPEED_10G; + if (val & QED_EXT_SPEED_MASK_25G) + ext_speed |= ETH_EXT_ADV_SPEED_25G; + if (val & QED_EXT_SPEED_MASK_40G) + ext_speed |= ETH_EXT_ADV_SPEED_40G; + if (val & QED_EXT_SPEED_MASK_50G_R) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R; + if (val & QED_EXT_SPEED_MASK_50G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R2) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2; + if (val & QED_EXT_SPEED_MASK_100G_R4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4; + if (val & QED_EXT_SPEED_MASK_100G_P4) + ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4; + + phy_cfg.extended_speed |= ext_speed; + + SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE, + params->ext_fec_mode); + } + + p_hwfn->b_drv_link_init = b_up; + + if (b_up) { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. 
Speed 0x%08x\n", + phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed, + phy_cfg.loopback_mode, phy_cfg.fec_mode, + phy_cfg.extended_speed); + } else { + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); + } + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.p_data_src = &phy_cfg; + mb_params.data_src_size = sizeof(phy_cfg); + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + + /* if mcp fails to respond we must abort */ + if (rc) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + /* Mimic link-change attention, done for several reasons: + * - On reset, there's no guarantee MFW would trigger + * an attention. + * - On initialization, older MFWs might not indicate link change + * during LFA, so we'll never get an UP indication. + */ + qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up); + + return 0; +} + +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr); + path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn)); + + proc_kill_cnt = qed_rd(p_hwfn, p_ptt, + path_addr + + offsetof(struct public_path, process_kill)) & + PROCESS_KILL_COUNTER_MASK; + + return proc_kill_cnt; +} + +static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + u32 proc_kill_cnt; + + /* Prevent possible attentions/interrupts during the recovery handling + * and till its load phase, during which they will be re-enabled. + */ + qed_int_igu_disable_int(p_hwfn, p_ptt); + + DP_NOTICE(p_hwfn, "Received a process kill indication\n"); + + /* The following operations should be done once, and thus in CMT mode + * are carried out by only the first HW function. 
+ */ + if (p_hwfn != QED_LEADING_HWFN(cdev)) + return; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Ignoring the indication since a recovery process is already in progress\n"); + return; + } + + cdev->recov_in_prog = true; + + proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt); + DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt); + + qed_schedule_recovery_handler(p_hwfn); +} + +static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum MFW_DRV_MSG_TYPE type) +{ + enum qed_mcp_protocol_type stats_type; + union qed_mcp_protocol_stats stats; + struct qed_mcp_mb_params mb_params; + u32 hsi_param; + + switch (type) { + case MFW_DRV_MSG_GET_LAN_STATS: + stats_type = QED_MCP_LAN_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; + break; + case MFW_DRV_MSG_GET_FCOE_STATS: + stats_type = QED_MCP_FCOE_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE; + break; + case MFW_DRV_MSG_GET_ISCSI_STATS: + stats_type = QED_MCP_ISCSI_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI; + break; + case MFW_DRV_MSG_GET_RDMA_STATS: + stats_type = QED_MCP_RDMA_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA; + break; + default: + DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type); + return; + } + + qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats); + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_GET_STATS; + mb_params.param = hsi_param; + mb_params.p_data_src = &stats; + mb_params.data_src_size = sizeof(stats); + qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); +} + +static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_function_info *p_info; + struct public_func shmem_info; + u32 resp = 0, param = 0; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + + p_info = &p_hwfn->mcp_info->func_info; + + qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min); + qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max); + + /* Acknowledge the MFW */ + qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, + ¶m); +} + +static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 resp = 0, param = 0; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + + p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & + FUNC_MF_CFG_OV_STAG_MASK; + p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) { + if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) { + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, + p_hwfn->hw_info.ovlan); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1); + + /* Configure DB to add external vlan to EDPM packets */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, + p_hwfn->hw_info.ovlan); + } else { + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0); + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0); + } + + qed_sp_pf_update_stag(p_hwfn); + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", + p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); + + /* Acknowledge the MFW */ + qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, + &resp, ¶m); +} + +static void 
qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + /* A single notification should be sent to upper driver in CMT mode */ + if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) + return; + + qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL, + "Fan failure was detected on the network interface card and it's going to be shut down.\n"); +} + +struct qed_mdump_cmd_params { + u32 cmd; + void *p_data_src; + u8 data_src_size; + void *p_data_dst; + u8 data_dst_size; + u32 mcp_resp; +}; + +static int +qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mdump_cmd_params *p_mdump_cmd_params) +{ + struct qed_mcp_mb_params mb_params; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD; + mb_params.param = p_mdump_cmd_params->cmd; + mb_params.p_data_src = p_mdump_cmd_params->p_data_src; + mb_params.data_src_size = p_mdump_cmd_params->data_src_size; + mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst; + mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp; + + if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) { + DP_INFO(p_hwfn, + "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n", + p_mdump_cmd_params->cmd); + rc = -EOPNOTSUPP; + } else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The mdump command is not supported by the MFW\n"); + rc = -EOPNOTSUPP; + } + + return rc; +} + +static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mdump_cmd_params mdump_cmd_params; + + memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK; + + return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); +} + +int +qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct mdump_retain_data_stc *p_mdump_retain) +{ + struct qed_mdump_cmd_params mdump_cmd_params; + int rc; + + memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params)); + mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_GET_RETAIN; + mdump_cmd_params.p_data_dst = p_mdump_retain; + mdump_cmd_params.data_dst_size = sizeof(*p_mdump_retain); + + rc = qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params); + if (rc) + return rc; + + if (mdump_cmd_params.mcp_resp != FW_MSG_CODE_OK) { + DP_INFO(p_hwfn, + "Failed to get the mdump retained data [mcp_resp 0x%x]\n", + mdump_cmd_params.mcp_resp); + return -EINVAL; + } + + return 0; +} + +static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct mdump_retain_data_stc mdump_retain; + int rc; + + /* In CMT mode - no need for more than a single acknowledgment to the + * MFW, and no more than a single notification to the upper driver. 
+ */ + if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev)) + return; + + rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain); + if (rc == 0 && mdump_retain.valid) + DP_NOTICE(p_hwfn, + "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n", + mdump_retain.epoch, + mdump_retain.pf, mdump_retain.status); + else + DP_NOTICE(p_hwfn, + "The MFW notified that a critical error occurred in the device\n"); + + DP_NOTICE(p_hwfn, + "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n"); + qed_mcp_mdump_ack(p_hwfn, p_ptt); + + qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL); +} + +void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct public_func shmem_info; + u32 port_cfg, val; + + if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + return; + + memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info)); + port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, oem_cfg_port)); + val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> + OEM_CFG_CHANNEL_TYPE_OFFSET; + if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) + DP_NOTICE(p_hwfn, + "Incorrect UFP Channel type %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); + + val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; + if (val == OEM_CFG_SCHED_TYPE_ETS) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS; + } else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) { + p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; + } else { + p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; + DP_NOTICE(p_hwfn, + "Unknown UFP scheduling mode %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); + } + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >> + OEM_CFG_FUNC_TC_OFFSET; + p_hwfn->ufp_info.tc = (u8)val; + val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >> + OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET; + if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC; + } else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; + } else { + p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; + DP_NOTICE(p_hwfn, + "Unknown Host priority control %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); + } + + DP_NOTICE(p_hwfn, + "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n", + p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, + p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn)); +} + +static int +qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + qed_mcp_read_ufp_config(p_hwfn, p_ptt); + + if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) { + p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc; + qed_hw_info_set_offload_tc(&p_hwfn->hw_info, + p_hwfn->ufp_info.tc); + + qed_qm_reconf(p_hwfn, p_ptt); + } else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) { + /* Merge UFP TC with the dcbx TC data */ + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_OPERATIONAL_MIB); + } else { + DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n"); + return -EINVAL; + } + + /* update storm FW with negotiation results */ + qed_sp_pf_update_ufp(p_hwfn); + + /* update stag pcp value */ + qed_sp_pf_update_stag(p_hwfn); + + return 0; +} + +int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_info *info = p_hwfn->mcp_info; + int rc = 0; + bool found = false; + u16 i; + + DP_VERBOSE(p_hwfn, 
QED_MSG_SP, "Received message from MFW\n"); + + /* Read Messages from MFW */ + qed_mcp_read_mb(p_hwfn, p_ptt); + + /* Compare current messages to old ones */ + for (i = 0; i < info->mfw_mb_length; i++) { + if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) + continue; + + found = true; + + DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, + "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", + i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); + + spin_lock_bh(&p_hwfn->mcp_info->unload_lock); + if (test_bit(QED_MCP_BYPASS_PROC_BIT, + &p_hwfn->mcp_info->mcp_handling_status)) { + spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); + DP_INFO(p_hwfn, + "Msg [%d] is bypassed on unload flow\n", i); + continue; + } + + set_bit(QED_MCP_IN_PROCESSING_BIT, + &p_hwfn->mcp_info->mcp_handling_status); + spin_unlock_bh(&p_hwfn->mcp_info->unload_lock); + + switch (i) { + case MFW_DRV_MSG_LINK_CHANGE: + qed_mcp_handle_link_change(p_hwfn, p_ptt, false); + break; + case MFW_DRV_MSG_VF_DISABLED: + qed_mcp_handle_vf_flr(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_LLDP_DATA_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_REMOTE_LLDP_MIB); + break; + case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_REMOTE_MIB); + break; + case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: + qed_dcbx_mib_update_event(p_hwfn, p_ptt, + QED_DCBX_OPERATIONAL_MIB); + break; + case MFW_DRV_MSG_OEM_CFG_UPDATE: + qed_mcp_handle_ufp_event(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: + qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_ERROR_RECOVERY: + qed_mcp_handle_process_kill(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_GET_LAN_STATS: + case MFW_DRV_MSG_GET_FCOE_STATS: + case MFW_DRV_MSG_GET_ISCSI_STATS: + case MFW_DRV_MSG_GET_RDMA_STATS: + qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i); + break; + case MFW_DRV_MSG_BW_UPDATE: + qed_mcp_update_bw(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_S_TAG_UPDATE: + qed_mcp_update_stag(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_FAILURE_DETECTED: + qed_mcp_handle_fan_failure(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED: + qed_mcp_handle_critical_error(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_GET_TLV_REQ: + qed_mfw_tlv_req(p_hwfn); + break; + default: + DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i); + rc = -EINVAL; + } + + clear_bit(QED_MCP_IN_PROCESSING_BIT, + &p_hwfn->mcp_info->mcp_handling_status); + } + + /* ACK everything */ + for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { + __be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]); + + /* MFW expect answer in BE, so we force write in that format */ + qed_wr(p_hwfn, p_ptt, + info->mfw_mb_addr + sizeof(u32) + + MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * + sizeof(u32) + i * sizeof(u32), + (__force u32)val); + } + + if (!found) { + DP_NOTICE(p_hwfn, + "Received an MFW message indication but no new message!\n"); + rc = -EINVAL; + } + + /* Copy the new mfw messages into the shadow */ + memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); + + return rc; +} + +int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_mfw_ver, u32 *p_running_bundle_id) +{ + u32 global_offsize, public_base; + + if (IS_VF(p_hwfn->cdev)) { + if (p_hwfn->vf_iov_info) { + struct pfvf_acquire_resp_tlv *p_resp; + + p_resp = &p_hwfn->vf_iov_info->acquire_resp; + *p_mfw_ver = p_resp->pfdev_info.mfw_ver; + return 0; + } else { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF requested MFW version prior to 
ACQUIRE\n"); + return -EINVAL; + } + } + + public_base = p_hwfn->mcp_info->public_base; + global_offsize = qed_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(public_base, + PUBLIC_GLOBAL)); + *p_mfw_ver = + qed_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, + 0) + offsetof(struct public_global, mfw_ver)); + + if (p_running_bundle_id) { + *p_running_bundle_id = qed_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, 0) + + offsetof(struct public_global, + running_bundle_id)); + } + + return 0; +} + +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + /* Read the address of the nvm_cfg */ + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + if (!nvm_cfg_addr) { + DP_NOTICE(p_hwfn, "Shared memory not initialized\n"); + return -EINVAL; + } + + /* Read the offset of nvm_cfg1 */ + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, mbi_version); + *p_mbi_ver = qed_rd(p_hwfn, p_ptt, + mbi_ver_addr) & + (NVM_CFG1_GLOB_MBI_VERSION_0_MASK | + NVM_CFG1_GLOB_MBI_VERSION_1_MASK | + NVM_CFG1_GLOB_MBI_VERSION_2_MASK); + + return 0; +} + +int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_media_type) +{ + *p_media_type = MEDIA_UNSPECIFIED; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); + return -EBUSY; + } + + if (!p_ptt) { + *p_media_type = MEDIA_UNSPECIFIED; + return -EINVAL; + } + + *p_media_type = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + media_type)); + + return 0; +} + +int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_transceiver_state, + u32 *p_transceiver_type) +{ + u32 transceiver_info; + + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE; + *p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); + return -EBUSY; + } + + transceiver_info = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + + *p_transceiver_state = (transceiver_info & + ETH_TRANSCEIVER_STATE_MASK) >> + ETH_TRANSCEIVER_STATE_OFFSET; + + if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT) + *p_transceiver_type = (transceiver_info & + ETH_TRANSCEIVER_TYPE_MASK) >> + ETH_TRANSCEIVER_TYPE_OFFSET; + else + *p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN; + + return 0; +} + +static bool qed_is_transceiver_ready(u32 transceiver_state, + u32 transceiver_type) +{ + if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) && + ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) && + (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE)) + return true; + + return false; +} + +int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_speed_mask) +{ + u32 transceiver_type, transceiver_state; + int ret; + + ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state, + &transceiver_type); + if (ret) + return ret; + + if (qed_is_transceiver_ready(transceiver_state, transceiver_type) == + false) + return -EINVAL; + + switch (transceiver_type) { + case ETH_TRANSCEIVER_TYPE_1G_LX: + case ETH_TRANSCEIVER_TYPE_1G_SX: + case 
ETH_TRANSCEIVER_TYPE_1G_PCC: + case ETH_TRANSCEIVER_TYPE_1G_ACC: + case ETH_TRANSCEIVER_TYPE_1000BASET: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_10G_SR: + case ETH_TRANSCEIVER_TYPE_10G_LR: + case ETH_TRANSCEIVER_TYPE_10G_LRM: + case ETH_TRANSCEIVER_TYPE_10G_ER: + case ETH_TRANSCEIVER_TYPE_10G_PCC: + case ETH_TRANSCEIVER_TYPE_10G_ACC: + case ETH_TRANSCEIVER_TYPE_4x10G: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + case ETH_TRANSCEIVER_TYPE_40G_LR4: + case ETH_TRANSCEIVER_TYPE_40G_SR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + case ETH_TRANSCEIVER_TYPE_100G_AOC: + case ETH_TRANSCEIVER_TYPE_100G_SR4: + case ETH_TRANSCEIVER_TYPE_100G_LR4: + case ETH_TRANSCEIVER_TYPE_100G_ER4: + case ETH_TRANSCEIVER_TYPE_100G_ACC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + case ETH_TRANSCEIVER_TYPE_25G_SR: + case ETH_TRANSCEIVER_TYPE_25G_LR: + case ETH_TRANSCEIVER_TYPE_25G_AOC: + case ETH_TRANSCEIVER_TYPE_25G_ACC_S: + case ETH_TRANSCEIVER_TYPE_25G_ACC_M: + case ETH_TRANSCEIVER_TYPE_25G_ACC_L: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; + break; + case ETH_TRANSCEIVER_TYPE_25G_CA_N: + case ETH_TRANSCEIVER_TYPE_25G_CA_S: + case ETH_TRANSCEIVER_TYPE_25G_CA_L: + case ETH_TRANSCEIVER_TYPE_4x25G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + case ETH_TRANSCEIVER_TYPE_40G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_100G_CR4: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC: + *p_speed_mask = + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; + break; + case ETH_TRANSCEIVER_TYPE_XLPPI: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; + break; + case ETH_TRANSCEIVER_TYPE_10G_BASET: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR: + case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR: + *p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G | + NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; + break; + default: + DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n", + transceiver_type); + *p_speed_mask = 0xff; + break; + } + + return 0; +} + +int 
qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_board_config) +{ + u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + if (!qed_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, "MFW is not initialized!\n"); + return -EBUSY; + } + if (!p_ptt) { + *p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; + return -EINVAL; + } + + nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + *p_board_config = qed_rd(p_hwfn, p_ptt, + port_cfg_addr + + offsetof(struct nvm_cfg1_port, + board_cfg)); + + return 0; +} + +/* Old MFW has a global configuration for all PFs regarding RDMA support */ +static void +qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn, + enum qed_pci_personality *p_proto) +{ + /* There wasn't ever a legacy MFW that published iwarp. + * So at this point, this is either plain l2 or RoCE. + */ + if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities)) + *p_proto = QED_PCI_ETH_ROCE; + else + *p_proto = QED_PCI_ETH; + + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "According to Legacy capabilities, L2 personality is %08x\n", + (u32)*p_proto); +} + +static int +qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_pci_personality *p_proto) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, ¶m); + if (rc) + return rc; + if (resp != FW_MSG_CODE_OK) { + DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP, + "MFW lacks support for command; Returns %08x\n", + resp); + return -EINVAL; + } + + switch (param) { + case FW_MB_PARAM_GET_PF_RDMA_NONE: + *p_proto = QED_PCI_ETH; + break; + case FW_MB_PARAM_GET_PF_RDMA_ROCE: + *p_proto = QED_PCI_ETH_ROCE; + break; + case FW_MB_PARAM_GET_PF_RDMA_IWARP: + *p_proto = QED_PCI_ETH_IWARP; + break; + case FW_MB_PARAM_GET_PF_RDMA_BOTH: + *p_proto = QED_PCI_ETH_RDMA; + break; + default: + DP_NOTICE(p_hwfn, + "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n", + param); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, + NETIF_MSG_IFUP, + "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n", + (u32)*p_proto, resp, param); + return 0; +} + +static int +qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn, + struct public_func *p_info, + struct qed_ptt *p_ptt, + enum qed_pci_personality *p_proto) +{ + int rc = 0; + + switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { + case FUNC_MF_CFG_PROTOCOL_ETHERNET: + if (!IS_ENABLED(CONFIG_QED_RDMA)) + *p_proto = QED_PCI_ETH; + else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto)) + qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto); + break; + case FUNC_MF_CFG_PROTOCOL_ISCSI: + *p_proto = QED_PCI_ISCSI; + break; + case FUNC_MF_CFG_PROTOCOL_FCOE: + *p_proto = QED_PCI_FCOE; + break; + case FUNC_MF_CFG_PROTOCOL_ROCE: + DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n"); + fallthrough; + default: + rc = -EINVAL; + } + + return rc; +} + +int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + struct qed_mcp_function_info *info; + struct public_func shmem_info; + + qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + info = &p_hwfn->mcp_info->func_info; + + info->pause_on_host = (shmem_info.config & + FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 
1 : 0; + + if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt, + &info->protocol)) { + DP_ERR(p_hwfn, "Unknown personality %08x\n", + (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); + return -EINVAL; + } + + qed_read_pf_bandwidth(p_hwfn, &shmem_info); + + if (shmem_info.mac_upper || shmem_info.mac_lower) { + info->mac[0] = (u8)(shmem_info.mac_upper >> 8); + info->mac[1] = (u8)(shmem_info.mac_upper); + info->mac[2] = (u8)(shmem_info.mac_lower >> 24); + info->mac[3] = (u8)(shmem_info.mac_lower >> 16); + info->mac[4] = (u8)(shmem_info.mac_lower >> 8); + info->mac[5] = (u8)(shmem_info.mac_lower); + + /* Store primary MAC for later possible WoL */ + memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN); + } else { + DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n"); + } + + info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower | + (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32); + info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower | + (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32); + + info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); + + info->mtu = (u16)shmem_info.mtu_size; + + p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE; + p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT; + if (qed_mcp_is_init(p_hwfn)) { + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_OS_WOL, 0, &resp, ¶m); + if (rc) + return rc; + if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED) + p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME; + } + + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP), + "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n", + info->pause_on_host, info->protocol, + info->bandwidth_min, info->bandwidth_max, + info->mac, + info->wwn_port, info->wwn_node, + info->ovlan, (u8)p_hwfn->hw_info.b_wol_support); + + return 0; +} + +struct qed_mcp_link_params +*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_input; +} + +struct qed_mcp_link_state +*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_output; +} + +struct qed_mcp_link_capabilities +*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return NULL; + return &p_hwfn->mcp_info->link_capabilities; +} + +int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m); + + /* Wait for the drain to complete before returning */ + msleep(1020); + + return rc; +} + +int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_flash_size) +{ + u32 flash_size; + + if (IS_VF(p_hwfn->cdev)) + return -EINVAL; + + flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); + flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> + MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; + flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); + + *p_flash_size = flash_size; + + return 0; +} + +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_dev *cdev = p_hwfn->cdev; + + if (cdev->recov_in_prog) { + DP_NOTICE(p_hwfn, + "Avoid triggering a recovery since such a process is already in progress\n"); + return -EAGAIN; + } + + DP_NOTICE(p_hwfn, "Triggering a recovery process\n"); + qed_wr(p_hwfn, p_ptt, 
MISC_REG_AEU_GENERAL_ATTN_35, 0x1); + + return 0; +} + +#define QED_RECOVERY_PROLOG_SLEEP_MS 100 + +int qed_recovery_prolog(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + int rc; + + /* Allow ongoing PCIe transactions to complete */ + msleep(QED_RECOVERY_PROLOG_SLEEP_MS); + + /* Clear the PF's internal FID_enable in the PXP */ + rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false); + if (rc) + DP_NOTICE(p_hwfn, + "qed_pglueb_set_pfid_enable() failed. rc = %d.\n", + rc); + + return rc; +} + +static int +qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num) +{ + u32 resp = 0, param = 0, rc_param = 0; + int rc; + + /* Only Leader can configure MSIX, and need to take CMT into account */ + if (!IS_LEAD_HWFN(p_hwfn)) + return 0; + num *= p_hwfn->cdev->num_hwfns; + + param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; + param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, + &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { + DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id); + rc = -EINVAL; + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n", + num, vf_id); + } + + return rc; +} + +static int +qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 num) +{ + u32 resp = 0, param = num, rc_param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX, + param, &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) { + DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n"); + rc = -EINVAL; + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requested 0x%02x MSI-x interrupts for VFs\n", num); + } + + return rc; +} + +int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num) +{ + if (QED_IS_BB(p_hwfn->cdev)) + return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num); + else + return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num); +} + +int +qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_drv_version *p_ver) +{ + struct qed_mcp_mb_params mb_params; + struct drv_version_stc drv_version; + __be32 val; + u32 i; + int rc; + + memset(&drv_version, 0, sizeof(drv_version)); + drv_version.version = p_ver->version; + for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) { + val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)])); + *(__be32 *)&drv_version.name[i * sizeof(u32)] = val; + } + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VERSION; + mb_params.p_data_src = &drv_version; + mb_params.data_src_size = sizeof(drv_version); + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +/* A maximal 100 msec waiting time for the MCP to halt */ +#define QED_MCP_HALT_SLEEP_MS 10 +#define QED_MCP_HALT_MAX_RETRIES 10 + +int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 resp = 0, param = 0, cpu_state, cnt = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, + ¶m); + if (rc) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + do { + msleep(QED_MCP_HALT_SLEEP_MS); + cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE); 
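+ /* The MFW acknowledges a successful halt by setting the SOFT_HALTED
+ * bit in its CPU_STATE register, so poll for that bit here instead of
+ * trusting the mailbox completion alone.
+ */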
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, true);
+
+ return 0;
+}
+
+#define QED_MCP_RESUME_SLEEP_MS 10
+
+int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 cpu_mode, cpu_state;
+
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
+
+ cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+ msleep(QED_MCP_RESUME_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return -EBUSY;
+ }
+
+ qed_mcp_cmd_set_blocking(p_hwfn, false);
+
+ return 0;
+}
+
+int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_client client)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (client) {
+ case QED_OV_CLIENT_DRV:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
+ break;
+ case QED_OV_CLIENT_USER:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
+ break;
+ case QED_OV_CLIENT_VENDOR_SPEC:
+ drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+
+ return rc;
+}
+
+int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_driver_state drv_state)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (drv_state) {
+ case QED_OV_DRIVER_STATE_NOT_LOADED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
+ break;
+ case QED_OV_DRIVER_STATE_DISABLED:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
+ break;
+ case QED_OV_DRIVER_STATE_ACTIVE:
+ drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send driver state\n");
+
+ return rc;
+}
+
+int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 mtu)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, const u8 *mac)
+{
+ struct qed_mcp_mb_params mb_params;
+ u32 mfw_mac[2];
+ int rc;
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
+ mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
+ DRV_MSG_CODE_VMAC_TYPE_SHIFT;
+ mb_params.param |= MCP_PF_ID(p_hwfn);
+
+ /* MCP is BE, and on LE platforms PCI would swap access to SHMEM
+ * in 32-bit granularity.
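+ * For illustration, the packing below turns a MAC of aa:bb:cc:dd:ee:ff
+ * into mfw_mac[0] = 0xaabbccdd and mfw_mac[1] = 0xeeff0000.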
+ * So the MAC has to be set in native order [and not byte order],
+ * otherwise it would be read incorrectly by MFW after swap.
+ */
+ mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
+ mfw_mac[1] = mac[4] << 24 | mac[5] << 16;
+
+ mb_params.p_data_src = (u8 *)mfw_mac;
+ mb_params.data_src_size = 8;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);
+
+ /* Store primary MAC for later possible WoL */
+ memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);
+
+ return rc;
+}
+
+int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_ov_wol wol)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Can't change WoL configuration when WoL isn't supported\n");
+ return -EINVAL;
+ }
+
+ switch (wol) {
+ case QED_OV_WOL_DEFAULT:
+ drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
+ break;
+ case QED_OV_WOL_DISABLED:
+ drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
+ break;
+ case QED_OV_WOL_ENABLED:
+ drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);
+
+ /* Store the WoL update for a future unload */
+ p_hwfn->cdev->wol_config = (u8)wol;
+
+ return rc;
+}
+
+int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_ov_eswitch eswitch)
+{
+ u32 resp = 0, param = 0;
+ u32 drv_mb_param;
+ int rc;
+
+ switch (eswitch) {
+ case QED_OV_ESWITCH_NONE:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
+ break;
+ case QED_OV_ESWITCH_VEB:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
+ break;
+ case QED_OV_ESWITCH_VEPA:
+ drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
+ break;
+ default:
+ DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
+ drv_mb_param, &resp, &param);
+ if (rc)
+ DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);
+
+ return rc;
+}
+
+int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_led_mode mode)
+{
+ u32 resp = 0, param = 0, drv_mb_param;
+ int rc;
+
+ switch (mode) {
+ case QED_LED_MODE_ON:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
+ break;
+ case QED_LED_MODE_OFF:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
+ break;
+ case QED_LED_MODE_RESTORE:
+ drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
+ return -EINVAL;
+ }
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
+ drv_mb_param, &resp, &param);
+
+ return rc;
+}
+
+int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 mask_parities)
+{
+ u32 resp = 0, param = 0;
+ int rc;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
+ mask_parities, &resp, &param);
+
+ if (rc) {
+ DP_ERR(p_hwfn,
+ "MCP response failure for mask parities, aborting\n");
+ } else if (resp != FW_MSG_CODE_OK) {
+ DP_ERR(p_hwfn,
+ "MCP did not acknowledge mask parity request. 
Old MFW?\n"); + rc = -EINVAL; + } + + return rc; +} + +int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len) +{ + u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + u32 resp = 0, resp_param = 0; + struct qed_ptt *p_ptt; + int rc = 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + while (bytes_left > 0) { + bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN); + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NVM_READ_NVRAM, + addr + offset + + (bytes_to_copy << + DRV_MB_PARAM_NVM_LEN_OFFSET), + &resp, &resp_param, + &read_len, + (u32 *)(p_buf + offset), false); + + if (rc || (resp != FW_MSG_CODE_NVM_OK)) { + DP_NOTICE(cdev, "MCP command rc = %d\n", rc); + break; + } + + /* This can be a lengthy process, and it's possible scheduler + * isn't preemptible. Sleep a bit to prevent CPU hogging. + */ + if (bytes_left % 0x1000 < + (bytes_left - read_len) % 0x1000) + usleep_range(1000, 2000); + + offset += read_len; + bytes_left -= read_len; + } + + cdev->mcp_nvm_resp = resp; + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp)); + qed_ptt_release(p_hwfn, p_ptt); + + return 0; +} + +int qed_mcp_nvm_write(struct qed_dev *cdev, + u32 cmd, u32 addr, u8 *p_buf, u32 len) +{ + u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc = -EINVAL; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + + switch (cmd) { + case QED_PUT_FILE_BEGIN: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; + break; + case QED_PUT_FILE_DATA: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; + break; + case QED_NVM_WRITE_NVRAM: + nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; + break; + default: + DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd); + rc = -EINVAL; + goto out; + } + + buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); + while (buf_idx < len) { + if (cmd == QED_PUT_FILE_BEGIN) + nvm_offset = addr; + else + nvm_offset = ((buf_size << + DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + + buf_idx; + rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, + &resp, ¶m, buf_size, + (u32 *)&p_buf[buf_idx]); + if (rc) { + DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc); + resp = FW_MSG_CODE_ERROR; + break; + } + + if (resp != FW_MSG_CODE_OK && + resp != FW_MSG_CODE_NVM_OK && + resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) { + DP_NOTICE(cdev, + "nvm write failed, resp = 0x%08x\n", resp); + rc = -EINVAL; + break; + } + + /* This can be a lengthy process, and it's possible scheduler + * isn't pre-emptable. Sleep a bit to prevent CPU hogging. + */ + if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) + usleep_range(1000, 2000); + + /* For MBI upgrade, MFW response includes the next buffer offset + * to be delivered to MFW. 
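+ * In that case buf_idx and buf_size are taken from the REQ_OFFSET and
+ * REQ_SIZE fields of the response param instead of advancing linearly.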
+ */ + if (param && cmd == QED_PUT_FILE_DATA) { + buf_idx = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = + QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + } else { + buf_idx += buf_size; + buf_size = min_t(u32, (len - buf_idx), + MCP_DRV_NVM_BUF_LEN); + } + } + + cdev->mcp_nvm_resp = resp; +out: + qed_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf) +{ + u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0; + u32 resp, param; + int rc; + + nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK; + nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK; + + addr = offset; + offset = 0; + bytes_left = len; + while (bytes_left > 0) { + bytes_to_copy = min_t(u32, bytes_left, + MAX_I2C_TRANSACTION_SIZE); + nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK | + DRV_MB_PARAM_TRANSCEIVER_PORT_MASK); + nvm_offset |= ((addr + offset) << + DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK; + nvm_offset |= (bytes_to_copy << + DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) & + DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK; + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_TRANSCEIVER_READ, + nvm_offset, &resp, ¶m, &buf_size, + (u32 *)(p_buf + offset), true); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send a transceiver read command to the MFW. rc = %d.\n", + rc); + return rc; + } + + if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) + return -ENODEV; + else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return -EINVAL; + + offset += buf_size; + bytes_left -= buf_size; + } + + return 0; +} + +int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 drv_mb_param = 0, rsp, param; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, ¶m); + + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = -EAGAIN; + + return rc; +} + +int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 drv_mb_param, rsp, param; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, ¶m); + + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (param != DRV_MB_PARAM_BIST_RC_PASSED)) + rc = -EAGAIN; + + return rc; +} + +int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images) +{ + u32 drv_mb_param = 0, rsp; + int rc = 0; + + drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT); + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST, + drv_mb_param, &rsp, num_images); + if (rc) + return rc; + + if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK)) + rc = -EINVAL; + + return rc; +} + +int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index) +{ + u32 buf_size = 0, param, resp = 0, resp_param = 0; + int rc; + + param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX << + DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT; + param |= image_index << 
DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT; + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_BIST_TEST, param, + &resp, &resp_param, + &buf_size, + (u32 *)p_image_att, false); + if (rc) + return rc; + + if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) || + (p_image_att->return_code != 1)) + rc = -EINVAL; + + return rc; +} + +int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn) +{ + struct qed_nvm_image_info nvm_info; + struct qed_ptt *p_ptt; + int rc; + u32 i; + + if (p_hwfn->nvm_info.valid) + return 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + + /* Acquire from MFW the amount of available images */ + nvm_info.num_images = 0; + rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, + p_ptt, &nvm_info.num_images); + if (rc == -EOPNOTSUPP) { + DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n"); + goto out; + } else if (rc || !nvm_info.num_images) { + DP_ERR(p_hwfn, "Failed getting number of images\n"); + goto err0; + } + + nvm_info.image_att = kmalloc_array(nvm_info.num_images, + sizeof(struct bist_nvm_image_att), + GFP_KERNEL); + if (!nvm_info.image_att) { + rc = -ENOMEM; + goto err0; + } + + /* Iterate over images and get their attributes */ + for (i = 0; i < nvm_info.num_images; i++) { + rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, + &nvm_info.image_att[i], i); + if (rc) { + DP_ERR(p_hwfn, + "Failed getting image index %d attributes\n", i); + goto err1; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, + nvm_info.image_att[i].len); + } +out: + /* Update hwfn's nvm_info */ + if (nvm_info.num_images) { + p_hwfn->nvm_info.num_images = nvm_info.num_images; + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = nvm_info.image_att; + p_hwfn->nvm_info.valid = true; + } + + qed_ptt_release(p_hwfn, p_ptt); + return 0; + +err1: + kfree(nvm_info.image_att); +err0: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} + +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->nvm_info.image_att); + p_hwfn->nvm_info.image_att = NULL; + p_hwfn->nvm_info.valid = false; +} + +int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att) +{ + enum nvm_image_type type; + int rc; + u32 i; + + /* Translate image_id into MFW definitions */ + switch (image_id) { + case QED_NVM_IMAGE_ISCSI_CFG: + type = NVM_TYPE_ISCSI_CFG; + break; + case QED_NVM_IMAGE_FCOE_CFG: + type = NVM_TYPE_FCOE_CFG; + break; + case QED_NVM_IMAGE_MDUMP: + type = NVM_TYPE_MDUMP; + break; + case QED_NVM_IMAGE_NVM_CFG1: + type = NVM_TYPE_NVM_CFG1; + break; + case QED_NVM_IMAGE_DEFAULT_CFG: + type = NVM_TYPE_DEFAULT_CFG; + break; + case QED_NVM_IMAGE_NVM_META: + type = NVM_TYPE_NVM_META; + break; + default: + DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n", + image_id); + return -EINVAL; + } + + rc = qed_mcp_nvm_info_populate(p_hwfn); + if (rc) + return rc; + + for (i = 0; i < p_hwfn->nvm_info.num_images; i++) + if (type == p_hwfn->nvm_info.image_att[i].image_type) + break; + if (i == p_hwfn->nvm_info.num_images) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Failed to find nvram image of type %08x\n", + image_id); + return -ENOENT; + } + + p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; + p_image_att->length = p_hwfn->nvm_info.image_att[i].len; + + return 0; +} + +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len) +{ + struct qed_nvm_image_att 
image_att; + int rc; + + memset(p_buffer, 0, buffer_len); + + rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att); + if (rc) + return rc; + + /* Validate sizes - both the image's and the supplied buffer's */ + if (image_att.length <= 4) { + DP_VERBOSE(p_hwfn, QED_MSG_STORAGE, + "Image [%d] is too small - only %d bytes\n", + image_id, image_att.length); + return -EINVAL; + } + + if (image_att.length > buffer_len) { + DP_VERBOSE(p_hwfn, + QED_MSG_STORAGE, + "Image [%d] is too big - %08x bytes where only %08x are available\n", + image_id, image_att.length, buffer_len); + return -ENOMEM; + } + + return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr, + p_buffer, image_att.length); +} + +static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id) +{ + enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID; + + switch (res_id) { + case QED_SB: + mfw_res_id = RESOURCE_NUM_SB_E; + break; + case QED_L2_QUEUE: + mfw_res_id = RESOURCE_NUM_L2_QUEUE_E; + break; + case QED_VPORT: + mfw_res_id = RESOURCE_NUM_VPORT_E; + break; + case QED_RSS_ENG: + mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E; + break; + case QED_PQ: + mfw_res_id = RESOURCE_NUM_PQ_E; + break; + case QED_RL: + mfw_res_id = RESOURCE_NUM_RL_E; + break; + case QED_MAC: + case QED_VLAN: + /* Each VFC resource can accommodate both a MAC and a VLAN */ + mfw_res_id = RESOURCE_VFC_FILTER_E; + break; + case QED_ILT: + mfw_res_id = RESOURCE_ILT_E; + break; + case QED_LL2_RAM_QUEUE: + mfw_res_id = RESOURCE_LL2_QUEUE_E; + break; + case QED_LL2_CTX_QUEUE: + mfw_res_id = RESOURCE_LL2_CQS_E; + break; + case QED_RDMA_CNQ_RAM: + case QED_CMDQS_CQS: + /* CNQ/CMDQS are the same resource */ + mfw_res_id = RESOURCE_CQS_E; + break; + case QED_RDMA_STATS_QUEUE: + mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E; + break; + case QED_BDQ: + mfw_res_id = RESOURCE_BDQ_E; + break; + default: + break; + } + + return mfw_res_id; +} + +#define QED_RESC_ALLOC_VERSION_MAJOR 2 +#define QED_RESC_ALLOC_VERSION_MINOR 0 +#define QED_RESC_ALLOC_VERSION \ + ((QED_RESC_ALLOC_VERSION_MAJOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \ + (QED_RESC_ALLOC_VERSION_MINOR << \ + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT)) + +struct qed_resc_alloc_in_params { + u32 cmd; + enum qed_resources res_id; + u32 resc_max_val; +}; + +struct qed_resc_alloc_out_params { + u32 mcp_resp; + u32 mcp_param; + u32 resc_num; + u32 resc_start; + u32 vf_resc_num; + u32 vf_resc_start; + u32 flags; +}; + +static int +qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_resc_alloc_in_params *p_in_params, + struct qed_resc_alloc_out_params *p_out_params) +{ + struct qed_mcp_mb_params mb_params; + struct resource_info mfw_resc_info; + int rc; + + memset(&mfw_resc_info, 0, sizeof(mfw_resc_info)); + + mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id); + if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) { + DP_ERR(p_hwfn, + "Failed to match resource %d [%s] with the MFW resources\n", + p_in_params->res_id, + qed_hw_get_resc_name(p_in_params->res_id)); + return -EINVAL; + } + + switch (p_in_params->cmd) { + case DRV_MSG_SET_RESOURCE_VALUE_MSG: + mfw_resc_info.size = p_in_params->resc_max_val; + fallthrough; + case DRV_MSG_GET_RESOURCE_ALLOC_MSG: + break; + default: + DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n", + p_in_params->cmd); + return -EINVAL; + } + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = p_in_params->cmd; + mb_params.param = QED_RESC_ALLOC_VERSION; + mb_params.p_data_src = 
&mfw_resc_info; + mb_params.data_src_size = sizeof(mfw_resc_info); + mb_params.p_data_dst = mb_params.p_data_src; + mb_params.data_dst_size = mb_params.data_src_size; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n", + p_in_params->cmd, + p_in_params->res_id, + qed_hw_get_resc_name(p_in_params->res_id), + QED_MFW_GET_FIELD(mb_params.param, + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), + QED_MFW_GET_FIELD(mb_params.param, + DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), + p_in_params->resc_max_val); + + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + p_out_params->mcp_resp = mb_params.mcp_resp; + p_out_params->mcp_param = mb_params.mcp_param; + p_out_params->resc_num = mfw_resc_info.size; + p_out_params->resc_start = mfw_resc_info.offset; + p_out_params->vf_resc_num = mfw_resc_info.vf_size; + p_out_params->vf_resc_start = mfw_resc_info.vf_offset; + p_out_params->flags = mfw_resc_info.flags; + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n", + QED_MFW_GET_FIELD(p_out_params->mcp_param, + FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR), + QED_MFW_GET_FIELD(p_out_params->mcp_param, + FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR), + p_out_params->resc_num, + p_out_params->resc_start, + p_out_params->vf_resc_num, + p_out_params->vf_resc_start, p_out_params->flags); + + return 0; +} + +int +qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 resc_max_val, u32 *p_mcp_resp) +{ + struct qed_resc_alloc_out_params out_params; + struct qed_resc_alloc_in_params in_params; + int rc; + + memset(&in_params, 0, sizeof(in_params)); + in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG; + in_params.res_id = res_id; + in_params.resc_max_val = resc_max_val; + memset(&out_params, 0, sizeof(out_params)); + rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc) + return rc; + + *p_mcp_resp = out_params.mcp_resp; + + return 0; +} + +int +qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start) +{ + struct qed_resc_alloc_out_params out_params; + struct qed_resc_alloc_in_params in_params; + int rc; + + memset(&in_params, 0, sizeof(in_params)); + in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG; + in_params.res_id = res_id; + memset(&out_params, 0, sizeof(out_params)); + rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params, + &out_params); + if (rc) + return rc; + + *p_mcp_resp = out_params.mcp_resp; + + if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) { + *p_resc_num = out_params.resc_num; + *p_resc_start = out_params.resc_start; + } + + return 0; +} + +int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param; + + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0, + &mcp_resp, &mcp_param); +} + +static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 param, u32 *p_mcp_resp, u32 *p_mcp_param) +{ + int rc; + + rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, + param, p_mcp_resp, p_mcp_param); + if (rc) + return rc; + + if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The resource command is unsupported by the MFW\n"); + return -EINVAL; + } + + if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) { + u8 opcode 
= QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE); + + DP_NOTICE(p_hwfn, + "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n", + param, opcode); + return -EINVAL; + } + + return rc; +} + +static int +__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_resc_lock_params *p_params) +{ + u32 param = 0, mcp_resp, mcp_param; + u8 opcode; + int rc; + + switch (p_params->timeout) { + case QED_MCP_RESC_LOCK_TO_DEFAULT: + opcode = RESOURCE_OPCODE_REQ; + p_params->timeout = 0; + break; + case QED_MCP_RESC_LOCK_TO_NONE: + opcode = RESOURCE_OPCODE_REQ_WO_AGING; + p_params->timeout = 0; + break; + default: + opcode = RESOURCE_OPCODE_REQ_W_AGING; + break; + } + + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n", + param, p_params->timeout, opcode, p_params->resource); + + /* Attempt to acquire the resource */ + rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); + if (rc) + return rc; + + /* Analyze the response */ + p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER); + opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); + + DP_VERBOSE(p_hwfn, + QED_MSG_SP, + "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n", + mcp_param, opcode, p_params->owner); + + switch (opcode) { + case RESOURCE_OPCODE_GNT: + p_params->b_granted = true; + break; + case RESOURCE_OPCODE_BUSY: + p_params->b_granted = false; + break; + default: + DP_NOTICE(p_hwfn, + "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n", + mcp_param, opcode); + return -EINVAL; + } + + return 0; +} + +int +qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params) +{ + u32 retry_cnt = 0; + int rc; + + do { + /* No need for an interval before the first iteration */ + if (retry_cnt) { + if (p_params->sleep_b4_retry) { + u16 retry_interval_in_ms = + DIV_ROUND_UP(p_params->retry_interval, + 1000); + + msleep(retry_interval_in_ms); + } else { + udelay(p_params->retry_interval); + } + } + + rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params); + if (rc) + return rc; + + if (p_params->b_granted) + break; + } while (retry_cnt++ < p_params->retry_num); + + return 0; +} + +int +qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_resc_unlock_params *p_params) +{ + u32 param = 0, mcp_resp, mcp_param; + u8 opcode; + int rc; + + opcode = p_params->b_force ? 
RESOURCE_OPCODE_FORCE_RELEASE + : RESOURCE_OPCODE_RELEASE; + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource); + QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n", + param, opcode, p_params->resource); + + /* Attempt to release the resource */ + rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param); + if (rc) + return rc; + + /* Analyze the response */ + opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Resource unlock response: mcp_param 0x%08x [opcode %d]\n", + mcp_param, opcode); + + switch (opcode) { + case RESOURCE_OPCODE_RELEASED_PREVIOUS: + DP_INFO(p_hwfn, + "Resource unlock request for an already released resource [%d]\n", + p_params->resource); + fallthrough; + case RESOURCE_OPCODE_RELEASED: + p_params->b_released = true; + break; + case RESOURCE_OPCODE_WRONG_OWNER: + p_params->b_released = false; + break; + default: + DP_NOTICE(p_hwfn, + "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n", + mcp_param, opcode); + return -EINVAL; + } + + return 0; +} + +void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, + struct qed_resc_unlock_params *p_unlock, + enum qed_resc_lock + resource, bool b_is_permanent) +{ + if (p_lock) { + memset(p_lock, 0, sizeof(*p_lock)); + + /* Permanent resources don't require aging, and there's no + * point in trying to acquire them more than once since it's + * unexpected another entity would release them. + */ + if (b_is_permanent) { + p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE; + } else { + p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT; + p_lock->retry_interval = + QED_MCP_RESC_LOCK_RETRY_VAL_DFLT; + p_lock->sleep_b4_retry = true; + } + + p_lock->resource = resource; + } + + if (p_unlock) { + memset(p_unlock, 0, sizeof(*p_unlock)); + p_unlock->resource = resource; + } +} + +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ); +} + +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT, + 0, &mcp_resp, &p_hwfn->mcp_info->capabilities); + if (!rc) + DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE), + "MFW supported features: %08x\n", + p_hwfn->mcp_info->capabilities); + + return rc; +} + +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 mcp_resp, mcp_param, features; + + features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE | + DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK | + DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL; + + return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT, + features, &mcp_resp, &mcp_param); +} + +int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params mb_params = {0}; + struct qed_dev *cdev = p_hwfn->cdev; + u8 fir_valid, l2_valid; + int rc; + + mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The get_engine_config command is unsupported by the MFW\n"); + return -EOPNOTSUPP; + } + + fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID); + if (fir_valid) + cdev->fir_affin = + 
QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE); + + l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID); + if (l2_valid) + cdev->l2_affin_hint = + QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE); + + DP_INFO(p_hwfn, + "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n", + fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint); + + return 0; +} + +int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_mcp_mb_params mb_params = {0}; + struct qed_dev *cdev = p_hwfn->cdev; + int rc; + + mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The get_ppfid_bitmap command is unsupported by the MFW\n"); + return -EOPNOTSUPP; + } + + cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param, + FW_MB_PARAM_PPFID_BITMAP); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n", + cdev->ppfid_bitmap); + + return 0; +} + +int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 *p_len) +{ + u32 mb_param = 0, resp, param; + int rc; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_GET_NVM_CFG_OPTION, + mb_param, &resp, ¶m, p_len, + (u32 *)p_buf, false); + + return rc; +} + +int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 len) +{ + u32 mb_param = 0, resp, param; + + QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id); + if (flags & QED_NVM_CFG_OPTION_ALL) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1); + if (flags & QED_NVM_CFG_OPTION_INIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1); + if (flags & QED_NVM_CFG_OPTION_COMMIT) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1); + if (flags & QED_NVM_CFG_OPTION_FREE) + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1); + if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) { + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1); + QED_MFW_SET_FIELD(mb_param, + DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID, + entity_id); + } + + return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_SET_NVM_CFG_OPTION, + mb_param, &resp, ¶m, len, (u32 *)p_buf); +} + +#define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN +#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32) +#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \ + (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE) + +static int +__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_buf, u8 size) +{ + struct qed_mcp_mb_params mb_params; + int rc; + + if (size > QED_MCP_DBG_DATA_MAX_SIZE) { + DP_ERR(p_hwfn, + "Debug data size is %d while it should not exceed %d\n", + size, 
QED_MCP_DBG_DATA_MAX_SIZE); + return -EINVAL; + } + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND; + SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size); + mb_params.p_data_src = p_buf; + mb_params.data_src_size = size; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) { + DP_INFO(p_hwfn, + "The DEBUG_DATA_SEND command is unsupported by the MFW\n"); + return -EOPNOTSUPP; + } else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) { + DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n"); + return -EBUSY; + } else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) { + DP_NOTICE(p_hwfn, + "Failed to send debug data to the MFW [resp 0x%08x]\n", + mb_params.mcp_resp); + return -EINVAL; + } + + return 0; +} + +enum qed_mcp_dbg_data_type { + QED_MCP_DBG_DATA_TYPE_RAW, +}; + +/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */ +#define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0 +#define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff +#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12 +#define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000 +#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20 +#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000 +#define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28 +#define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000 + +#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1 +#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2 + +static int +qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size) +{ + u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf; + u32 tmp_size = size, *p_header, *p_payload; + u8 flags = 0; + u16 seq; + int rc; + + p_header = (u32 *)raw_data; + p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE); + + seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq); + + /* First chunk is marked as 'first' */ + flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST; + + *p_header = 0; + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq); + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type); + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags); + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id); + + while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) { + memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE); + rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data, + QED_MCP_DBG_DATA_MAX_SIZE); + if (rc) + return rc; + + /* Clear the 'first' marking after sending the first chunk */ + if (p_tmp_buf == p_buf) { + flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST; + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, + flags); + } + + p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE; + tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE; + } + + /* Last chunk is marked as 'last' */ + flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST; + SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags); + memcpy(p_payload, p_tmp_buf, tmp_size); + + /* Casting the left size to u8 is ok since at this point it is <= 32 */ + return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data, + (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE + + tmp_size)); +} + +int +qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_buf, u32 size) +{ + return qed_mcp_send_debug_data(p_hwfn, p_ptt, + QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size); +} + +bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn) +{ + return 
!!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK); +} + +int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active) +{ + u32 resp = 0, param = 0; + int rc; + + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, ¶m); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc); + return rc; + } + + *active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED); + + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h new file mode 100644 index 0000000000..9bd0565fe8 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -0,0 +1,1396 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_MCP_H +#define _QED_MCP_H + +#include <linux/types.h> +#include <linux/delay.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_fcoe_if.h> +#include "qed_hsi.h" +#include "qed_dev_api.h" + +#define QED_MFW_REPORT_STR_SIZE 256 + +struct qed_mcp_link_speed_params { + bool autoneg; + + u32 advertised_speeds; +#define QED_EXT_SPEED_MASK_RES 0x1 +#define QED_EXT_SPEED_MASK_1G 0x2 +#define QED_EXT_SPEED_MASK_10G 0x4 +#define QED_EXT_SPEED_MASK_20G 0x8 +#define QED_EXT_SPEED_MASK_25G 0x10 +#define QED_EXT_SPEED_MASK_40G 0x20 +#define QED_EXT_SPEED_MASK_50G_R 0x40 +#define QED_EXT_SPEED_MASK_50G_R2 0x80 +#define QED_EXT_SPEED_MASK_100G_R2 0x100 +#define QED_EXT_SPEED_MASK_100G_R4 0x200 +#define QED_EXT_SPEED_MASK_100G_P4 0x400 + + u32 forced_speed; /* In Mb/s */ +#define QED_EXT_SPEED_1G 0x1 +#define QED_EXT_SPEED_10G 0x2 +#define QED_EXT_SPEED_20G 0x4 +#define QED_EXT_SPEED_25G 0x8 +#define QED_EXT_SPEED_40G 0x10 +#define QED_EXT_SPEED_50G_R 0x20 +#define QED_EXT_SPEED_50G_R2 0x40 +#define QED_EXT_SPEED_100G_R2 0x80 +#define QED_EXT_SPEED_100G_R4 0x100 +#define QED_EXT_SPEED_100G_P4 0x200 +}; + +struct qed_mcp_link_pause_params { + bool autoneg; + bool forced_rx; + bool forced_tx; +}; + +enum qed_mcp_eee_mode { + QED_MCP_EEE_DISABLED, + QED_MCP_EEE_ENABLED, + QED_MCP_EEE_UNSUPPORTED +}; + +struct qed_mcp_link_params { + struct qed_mcp_link_speed_params speed; + struct qed_mcp_link_pause_params pause; + u32 loopback_mode; + struct qed_link_eee_params eee; + u32 fec; + + struct qed_mcp_link_speed_params ext_speed; + u32 ext_fec_mode; +}; + +struct qed_mcp_link_capabilities { + u32 speed_capabilities; + bool default_speed_autoneg; + u32 fec_default; + enum qed_mcp_eee_mode default_eee; + u32 eee_lpi_timer; + u8 eee_speed_caps; + + u32 default_ext_speed_caps; + u32 default_ext_autoneg; + u32 default_ext_speed; + u32 default_ext_fec; +}; + +struct qed_mcp_link_state { + bool link_up; + u32 min_pf_rate; + + /* Actual link speed in Mb/s */ + u32 line_speed; + + /* PF max speed in Mb/s, deduced from line_speed + * according to PF max bandwidth configuration. 
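+ * (For example, a 25000 Mb/s line_speed combined with a 40% maximum
+ * bandwidth would typically be reported here as 10000; the exact
+ * scaling is applied by the bandwidth-configuration code.)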
+ */ + u32 speed; + + bool full_duplex; + bool an; + bool an_complete; + bool parallel_detection; + bool pfc_enabled; + + u32 partner_adv_speed; +#define QED_LINK_PARTNER_SPEED_1G_HD BIT(0) +#define QED_LINK_PARTNER_SPEED_1G_FD BIT(1) +#define QED_LINK_PARTNER_SPEED_10G BIT(2) +#define QED_LINK_PARTNER_SPEED_20G BIT(3) +#define QED_LINK_PARTNER_SPEED_25G BIT(4) +#define QED_LINK_PARTNER_SPEED_40G BIT(5) +#define QED_LINK_PARTNER_SPEED_50G BIT(6) +#define QED_LINK_PARTNER_SPEED_100G BIT(7) + + bool partner_tx_flow_ctrl_en; + bool partner_rx_flow_ctrl_en; + + u8 partner_adv_pause; +#define QED_LINK_PARTNER_SYMMETRIC_PAUSE 0x1 +#define QED_LINK_PARTNER_ASYMMETRIC_PAUSE 0x2 +#define QED_LINK_PARTNER_BOTH_PAUSE 0x3 + + bool sfp_tx_fault; + bool eee_active; + u8 eee_adv_caps; + u8 eee_lp_adv_caps; + + u32 fec_active; +}; + +struct qed_mcp_function_info { + u8 pause_on_host; + + enum qed_pci_personality protocol; + + u8 bandwidth_min; + u8 bandwidth_max; + + u8 mac[ETH_ALEN]; + + u64 wwn_port; + u64 wwn_node; + +#define QED_MCP_VLAN_UNSET (0xffff) + u16 ovlan; + + u16 mtu; +}; + +struct qed_mcp_nvm_common { + u32 offset; + u32 param; + u32 resp; + u32 cmd; +}; + +struct qed_mcp_drv_version { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct qed_mcp_lan_stats { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; +}; + +struct qed_mcp_fcoe_stats { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct qed_mcp_iscsi_stats { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct qed_mcp_rdma_stats { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_byts; +}; + +enum qed_mcp_protocol_type { + QED_MCP_LAN_STATS, + QED_MCP_FCOE_STATS, + QED_MCP_ISCSI_STATS, + QED_MCP_RDMA_STATS +}; + +union qed_mcp_protocol_stats { + struct qed_mcp_lan_stats lan_stats; + struct qed_mcp_fcoe_stats fcoe_stats; + struct qed_mcp_iscsi_stats iscsi_stats; + struct qed_mcp_rdma_stats rdma_stats; +}; + +enum qed_ov_eswitch { + QED_OV_ESWITCH_NONE, + QED_OV_ESWITCH_VEB, + QED_OV_ESWITCH_VEPA +}; + +enum qed_ov_client { + QED_OV_CLIENT_DRV, + QED_OV_CLIENT_USER, + QED_OV_CLIENT_VENDOR_SPEC +}; + +enum qed_ov_driver_state { + QED_OV_DRIVER_STATE_NOT_LOADED, + QED_OV_DRIVER_STATE_DISABLED, + QED_OV_DRIVER_STATE_ACTIVE +}; + +enum qed_ov_wol { + QED_OV_WOL_DEFAULT, + QED_OV_WOL_DISABLED, + QED_OV_WOL_ENABLED +}; + +enum qed_mfw_tlv_type { + QED_MFW_TLV_GENERIC = 0x1, /* Core driver TLVs */ + QED_MFW_TLV_ETH = 0x2, /* L2 driver TLVs */ + QED_MFW_TLV_FCOE = 0x4, /* FCoE protocol TLVs */ + QED_MFW_TLV_ISCSI = 0x8, /* SCSI protocol TLVs */ + QED_MFW_TLV_MAX = 0x16, +}; + +struct qed_mfw_tlv_generic { +#define QED_MFW_TLV_FLAGS_SIZE 2 + struct { + u8 ipv4_csum_offload; + u8 lso_supported; + bool b_set; + } flags; + +#define QED_MFW_TLV_MAC_COUNT 3 + /* First entry for primary MAC, 2 secondary MACs possible */ + u8 mac[QED_MFW_TLV_MAC_COUNT][6]; + bool mac_set[QED_MFW_TLV_MAC_COUNT]; + + u64 rx_frames; + bool rx_frames_set; + u64 rx_bytes; + bool rx_bytes_set; + u64 tx_frames; + bool tx_frames_set; + u64 tx_bytes; + bool tx_bytes_set; +}; + +union qed_mfw_tlv_data { + struct qed_mfw_tlv_generic generic; + struct qed_mfw_tlv_eth eth; + struct qed_mfw_tlv_fcoe fcoe; + struct qed_mfw_tlv_iscsi iscsi; +}; + +#define QED_NVM_CFG_OPTION_ALL BIT(0) +#define QED_NVM_CFG_OPTION_INIT BIT(1) +#define QED_NVM_CFG_OPTION_COMMIT BIT(2) +#define QED_NVM_CFG_OPTION_FREE BIT(3) +#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) + +/** + * qed_mcp_get_link_params(): Returns the link params 
of the hw function. + * + * @p_hwfn: HW device data. + * + * Returns: Pointer to link params. + */ +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_get_link_state(): Return the link state of the hw function. + * + * @p_hwfn: HW device data. + * + * Returns: Pointer to link state. + */ +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_get_link_capabilities(): Return the link capabilities of the + * hw function. + * + * @p_hwfn: HW device data. + * + * Returns: Pointer to link capabilities. + */ +struct qed_mcp_link_capabilities + *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_set_link(): Request the MFW to set the link according + * to 'link_input'. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_up: Raise link if `true'. Reset link if `false'. + * + * Return: Int. + */ +int qed_mcp_set_link(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + bool b_up); + +/** + * qed_mcp_get_mfw_ver(): Get the management firmware version value. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mfw_ver: MFW version value. + * @p_running_bundle_id: Image id in nvram; Optional. + * + * Return: Int - 0 - operation was successful. + */ +int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_mfw_ver, u32 *p_running_bundle_id); + +/** + * qed_mcp_get_mbi_ver(): Get the MBI version value. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mbi_ver: A pointer to a variable to be filled with the MBI version. + * + * Return: Int - 0 - operation was successful. + */ +int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_mbi_ver); + +/** + * qed_mcp_get_media_type(): Get media type value of the port. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @media_type: Media type value + * + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed + */ +int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *media_type); + +/** + * qed_mcp_get_transceiver_data(): Get transceiver data of the port. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_transceiver_state: Transceiver state. + * @p_tranceiver_type: Media type value. + * + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed + */ +int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_transceiver_state, + u32 *p_tranceiver_type); + +/** + * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_speed_mask: Bit mask of all supported speeds. + * + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed + */ + +int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_speed_mask); + +/** + * qed_mcp_get_board_config(): Get board configuration. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_board_config: Board config. + * + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed + */ +int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *p_board_config); + +/** + * qed_mcp_cmd(): Sleepable function for sending commands to the MCP + * mailbox. It acquire mutex lock for the entire + * operation, from sending the request until the MCP + * response. Waiting for MCP response will be checked up + * to 5 seconds every 10ms. Should not be called from atomic + * context. 
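+ * A minimal illustrative call, assuming a PTT was already acquired
+ * with qed_ptt_acquire():
+ *
+ * u32 resp = 0, param = 0;
+ * rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 1000,
+ * &resp, &param);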
+ * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @cmd: command to be sent to the MCP. + * @param: Optional param + * @o_mcp_resp: The MCP response code (exclude sequence). + * @o_mcp_param: Optional parameter provided by the MCP + * response + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param); + +/** + * qed_mcp_cmd_nosleep(): Function for sending commands to the MCP + * mailbox. It acquire mutex lock for the entire + * operation, from sending the request until the MCP + * response. Waiting for MCP response will be checked up + * to 5 seconds every 10us. Should be called when sleep + * is not allowed. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @cmd: command to be sent to the MCP. + * @param: Optional param + * @o_mcp_resp: The MCP response code (exclude sequence). + * @o_mcp_param: Optional parameter provided by the MCP + * response + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param); + +/** + * qed_mcp_drain(): drains the nig, allowing completion to pass in + * case of pauses. + * (Should be called only from sleepable context) + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. + */ +int qed_mcp_drain(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_mcp_get_flash_size(): Get the flash size value. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_flash_size: Flash size in bytes to be filled. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *p_flash_size); + +/** + * qed_mcp_send_drv_version(): Send driver version to MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_ver: Version value. + * + * Return: Int - 0 - Operation was successul. + */ +int +qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_drv_version *p_ver); + +/** + * qed_get_process_kill_counter(): Read the MFW process kill counter. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: u32. + */ +u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_start_recovery_process(): Trigger a recovery process. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. + */ +int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_recovery_prolog(): A recovery handler must call this function + * as its first step. + * It is assumed that the handler is not run from + * an interrupt context. + * + * @cdev: Qed dev pointer. + * + * Return: int. + */ +int qed_recovery_prolog(struct qed_dev *cdev); + +/** + * qed_mcp_ov_update_current_config(): Notify MFW about the change in base + * device properties + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @client: Qed client type. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_client client); + +/** + * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state. + * + * @p_hwfn: HW device data. 
+ * @p_ptt: P_ptt. + * @drv_state: Driver state. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_driver_state drv_state); + +/** + * qed_mcp_ov_update_mtu(): Send MTU size to MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mtu: MTU size. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u16 mtu); + +/** + * qed_mcp_ov_update_mac(): Send MAC address to MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mac: MAC address. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, const u8 *mac); + +/** + * qed_mcp_ov_update_wol(): Send WOL mode to MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @wol: WOL mode. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_wol wol); + +/** + * qed_mcp_set_led(): Set LED status. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mode: LED mode. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_set_led(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_led_mode mode); + +/** + * qed_mcp_nvm_read(): Read from NVM. + * + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @p_buf: NVM read buffer. + * @len: Buffer len. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); + +/** + * qed_mcp_nvm_write(): Write to NVM. + * + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @cmd: NVM command. + * @p_buf: NVM write buffer. + * @len: Buffer len. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_nvm_write(struct qed_dev *cdev, + u32 cmd, u32 addr, u8 *p_buf, u32 len); + +/** + * qed_mcp_nvm_resp(): Check latest response. + * + * @cdev: Qed dev pointer. + * @p_buf: NVM write buffer. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf); + +struct qed_nvm_image_att { + u32 start_addr; + u32 length; +}; + +/** + * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image. + * + * @p_hwfn: HW device data. + * @image_id: Image to get attributes for. + * @p_image_att: Image attributes structure into which to fill data. + * + * Return: Int - 0 - Operation was successul. + */ +int +qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, + struct qed_nvm_image_att *p_image_att); + +/** + * qed_mcp_get_nvm_image(): Allows reading a whole nvram image. + * + * @p_hwfn: HW device data. + * @image_id: image requested for reading. + * @p_buffer: allocated buffer into which to fill data. + * @buffer_len: length of the allocated buffer. + * + * Return: 0 if p_buffer now contains the nvram image. + */ +int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, + enum qed_nvm_images image_id, + u8 *p_buffer, u32 buffer_len); + +/** + * qed_mcp_bist_register_test(): Bist register test. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_mcp_bist_clock_test(): Bist clock test. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int - 0 - Operation was successul. 
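+ *
+ * Both BIST helpers share the same calling convention; a typical
+ * self-test sequence (sketch only) is:
+ *
+ *	rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+ *	if (!rc)
+ *		rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);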
+ */ +int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @num_images: number of images if operation was + * successful. 0 if not. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 *num_images); + +/** + * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes + * by index. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_image_att: Attributes of image. + * @image_index: Index of image to get information for. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct bist_nvm_image_att *p_image_att, + u32 image_index); + +/** + * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e., + * get the required TLV info + * from the qed client and send it to the MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: 0 upon success. + */ +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_buf: raw debug data buffer. + * @size: Buffer size. + * + * Return : Int. + */ +int +qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 *p_buf, u32 size); + +/* Using hwfn number (and not pf_num) is required since in CMT mode, + * same pf_num may be used by two different hwfn + * TODO - this shouldn't really be in .h file, but until all fields + * required during hw-init will be placed in their correct place in shmem + * we need it in qed_dev.c [for readin the nvram reflection in shmem]. + */ +#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (QED_IS_BB((p_hwfn)->cdev) ? \ + ((rel_pfid) | \ + ((p_hwfn)->abs_pf_id & 1) << 3) : \ + rel_pfid) +#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) + +struct qed_mcp_info { + /* List for mailbox commands which were sent and wait for a response */ + struct list_head cmd_list; + + /* Spinlock used for protecting the access to the mailbox commands list + * and the sending of the commands. + */ + spinlock_t cmd_lock; + + /* Flag to indicate whether sending a MFW mailbox command is blocked */ + bool b_block_cmd; + + /* Spinlock used for syncing SW link-changes and link-changes + * originating from attention context. 
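+ * In other words, it is expected to serialize driver-initiated
+ * updates of the link_input/link_output fields below against
+ * updates made by the MFW link-change attention handler.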
+ */ + spinlock_t link_lock; + + u32 public_base; + u32 drv_mb_addr; + u32 mfw_mb_addr; + u32 port_addr; + u16 drv_mb_seq; + u16 drv_pulse_seq; + struct qed_mcp_link_params link_input; + struct qed_mcp_link_state link_output; + struct qed_mcp_link_capabilities link_capabilities; + struct qed_mcp_function_info func_info; + u8 *mfw_mb_cur; + u8 *mfw_mb_shadow; + u16 mfw_mb_length; + u32 mcp_hist; + + /* Capabilties negotiated with the MFW */ + u32 capabilities; + + /* S/N for debug data mailbox commands */ + atomic_t dbg_data_seq; + + /* Spinlock used to sync the flag mcp_handling_status with + * the mfw events handler + */ + spinlock_t unload_lock; + unsigned long mcp_handling_status; +#define QED_MCP_BYPASS_PROC_BIT 0 +#define QED_MCP_IN_PROCESSING_BIT 1 +}; + +struct qed_mcp_mb_params { + u32 cmd; + u32 param; + void *p_data_src; + void *p_data_dst; + u8 data_src_size; + u8 data_dst_size; + u32 mcp_resp; + u32 mcp_param; + u32 flags; +#define QED_MB_FLAG_CAN_SLEEP (0x1 << 0) +#define QED_MB_FLAG_AVOID_BLOCK (0x1 << 1) +#define QED_MB_FLAGS_IS_SET(params, flag) \ + ({ typeof(params) __params = (params); \ + (__params && (__params->flags & QED_MB_FLAG_ ## flag)); }) +}; + +struct qed_drv_tlv_hdr { + u8 tlv_type; + u8 tlv_length; /* In dwords - not including this header */ + u8 tlv_reserved; +#define QED_DRV_TLV_FLAGS_CHANGED 0x01 + u8 tlv_flags; +}; + +/** + * qed_mcp_is_ext_speed_supported() - Check if management firmware supports + * extended speeds. + * @p_hwfn: HW device data. + * + * Return: true if supported, false otherwise. + */ +static inline bool +qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) +{ + return !!(p_hwfn->mcp_info->capabilities & + FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL); +} + +/** + * qed_mcp_cmd_init(): Initialize the interface with the MCP. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. + */ +int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + * + * Can only be called after `num_ports_in_engines' is set + */ +void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); +/** + * qed_mcp_free(): Releases resources allocated during the init process. + * + * @p_hwfn: HW function. + * + * Return: Int. + */ + +int qed_mcp_free(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_handle_events(): This function is called from the DPC context. + * After pointing PTT to the mfw mb, check for events sent by + * the MCP to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @p_hwfn: HW function. + * @p_ptt: PTT required for register access. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +enum qed_drv_role { + QED_DRV_ROLE_OS, + QED_DRV_ROLE_KDUMP, +}; + +struct qed_load_req_params { + /* Input params */ + enum qed_drv_role drv_role; + u8 timeout_val; + bool avoid_eng_reset; + enum qed_override_force_load override_force_load; + + /* Output params */ + u32 load_code; +}; + +/** + * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the + * operation succeeds, returns whether this PF is + * the first on the engine/port or function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. 
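+ *
+ * A minimal calling sketch (field values are illustrative only):
+ *
+ *	struct qed_load_req_params params = { .drv_role = QED_DRV_ROLE_OS };
+ *	int rc;
+ *
+ *	rc = qed_mcp_load_req(p_hwfn, p_ptt, &params);
+ *	if (!rc)
+ *		... inspect params.load_code to see whether this PF came up
+ *		    first on the engine, first on the port, or neither ...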
+ * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_load_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_load_req_params *p_params); + +/** + * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + */ +void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vfs_to_ack: bit mask of all engine VFs for which the PF acks. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 *vfs_to_ack); + +/** + * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of + * all function-related info. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: 0 upon success. + */ +int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_mcp_reset(): Reset the MCP using mailbox command. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: 0 upon success. + */ +int qed_mcp_reset(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt); + +/** + * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get + * a buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands. + * @param: [0:23] - Offset [24:31] - Size. + * @o_mcp_resp: MCP response. + * @o_mcp_param: MCP response param. + * @o_txn_size: Buffer size output. + * @o_buf: Pointer to the buffer returned by the MFW. + * @b_can_sleep: Can sleep. + * + * Return: 0 upon success. + */ +int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf, bool b_can_sleep); + +/** + * qed_mcp_phy_sfp_read(): Read from sfp. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @port: transceiver port. + * @addr: I2C address. + * @offset: offset in sfp. + * @len: buffer length. + * @p_buf: buffer to read into. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); + +/** + * qed_mcp_is_init(): indicates whether the MFW objects [under mcp_info] + * are accessible + * + * @p_hwfn: HW device data. + * + * Return: true if MFW is running and mcp_info is initialized. + */ +bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. 
+ * @vf_id: absolute inside engine. + * @num: number of entries to request. + * + * Return: Int. + */ +int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 vf_id, u8 num); + +/** + * qed_mcp_halt(): Halt the MCP. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: 0 upon success. + */ +int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_resume: Wake up the MCP. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: 0 upon success. + */ +int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw); +int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw); +int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 max_bw); +int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_link_state *p_link, + u8 min_bw); + +int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u32 mask_parities); + +/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mdump_retain: mdump retain. + * + * Return: Int - 0 - Operation was successul. + */ +int +qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct mdump_retain_data_stc *p_mdump_retain); + +/** + * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: RES ID. + * @resc_max_val: Resec max val. + * @p_mcp_resp: MCP Resp + * + * Return: Int - 0 - Operation was successul. + */ +int +qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 resc_max_val, u32 *p_mcp_resp); + +/** + * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given + * resource. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: Res ID. + * @p_mcp_resp: MCP resp. + * @p_resc_num: Resc num. + * @p_resc_start: Resc start. + * + * Return: Int - 0 - Operation was successul. + */ +int +qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_resources res_id, + u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start); + +/** + * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @eswitch: eswitch mode. + * + * Return: Int - 0 - Operation was successul. + */ +int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + enum qed_ov_eswitch eswitch); + +#define QED_MCP_RESC_LOCK_MIN_VAL RESOURCE_DUMP +#define QED_MCP_RESC_LOCK_MAX_VAL 31 + +enum qed_resc_lock { + QED_RESC_LOCK_DBG_DUMP = QED_MCP_RESC_LOCK_MIN_VAL, + QED_RESC_LOCK_PTP_PORT0, + QED_RESC_LOCK_PTP_PORT1, + QED_RESC_LOCK_PTP_PORT2, + QED_RESC_LOCK_PTP_PORT3, + QED_RESC_LOCK_RESC_ALLOC = QED_MCP_RESC_LOCK_MAX_VAL, + QED_RESC_LOCK_RESC_INVALID +}; + +/** + * qed_mcp_initiate_pf_flr(): Initiates PF FLR. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int - 0 - Operation was successul. 
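+ *
+ * Like the other helpers here it needs an acquired PTT, e.g. (sketch only):
+ *
+ *	p_ptt = qed_ptt_acquire(p_hwfn);
+ *	if (p_ptt) {
+ *		rc = qed_mcp_initiate_pf_flr(p_hwfn, p_ptt);
+ *		qed_ptt_release(p_hwfn, p_ptt);
+ *	}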
+ */ +int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +struct qed_resc_lock_params { + /* Resource number [valid values are 0..31] */ + u8 resource; + + /* Lock timeout value in seconds [default, none or 1..254] */ + u8 timeout; +#define QED_MCP_RESC_LOCK_TO_DEFAULT 0 +#define QED_MCP_RESC_LOCK_TO_NONE 255 + + /* Number of times to retry locking */ + u8 retry_num; +#define QED_MCP_RESC_LOCK_RETRY_CNT_DFLT 10 + + /* The interval in usec between retries */ + u16 retry_interval; +#define QED_MCP_RESC_LOCK_RETRY_VAL_DFLT 10000 + + /* Use sleep or delay between retries */ + bool sleep_b4_retry; + + /* Will be set as true if the resource is free and granted */ + bool b_granted; + + /* Will be filled with the resource owner. + * [0..15 = PF0-15, 16 = MFW] + */ + u8 owner; +}; + +/** + * qed_mcp_resc_lock(): Acquires MFW generic resource lock. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. + * + * Return: Int - 0 - Operation was successul. + */ +int +qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params); + +struct qed_resc_unlock_params { + /* Resource number [valid values are 0..31] */ + u8 resource; + + /* Allow to release a resource even if belongs to another PF */ + bool b_force; + + /* Will be set as true if the resource is released */ + bool b_released; +}; + +/** + * qed_mcp_resc_unlock(): Releases MFW generic resource lock. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. + * + * Return: Int - 0 - Operation was successul. + */ +int +qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_resc_unlock_params *p_params); + +/** + * qed_mcp_resc_lock_default_init(): Default initialization for + * lock/unlock resource structs. + * + * @p_lock: lock params struct to be initialized; Can be NULL. + * @p_unlock: unlock params struct to be initialized; Can be NULL. + * @resource: the requested resource. + * @b_is_permanent: disable retries & aging when set. + * + * Return: Void. + */ +void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, + struct qed_resc_unlock_params *p_unlock, + enum qed_resc_lock + resource, bool b_is_permanent); + +/** + * qed_mcp_is_smart_an_supported(): Return whether management firmware + * support smart AN + * + * @p_hwfn: HW device data. + * + * Return: bool true if feature is supported. + */ +bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_get_capabilities(): Learn of supported MFW features; + * To be done during early init. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. + */ +int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_set_capabilities(): Inform MFW of set of features supported + * by driver. Should be done inside the content + * of the LOAD_REQ. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. + */ +int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_read_ufp_config(): Read ufp config from the shared memory. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. + */ +void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. + * + * Return: Int. 
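+ *
+ * Note: the nvm-image helpers above are expected to consult this cached
+ * shadow, so it should be populated before image attributes are queried.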
+ */ +int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_get_engine_config(): Get the engine affinity configuration. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. + */ +int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. + */ +int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + +/** + * qed_mcp_nvm_get_cfg(): Get NVM config attribute value. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @p_len: Len. + * + * Return: Int. + */ +int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 *p_len); + +/** + * qed_mcp_nvm_set_cfg(): Set NVM config attribute value. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @len: Len. + * + * Return: Int. + */ +int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, + u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, + u32 len); + +/** + * qed_mcp_is_esl_supported(): Return whether management firmware support ESL or not. + * + * @p_hwfn: hw function pointer + * + * Return: true if esl is supported, otherwise return false + */ +bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn); + +/** + * qed_mcp_get_esl_status(): Get enhanced system lockdown status + * + * @p_hwfn: hw function pointer + * @p_ptt: ptt resource pointer + * @active: ESL active status data pointer + * + * Return: 0 with esl status info on success, otherwise return error + */ +int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h new file mode 100644 index 0000000000..6459dd3feb --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h @@ -0,0 +1,2475 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2019-2021 Marvell International Ltd. + */ + +#ifndef _QED_MFW_HSI_H +#define _QED_MFW_HSI_H + +#define MFW_TRACE_SIGNATURE 0x25071946 + +/* The trace in the buffer */ +#define MFW_TRACE_EVENTID_MASK 0x00ffff +#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000 +#define MFW_TRACE_PRM_SIZE_OFFSET 16 +#define MFW_TRACE_ENTRY_SIZE 3 + +struct mcp_trace { + u32 signature; /* Help to identify that the trace is valid */ + u32 size; /* the size of the trace buffer in bytes */ + u32 curr_level; /* 2 - all will be written to the buffer + * 1 - debug trace will not be written + * 0 - just errors will be written to the buffer + */ + u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means + * mask it. + */ + + /* Warning: the following pointers are assumed to be 32bits as they are + * used only in the MFW. + */ + u32 trace_prod; /* The next trace will be written to this offset */ + u32 trace_oldest; /* The oldest valid trace starts at this offset + * (usually very close after the current producer). 
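+ * Valid entries therefore occupy the window from
+ * trace_oldest up to (but not including) trace_prod,
+ * wrapping modulo size; each entry starts with an
+ * MFW_TRACE_ENTRY_SIZE-byte header whose layout is
+ * described by the MFW_TRACE_* masks above.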
+ */ +}; + +#define VF_MAX_STATIC 192 +#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32) +#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) + +#define EXT_VF_MAX_STATIC 240 +#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1) +#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32)) +#define ADDED_VF_BITMAP_SIZE 2 + +#define MCP_GLOB_PATH_MAX 2 +#define MCP_PORT_MAX 2 +#define MCP_GLOB_PORT_MAX 4 +#define MCP_GLOB_FUNC_MAX 16 + +typedef u32 offsize_t; /* In DWORDS !!! */ +/* Offset from the beginning of the MCP scratchpad */ +#define OFFSIZE_OFFSET_SHIFT 0 +#define OFFSIZE_OFFSET_MASK 0x0000ffff +/* Size of specific element (not the whole array if any) */ +#define OFFSIZE_SIZE_SHIFT 16 +#define OFFSIZE_SIZE_MASK 0xffff0000 + +#define SECTION_OFFSET(_offsize) (((((_offsize) & \ + OFFSIZE_OFFSET_MASK) >> \ + OFFSIZE_OFFSET_SHIFT) << 2)) + +#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \ + OFFSIZE_SIZE_MASK) >> \ + OFFSIZE_SIZE_SHIFT) << 2) + +#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \ + SECTION_OFFSET((_offsize)) + \ + (QED_SECTION_SIZE((_offsize)) * (idx))) + +#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ + ((_pub_base) + offsetof(struct mcp_public_data, sections[_section])) + +/* PHY configuration */ +struct eth_phy_cfg { + u32 speed; +#define ETH_SPEED_AUTONEG 0x0 +#define ETH_SPEED_SMARTLINQ 0x8 + + u32 pause; +#define ETH_PAUSE_NONE 0x0 +#define ETH_PAUSE_AUTONEG 0x1 +#define ETH_PAUSE_RX 0x2 +#define ETH_PAUSE_TX 0x4 + + u32 adv_speed; + + u32 loopback_mode; +#define ETH_LOOPBACK_NONE 0x0 +#define ETH_LOOPBACK_INT_PHY 0x1 +#define ETH_LOOPBACK_EXT_PHY 0x2 +#define ETH_LOOPBACK_EXT 0x3 +#define ETH_LOOPBACK_MAC 0x4 +#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5 +#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6 +#define ETH_LOOPBACK_PCS_AH_ONLY 0x7 +#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8 +#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9 + + u32 eee_cfg; +#define EEE_CFG_EEE_ENABLED BIT(0) +#define EEE_CFG_TX_LPI BIT(1) +#define EEE_CFG_ADV_SPEED_1G BIT(2) +#define EEE_CFG_ADV_SPEED_10G BIT(3) +#define EEE_TX_TIMER_USEC_MASK 0xfffffff0 +#define EEE_TX_TIMER_USEC_OFFSET 4 +#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00 +#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100 +#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000 + + u32 link_modes; + + u32 fec_mode; +#define FEC_FORCE_MODE_MASK 0x000000ff +#define FEC_FORCE_MODE_OFFSET 0 +#define FEC_FORCE_MODE_NONE 0x00 +#define FEC_FORCE_MODE_FIRECODE 0x01 +#define FEC_FORCE_MODE_RS 0x02 +#define FEC_FORCE_MODE_AUTO 0x07 +#define FEC_EXTENDED_MODE_MASK 0xffffff00 +#define FEC_EXTENDED_MODE_OFFSET 8 +#define ETH_EXT_FEC_NONE 0x00000000 +#define ETH_EXT_FEC_10G_NONE 0x00000100 +#define ETH_EXT_FEC_10G_BASE_R 0x00000200 +#define ETH_EXT_FEC_25G_NONE 0x00000400 +#define ETH_EXT_FEC_25G_BASE_R 0x00000800 +#define ETH_EXT_FEC_25G_RS528 0x00001000 +#define ETH_EXT_FEC_40G_NONE 0x00002000 +#define ETH_EXT_FEC_40G_BASE_R 0x00004000 +#define ETH_EXT_FEC_50G_NONE 0x00008000 +#define ETH_EXT_FEC_50G_BASE_R 0x00010000 +#define ETH_EXT_FEC_50G_RS528 0x00020000 +#define ETH_EXT_FEC_50G_RS544 0x00040000 +#define ETH_EXT_FEC_100G_NONE 0x00080000 +#define ETH_EXT_FEC_100G_BASE_R 0x00100000 +#define ETH_EXT_FEC_100G_RS528 0x00200000 +#define ETH_EXT_FEC_100G_RS544 0x00400000 + + u32 extended_speed; +#define ETH_EXT_SPEED_MASK 0x0000ffff +#define ETH_EXT_SPEED_OFFSET 0 +#define ETH_EXT_SPEED_NONE 0x00000001 +#define ETH_EXT_SPEED_1G 0x00000002 +#define ETH_EXT_SPEED_10G 0x00000004 
+#define ETH_EXT_SPEED_25G 0x00000008 +#define ETH_EXT_SPEED_40G 0x00000010 +#define ETH_EXT_SPEED_50G_BASE_R 0x00000020 +#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040 +#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080 +#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100 +#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200 +#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000 +#define ETH_EXT_ADV_SPEED_OFFSET 16 +#define ETH_EXT_ADV_SPEED_1G 0x00010000 +#define ETH_EXT_ADV_SPEED_10G 0x00020000 +#define ETH_EXT_ADV_SPEED_25G 0x00040000 +#define ETH_EXT_ADV_SPEED_40G 0x00080000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000 +#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000 +#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000 +#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000 +}; + +struct port_mf_cfg { + u32 dynamic_cfg; +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_SHIFT 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +struct eth_stats { + u64 r64; + u64 r127; + u64 r255; + u64 r511; + u64 r1023; + u64 r1518; + + union { + struct { + u64 r1522; + u64 r2047; + u64 r4095; + u64 r9216; + u64 r16383; + } bb0; + struct { + u64 unused1; + u64 r1519_to_max; + u64 unused2; + u64 unused3; + u64 unused4; + } ah0; + } u0; + + u64 rfcs; + u64 rxcf; + u64 rxpf; + u64 rxpp; + u64 raln; + u64 rfcr; + u64 rovr; + u64 rjbr; + u64 rund; + u64 rfrg; + u64 t64; + u64 t127; + u64 t255; + u64 t511; + u64 t1023; + u64 t1518; + + union { + struct { + u64 t2047; + u64 t4095; + u64 t9216; + u64 t16383; + } bb1; + struct { + u64 t1519_to_max; + u64 unused6; + u64 unused7; + u64 unused8; + } ah1; + } u1; + + u64 txpf; + u64 txpp; + + union { + struct { + u64 tlpiec; + u64 tncl; + } bb2; + struct { + u64 unused9; + u64 unused10; + } ah2; + } u2; + + u64 rbyte; + u64 rxuca; + u64 rxmca; + u64 rxbca; + u64 rxpok; + u64 tbyte; + u64 txuca; + u64 txmca; + u64 txbca; + u64 txcf; +}; + +struct pkt_type_cnt { + u64 tc_tx_pkt_cnt[8]; + u64 tc_tx_oct_cnt[8]; + u64 priority_rx_pkt_cnt[8]; + u64 priority_rx_oct_cnt[8]; +}; + +struct brb_stats { + u64 brb_truncate[8]; + u64 brb_discard[8]; +}; + +struct port_stats { + struct brb_stats brb; + struct eth_stats eth; +}; + +struct couple_mode_teaming { + u8 port_cmt[MCP_GLOB_PORT_MAX]; +#define PORT_CMT_IN_TEAM BIT(0) + +#define PORT_CMT_PORT_ROLE BIT(1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE BIT(1) + +#define PORT_CMT_TEAM_MASK BIT(2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 BIT(2) +}; + +#define LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 +#define MAX_TLV_BUFFER 128 + +enum _lldp_agent { + LLDP_NEAREST_BRIDGE = 0, + LLDP_NEAREST_NON_TPMR_BRIDGE, + LLDP_NEAREST_CUSTOMER_BRIDGE, + LLDP_MAX_LLDP_AGENTS +}; + +struct lldp_config_params_s { + u32 config; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_SHIFT 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct lldp_status_params_s { + u32 prefix_seq_num; + u32 status; + u32 
peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; +}; + +struct dcbx_ets_feature { + u32 flags; +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_SHIFT 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_SHIFT 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_SHIFT 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_SHIFT 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_SHIFT 4 +#define DCBX_OOO_TC_MASK 0x00000f00 +#define DCBX_OOO_TC_SHIFT 8 + u32 pri_tc_tbl[1]; +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1) +#define DCBX_CEE_STRICT_PRIORITY 0xf + u32 tc_bw_tbl[2]; + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 +}; + +#define DCBX_TCP_OOO_TC (4) +#define DCBX_TCP_OOO_K2_4PORT_TC (3) + +struct dcbx_app_priority_entry { + u32 entry; +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_SHIFT 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_SHIFT 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_SF_IEEE_MASK 0x0000f000 +#define DCBX_APP_SF_IEEE_SHIFT 12 +#define DCBX_APP_SF_IEEE_RESERVED 0 +#define DCBX_APP_SF_IEEE_ETHTYPE 1 +#define DCBX_APP_SF_IEEE_TCP_PORT 2 +#define DCBX_APP_SF_IEEE_UDP_PORT 3 +#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4 + +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_SHIFT 16 +}; + +struct dcbx_app_priority_feature { + u32 flags; +#define DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_SHIFT 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_SHIFT 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_SHIFT 2 +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_SHIFT 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_SHIFT 16 + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +struct dcbx_features { + struct dcbx_ets_feature ets; + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_SHIFT 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_SHIFT 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_SHIFT 14 +#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_SHIFT 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_SHIFT 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_SHIFT 17 + + struct dcbx_app_priority_feature app; +}; + +struct dcbx_local_params { + u32 config; +#define DCBX_CONFIG_VERSION_MASK 0x00000007 +#define DCBX_CONFIG_VERSION_SHIFT 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 
+#define DCBX_CONFIG_VERSION_STATIC 4 + + u32 flags; + struct dcbx_features features; +}; + +struct dcbx_mib { + u32 prefix_seq_num; + u32 flags; + struct dcbx_features features; + u32 suffix_seq_num; +}; + +struct lldp_system_tlvs_buffer_s { + u32 flags; +#define LLDP_SYSTEM_TLV_VALID_MASK 0x1 +#define LLDP_SYSTEM_TLV_VALID_OFFSET 0 +#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2 +#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1 +#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000 +#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16 + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +struct lldp_received_tlvs_s { + u32 prefix_seq_num; + u32 length; + u32 tlvs_buffer[MAX_TLV_BUFFER]; + u32 suffix_seq_num; +}; + +struct dcb_dscp_map { + u32 flags; +#define DCB_DSCP_ENABLE_MASK 0x1 +#define DCB_DSCP_ENABLE_SHIFT 0 +#define DCB_DSCP_ENABLE 1 + u32 dscp_pri_map[8]; +}; + +struct mcp_val64 { + u32 lo; + u32 hi; +}; + +struct generic_idc_msg_s { + u32 source_pf; + struct mcp_val64 msg; +}; + +struct pcie_stats_stc { + u32 sr_cnt_wr_byte_msb; + u32 sr_cnt_wr_byte_lsb; + u32 sr_cnt_wr_cnt; + u32 sr_cnt_rd_byte_msb; + u32 sr_cnt_rd_byte_lsb; + u32 sr_cnt_rd_cnt; +}; + +enum _attribute_commands_e { + ATTRIBUTE_CMD_READ = 0, + ATTRIBUTE_CMD_WRITE, + ATTRIBUTE_CMD_READ_CLEAR, + ATTRIBUTE_CMD_CLEAR, + ATTRIBUTE_NUM_OF_COMMANDS +}; + +struct public_global { + u32 max_path; + u32 max_ports; +#define MODE_1P 1 +#define MODE_2P 2 +#define MODE_3P 3 +#define MODE_4P 4 + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; + s32 external_temperature; + u32 mdump_reason; + u32 ext_phy_upgrade_fw; + u8 runtime_port_swap_map[MODE_4P]; + u32 data_ptr; + u32 data_size; + u32 bmb_error_status_cnt; + u32 bmb_jumbo_frame_cnt; + u32 sent_to_bmc_cnt; + u32 handled_by_mfw; + u32 sent_to_nw_cnt; + u32 to_bmc_kb_per_second; + u32 bcast_dropped_to_bmc_cnt; + u32 mcast_dropped_to_bmc_cnt; + u32 ucast_dropped_to_bmc_cnt; + u32 ncsi_response_failure_cnt; + u32 device_attr; + u32 vpd_warning; +}; + +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + u32 accum_ack; +}; + +struct public_path { + struct fw_flr_mb flr_mb; + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; + + u32 process_kill; +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_SHIFT 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 +#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit)) +}; + +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct dci_npiv_settings { + u8 npiv_wwpn[FC_NPIV_WWPN_SIZE]; + u8 npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct dci_fc_npiv_cfg { + /* hdr used internally by the MFW */ + u32 hdr; + u32 num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct dci_fc_npiv_tbl { + struct dci_fc_npiv_cfg fc_npiv_cfg; + struct dci_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +struct pause_flood_monitor { + u8 period_cnt; + u8 any_brb_prs_packet_hist; + u8 any_brb_block_is_full_hist; + u8 flags; + u32 num_of_state_changes; +}; + +struct public_port { + u32 validity_map; + + u32 link_status; +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) 
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 +#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 + +#define LINK_STATUS_FEC_MODE_MASK 0x38000000 +#define LINK_STATUS_FEC_MODE_NONE (0 << 27) +#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27) +#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27) +#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30) + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff + + u32 lfa_status; + u32 link_change_count; + + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + + /* DCBX related MIB */ + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; + + u32 fc_npiv_nvram_tbl_addr; + u32 fc_npiv_nvram_tbl_size; + + u32 transceiver_data; +#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff +#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000 +#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define ETH_TRANSCEIVER_STATE_VALID 0x00000003 +#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008 +#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10 +#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00 +#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8 +#define ETH_TRANSCEIVER_TYPE_NONE 0x00 +#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff +#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01 +#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02 +#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03 +#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04 +#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05 +#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06 +#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08 +#define 
ETH_TRANSCEIVER_TYPE_10G_PCC 0x09 +#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a +#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b +#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e +#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f +#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12 +#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13 +#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15 +#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18 +#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a +#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c +#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d +#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e +#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f +#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20 +#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21 +#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39 +#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a + + u32 wol_info; + u32 wol_pkt_len; + u32 wol_pkt_details; + struct dcb_dscp_map dcb_dscp_map; + + u32 eee_status; +#define EEE_ACTIVE_BIT BIT(0) +#define EEE_LD_ADV_STATUS_MASK 0x000000f0 +#define EEE_LD_ADV_STATUS_OFFSET 4 +#define EEE_1G_ADV BIT(1) +#define EEE_10G_ADV BIT(2) +#define EEE_LP_ADV_STATUS_MASK 0x00000f00 +#define EEE_LP_ADV_STATUS_OFFSET 8 +#define EEE_SUPPORTED_SPEED_MASK 0x0000f000 +#define EEE_SUPPORTED_SPEED_OFFSET 12 +#define EEE_1G_SUPPORTED BIT(1) +#define EEE_10G_SUPPORTED BIT(2) + + u32 eee_remote; +#define EEE_REMOTE_TW_TX_MASK 0x0000ffff +#define EEE_REMOTE_TW_TX_OFFSET 0 +#define EEE_REMOTE_TW_RX_MASK 0xffff0000 +#define EEE_REMOTE_TW_RX_OFFSET 16 + + u32 module_info; + + u32 oem_cfg_port; +#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003 +#define OEM_CFG_CHANNEL_TYPE_OFFSET 0 +#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1 +#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2 +#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C +#define OEM_CFG_SCHED_TYPE_OFFSET 2 +#define OEM_CFG_SCHED_TYPE_ETS 0x1 +#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2 + + struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS]; + u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA]; + u32 phy_module_temperature; + u32 nig_reg_stat_rx_bmb_packet; + u32 nig_reg_rx_llh_ncsi_mcp_mask; + u32 nig_reg_rx_llh_ncsi_mcp_mask_2; + struct pause_flood_monitor pause_flood_monitor; + u32 nig_drain_cnt; + struct pkt_type_cnt pkt_tc_priority_cnt; +}; + +#define MCP_DRV_VER_STR_SIZE 16 +#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) +#define MCP_DRV_NVM_BUF_LEN 32 +struct drv_version_stc { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct public_func { + u32 iscsi_boot_signature; + u32 iscsi_boot_block_offset; + + u32 mtu_size; + + u32 c2s_pcp_map_lower; + 
u32 c2s_pcp_map_upper; + u32 c2s_pcp_map_default; + + struct generic_idc_msg_s generic_idc_msg; + + u32 num_of_msix; + + u32 config; +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 +#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010 +#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020 +#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030 + +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 + + u32 status; +#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001 + + u32 mac_upper; +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 fcoe_wwn_port_name_upper; + u32 fcoe_wwn_port_name_lower; + + u32 fcoe_wwn_node_name_upper; + u32 fcoe_wwn_node_name_lower; + + u32 ovlan_stag; +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + + u32 pf_allocation; + + u32 preserve_data; + + u32 driver_last_activity_ts; + + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; + + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 + +#define LOAD_REQ_HSI_VERSION 2 +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \ + DRV_ID_MCP_HSI_VER_SHIFT) + +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT) + + u32 oem_cfg_func; +#define OEM_CFG_FUNC_TC_MASK 0x0000000F +#define OEM_CFG_FUNC_TC_OFFSET 0 +#define OEM_CFG_FUNC_TC_0 0x0 +#define OEM_CFG_FUNC_TC_1 0x1 +#define OEM_CFG_FUNC_TC_2 0x2 +#define OEM_CFG_FUNC_TC_3 0x3 +#define OEM_CFG_FUNC_TC_4 0x4 +#define OEM_CFG_FUNC_TC_5 0x5 +#define OEM_CFG_FUNC_TC_6 0x6 +#define OEM_CFG_FUNC_TC_7 0x7 + +#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1 +#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2 + + struct drv_version_stc drv_ver; +}; + +struct mcp_mac { + u32 mac_upper; + u32 mac_lower; +}; + +struct mcp_file_att { + u32 nvm_start_addr; + u32 len; +}; + +struct bist_nvm_image_att { + u32 return_code; + u32 image_type; + u32 nvm_start_addr; + u32 len; +}; + +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct fcoe_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u32 fcs_err; + u32 login_failure; +}; + +struct iscsi_stats_stc { + u64 rx_pdus; + u64 tx_pdus; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct rdma_stats_stc { + u64 rx_pkts; + u64 tx_pkts; + u64 rx_bytes; + u64 tx_bytes; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +struct fcoe_cap_stc { + u32 
max_ios; + u32 max_log; + u32 max_exch; + u32 max_npiv; + u32 max_tgt; + u32 max_outstnd; +}; + +#define MAX_NUM_OF_SENSORS 7 +struct temperature_status_stc { + u32 num_of_sensors; + u32 sensor[MAX_NUM_OF_SENSORS]; +}; + +/* crash dump configuration header */ +struct mdump_config_stc { + u32 version; + u32 config; + u32 epoc; + u32 num_of_logs; + u32 valid_logs; +}; + +enum resource_id_enum { + RESOURCE_NUM_SB_E = 0, + RESOURCE_NUM_L2_QUEUE_E = 1, + RESOURCE_NUM_VPORT_E = 2, + RESOURCE_NUM_VMQ_E = 3, + RESOURCE_FACTOR_NUM_RSS_PF_E = 4, + RESOURCE_FACTOR_RSS_PER_VF_E = 5, + RESOURCE_NUM_RL_E = 6, + RESOURCE_NUM_PQ_E = 7, + RESOURCE_NUM_VF_E = 8, + RESOURCE_VFC_FILTER_E = 9, + RESOURCE_ILT_E = 10, + RESOURCE_CQS_E = 11, + RESOURCE_GFT_PROFILES_E = 12, + RESOURCE_NUM_TC_E = 13, + RESOURCE_NUM_RSS_ENGINES_E = 14, + RESOURCE_LL2_QUEUE_E = 15, + RESOURCE_RDMA_STATS_QUEUE_E = 16, + RESOURCE_BDQ_E = 17, + RESOURCE_QCN_E = 18, + RESOURCE_LLH_FILTER_E = 19, + RESOURCE_VF_MAC_ADDR = 20, + RESOURCE_LL2_CQS_E = 21, + RESOURCE_VF_CNQS = 22, + RESOURCE_MAX_NUM, + RESOURCE_NUM_INVALID = 0xFFFFFFFF +}; + +/* Resource ID is to be filled by the driver in the MB request + * Size, offset & flags to be filled by the MFW in the MB response + */ +struct resource_info { + enum resource_id_enum res_id; + u32 size; /* number of allocated resources */ + u32 offset; /* Offset of the 1st resource */ + u32 vf_size; + u32 vf_offset; + u32 flags; +#define RESOURCE_ELEMENT_STRICT BIT(0) +}; + +struct mcp_wwn { + u32 wwn_upper; + u32 wwn_lower; +}; + +#define DRV_ROLE_NONE 0 +#define DRV_ROLE_PREBOOT 1 +#define DRV_ROLE_OS 2 +#define DRV_ROLE_KDUMP 3 + +struct load_req_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_REQ_ROLE_MASK 0x000000FF +#define LOAD_REQ_ROLE_SHIFT 0 +#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00 +#define LOAD_REQ_LOCK_TO_SHIFT 8 +#define LOAD_REQ_LOCK_TO_DEFAULT 0 +#define LOAD_REQ_LOCK_TO_NONE 255 +#define LOAD_REQ_FORCE_MASK 0x000F0000 +#define LOAD_REQ_FORCE_SHIFT 16 +#define LOAD_REQ_FORCE_NONE 0 +#define LOAD_REQ_FORCE_PF 1 +#define LOAD_REQ_FORCE_ALL 2 +#define LOAD_REQ_FLAGS0_MASK 0x00F00000 +#define LOAD_REQ_FLAGS0_SHIFT 20 +#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0) +}; + +struct load_rsp_stc { + u32 drv_ver_0; + u32 drv_ver_1; + u32 fw_ver; + u32 misc0; +#define LOAD_RSP_ROLE_MASK 0x000000FF +#define LOAD_RSP_ROLE_SHIFT 0 +#define LOAD_RSP_HSI_MASK 0x0000FF00 +#define LOAD_RSP_HSI_SHIFT 8 +#define LOAD_RSP_FLAGS0_MASK 0x000F0000 +#define LOAD_RSP_FLAGS0_SHIFT 16 +#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0) +}; + +struct mdump_retain_data_stc { + u32 valid; + u32 epoch; + u32 pf; + u32 status; +}; + +struct attribute_cmd_write_stc { + u32 val; + u32 mask; + u32 offset; +}; + +struct lldp_stats_stc { + u32 tx_frames_total; + u32 rx_frames_total; + u32 rx_frames_discarded; + u32 rx_age_outs; +}; + +struct get_att_ctrl_stc { + u32 disabled_attns; + u32 controllable_attns; +}; + +struct trace_filter_stc { + u32 level; + u32 modules; +}; + +union drv_union_data { + struct mcp_mac wol_mac; + + struct eth_phy_cfg drv_phy_cfg; + + struct mcp_val64 val64; + + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + + struct mcp_file_att file_att; + + u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS]; + + struct drv_version_stc drv_version; + + struct lan_stats_stc lan_stats; + struct fcoe_stats_stc fcoe_stats; + struct iscsi_stats_stc iscsi_stats; + struct rdma_stats_stc rdma_stats; + struct ocbb_data_stc ocbb_info; + struct temperature_status_stc temp_info; + struct resource_info 
resource; + struct bist_nvm_image_att nvm_image_att; + struct mdump_config_stc mdump_config; + struct mcp_mac lldp_mac; + struct mcp_wwn fcoe_fabric_name; + u32 dword; + + struct load_req_stc load_req; + struct load_rsp_stc load_rsp; + struct mdump_retain_data_stc mdump_retain; + struct attribute_cmd_write_stc attribute_cmd_write; + struct lldp_stats_stc lldp_stats; + struct pcie_stats_stc pcie_stats; + + struct get_att_ctrl_stc get_att_ctrl; + struct fcoe_cap_stc fcoe_cap; + struct trace_filter_stc trace_filter; +}; + +struct public_drv_mb { + u32 drv_mb_header; +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define DRV_MSG_SEQ_NUMBER_OFFSET 0 +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_OFFSET 16 + + u32 drv_mb_param; + + u32 fw_mb_header; +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff +#define FW_MSG_SEQ_NUMBER_OFFSET 0 +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_OFFSET 16 + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + union drv_union_data union_data; +}; + +#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET) +enum drv_msg_code_enum { + DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001), + DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002), + DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003), + DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005), + DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006), + DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009), + DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f), + DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010), + DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011), + DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012), + DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013), + DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016), + DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a), + DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e), + DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020), + DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023), + DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025), + DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b), + DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e), + DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f), + DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030), + DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031), + DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037), + DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e), + DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f), + DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201), + DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000), + DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100), + DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200), + DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300), + DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000), + DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100), + DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200), + DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300), + DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500), + DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600), + DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700), + DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800), + DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900), + DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000), + 
DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100), + DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200), + DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300), + DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400), + DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500), + DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800), + DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900), + DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00), + DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100), + DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300), + DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000), + DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001), + DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002), + DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004), + DRV_MSG_CODE_GET_MANAGEMENT_STATUS = DRV_MSG_CODE(0xc007), +}; + +#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 + +/* DRV_MSG_CODE_RETAIN_VMAC parameters */ +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0 +#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf + +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3 +#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4 + +#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce + +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 +#define DRV_MSG_CODE_STATS_TYPE_FCOE 2 +#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3 +#define DRV_MSG_CODE_STATS_TYPE_RDMA 4 + +#define BW_MAX_MASK 0x000000ff +#define BW_MAX_OFFSET 0 +#define BW_MIN_MASK 0x0000ff00 +#define BW_MIN_OFFSET 8 + +#define DRV_MSG_FAN_FAILURE_TYPE BIT(0) +#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1) + +#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F +#define RESOURCE_CMD_REQ_RESC_SHIFT 0 +#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0 +#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5 +#define RESOURCE_OPCODE_REQ 1 +#define RESOURCE_OPCODE_REQ_WO_AGING 2 +#define RESOURCE_OPCODE_REQ_W_AGING 3 +#define RESOURCE_OPCODE_RELEASE 4 +#define RESOURCE_OPCODE_FORCE_RELEASE 5 +#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00 +#define RESOURCE_CMD_REQ_AGE_SHIFT 8 + +#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF +#define RESOURCE_CMD_RSP_OWNER_SHIFT 0 +#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700 +#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8 +#define RESOURCE_OPCODE_GNT 1 +#define RESOURCE_OPCODE_BUSY 2 +#define RESOURCE_OPCODE_RELEASED 3 +#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4 +#define RESOURCE_OPCODE_WRONG_OWNER 5 +#define RESOURCE_OPCODE_UNKNOWN_CMD 255 + +#define RESOURCE_DUMP 0 + +/* DRV_MSG_CODE_MDUMP_CMD parameters */ +#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff +#define DRV_MSG_CODE_MDUMP_ACK 0x01 +#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02 +#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03 +#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04 +#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05 +#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06 +#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07 +#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08 + +#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a + +#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b +#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c +#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d + +/* DRV_MSG_CODE_MDUMP_CMD options */ +#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00 +#define 
DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8 +#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100 + +/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */ +#define DRV_MB_PARAM_ADDR_SHIFT 0 +#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF +#define DRV_MB_PARAM_DEVAD_SHIFT 16 +#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000 +#define DRV_MB_PARAM_PORT_SHIFT 21 +#define DRV_MB_PARAM_PORT_MASK 0x00600000 + +/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */ +#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0 +#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF +#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8 +#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300 +#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16 +#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000 + +/* UNLOAD_REQ params */ +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 + +/* UNLOAD_DONE_params */ +#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 + +/* INIT_PHY params */ +#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 +#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 + +/* LLDP / DCBX params*/ +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 +#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0 +#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4 +#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010 +#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4 + +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 + +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff +#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 + +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 +#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF +#define DRV_MB_PARAM_NVM_LEN_OFFSET 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 + +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 + +#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 + +#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define 
DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 +#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \ + DRV_MB_PARAM_WOL_DISABLED | \ + DRV_MB_PARAM_WOL_ENABLED) +#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP +#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED +#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED + +#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \ + DRV_MB_PARAM_ESWITCH_MODE_VEB | \ + DRV_MB_PARAM_ESWITCH_MODE_VEPA) +#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0 +#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 +#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 + +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1 +#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0 + +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + +#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8 +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000 + + /* Resource Allocation params - Driver version support */ +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0 +#define DRV_MB_PARAM_BIST_REGISTER_TEST 1 +#define DRV_MB_PARAM_BIST_CLOCK_TEST 2 +#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3 +#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4 + +#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0 +#define DRV_MB_PARAM_BIST_RC_PASSED 1 +#define DRV_MB_PARAM_BIST_RC_FAILED 2 +#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3 + +#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0 +#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8 +#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00 + +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004 +#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008 +#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000 + +/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */ +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0 +#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff + +/* Driver attributes params */ +#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0 +#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff +#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24 +#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000 + +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff +#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17 +#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000 +#define 
DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18 +#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19 +#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21 +#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24 +#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000 + +/*DRV_MSG_CODE_GET_PERM_MAC parametres*/ +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3 +#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8 +#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00 + +#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET) +enum fw_msg_code_enum { + FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000), + FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001), + FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040), + FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011), + FW_MSG_CODE_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017), + FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016), + FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002), + FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003), + FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080), + FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087), + FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010), + FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011), + FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012), + FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021), + FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022), + FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023), + FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030), + FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031), + FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110), + FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011), + FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012), + FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013), + FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110), + FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400), + FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500), + FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00), + FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001), + FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a), + FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b), +}; + +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16 +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff +#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0 + +/* Get PF RDMA protocol command response */ +#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0 +#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1 +#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2 +#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3 + +/* Get MFW feature support response */ +#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0) +#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1) +#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2) +#define 
FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3) +#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4) +#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5) +#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6) +#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7) +#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8) +#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9) +#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16) +#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17) +#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18) +#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19) + +#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001 + +#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0) + +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002 +#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008 +#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3 + +#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff +#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0 + +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 + +enum MFW_DRV_MSG_TYPE { + MFW_DRV_MSG_LINK_CHANGE, + MFW_DRV_MSG_FLR_FW_ACK_FAILED, + MFW_DRV_MSG_VF_DISABLED, + MFW_DRV_MSG_LLDP_DATA_UPDATED, + MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, + MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, + MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED, + MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE, + MFW_DRV_MSG_GET_TLV_REQ, + MFW_DRV_MSG_OEM_CFG_UPDATE, + MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED, + MFW_DRV_MSG_GENERIC_IDC, + MFW_DRV_MSG_XCVR_TX_FAULT, + MFW_DRV_MSG_XCVR_RX_LOS, + MFW_DRV_MSG_GET_FCOE_CAP, + MFW_DRV_MSG_GEN_LINK_DUMP, + MFW_DRV_MSG_GEN_IDLE_CHK, + MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED, + MFW_DRV_MSG_MAX +}; + +#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) + +struct public_mfw_mb { + u32 sup_msgs; + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +}; + +enum public_sections { + PUBLIC_DRV_MB, + PUBLIC_MFW_MB, + PUBLIC_GLOBAL, + PUBLIC_PATH, + PUBLIC_PORT, + PUBLIC_FUNC, + PUBLIC_MAX_SECTIONS +}; + +struct drv_ver_info_stc { + u32 ver; + u8 name[32]; +}; + +/* Runtime data needs about 1/2K. We use 2K to be on the safe side. + * Please make sure data does not exceed this size. 
+ */ +#define NUM_RUNTIME_DWORDS 16 +struct drv_init_hw_stc { + u32 init_hw_bitmask[NUM_RUNTIME_DWORDS]; + u32 init_hw_data[NUM_RUNTIME_DWORDS * 32]; +}; + +struct mcp_public_data { + u32 num_sections; + u32 sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; +}; + +#define I2C_TRANSCEIVER_ADDR 0xa0 +#define MAX_I2C_TRANSACTION_SIZE 16 +#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256 + +/* OCBB definitions */ +enum tlvs { + /* Category 1: Device Properties */ + DRV_TLV_CLP_STR, + DRV_TLV_CLP_STR_CTD, + /* Category 6: Device Configuration */ + DRV_TLV_SCSI_TO, + DRV_TLV_R_T_TOV, + DRV_TLV_R_A_TOV, + DRV_TLV_E_D_TOV, + DRV_TLV_CR_TOV, + DRV_TLV_BOOT_TYPE, + /* Category 8: Port Configuration */ + DRV_TLV_NPIV_ENABLED, + /* Category 10: Function Configuration */ + DRV_TLV_FEATURE_FLAGS, + DRV_TLV_LOCAL_ADMIN_ADDR, + DRV_TLV_ADDITIONAL_MAC_ADDR_1, + DRV_TLV_ADDITIONAL_MAC_ADDR_2, + DRV_TLV_LSO_MAX_OFFLOAD_SIZE, + DRV_TLV_LSO_MIN_SEGMENT_COUNT, + DRV_TLV_PROMISCUOUS_MODE, + DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG, + DRV_TLV_FLEX_NIC_OUTER_VLAN_ID, + DRV_TLV_OS_DRIVER_STATES, + DRV_TLV_PXE_BOOT_PROGRESS, + /* Category 12: FC/FCoE Configuration */ + DRV_TLV_NPIV_STATE, + DRV_TLV_NUM_OF_NPIV_IDS, + DRV_TLV_SWITCH_NAME, + DRV_TLV_SWITCH_PORT_NUM, + DRV_TLV_SWITCH_PORT_ID, + DRV_TLV_VENDOR_NAME, + DRV_TLV_SWITCH_MODEL, + DRV_TLV_SWITCH_FW_VER, + DRV_TLV_QOS_PRIORITY_PER_802_1P, + DRV_TLV_PORT_ALIAS, + DRV_TLV_PORT_STATE, + DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_LINK_FAILURE_COUNT, + DRV_TLV_FCOE_BOOT_PROGRESS, + /* Category 13: iSCSI Configuration */ + DRV_TLV_TARGET_LLMNR_ENABLED, + DRV_TLV_HEADER_DIGEST_FLAG_ENABLED, + DRV_TLV_DATA_DIGEST_FLAG_ENABLED, + DRV_TLV_AUTHENTICATION_METHOD, + DRV_TLV_ISCSI_BOOT_TARGET_PORTAL, + DRV_TLV_MAX_FRAME_SIZE, + DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE, + DRV_TLV_ISCSI_BOOT_PROGRESS, + /* Category 20: Device Data */ + DRV_TLV_PCIE_BUS_RX_UTILIZATION, + DRV_TLV_PCIE_BUS_TX_UTILIZATION, + DRV_TLV_DEVICE_CPU_CORES_UTILIZATION, + DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED, + DRV_TLV_NCSI_RX_BYTES_RECEIVED, + DRV_TLV_NCSI_TX_BYTES_SENT, + /* Category 22: Base Port Data */ + DRV_TLV_RX_DISCARDS, + DRV_TLV_RX_ERRORS, + DRV_TLV_TX_ERRORS, + DRV_TLV_TX_DISCARDS, + DRV_TLV_RX_FRAMES_RECEIVED, + DRV_TLV_TX_FRAMES_SENT, + /* Category 23: FC/FCoE Port Data */ + DRV_TLV_RX_BROADCAST_PACKETS, + DRV_TLV_TX_BROADCAST_PACKETS, + /* Category 28: Base Function Data */ + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4, + DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6, + DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_PF_RX_FRAMES_RECEIVED, + DRV_TLV_RX_BYTES_RECEIVED, + DRV_TLV_PF_TX_FRAMES_SENT, + DRV_TLV_TX_BYTES_SENT, + DRV_TLV_IOV_OFFLOAD, + DRV_TLV_PCI_ERRORS_CAP_ID, + DRV_TLV_UNCORRECTABLE_ERROR_STATUS, + DRV_TLV_UNCORRECTABLE_ERROR_MASK, + DRV_TLV_CORRECTABLE_ERROR_STATUS, + DRV_TLV_CORRECTABLE_ERROR_MASK, + DRV_TLV_PCI_ERRORS_AECC_REGISTER, + DRV_TLV_TX_QUEUES_EMPTY, + DRV_TLV_RX_QUEUES_EMPTY, + DRV_TLV_TX_QUEUES_FULL, + DRV_TLV_RX_QUEUES_FULL, + /* Category 29: FC/FCoE Function Data */ + DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + 
DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_FCOE_RX_FRAMES_RECEIVED, + DRV_TLV_FCOE_RX_BYTES_RECEIVED, + DRV_TLV_FCOE_TX_FRAMES_SENT, + DRV_TLV_FCOE_TX_BYTES_SENT, + DRV_TLV_CRC_ERROR_COUNT, + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_1_TIMESTAMP, + DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_2_TIMESTAMP, + DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_3_TIMESTAMP, + DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_4_TIMESTAMP, + DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_CRC_ERROR_5_TIMESTAMP, + DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT, + DRV_TLV_LOSS_OF_SIGNAL_ERRORS, + DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT, + DRV_TLV_DISPARITY_ERROR_COUNT, + DRV_TLV_CODE_VIOLATION_ERROR_COUNT, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_TIMESTAMP, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3, + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4, + DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP, + DRV_TLV_LAST_FLOGI_RJT, + DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP, + DRV_TLV_FDISCS_SENT_COUNT, + DRV_TLV_FDISC_ACCS_RECEIVED, + DRV_TLV_FDISC_RJTS_RECEIVED, + DRV_TLV_PLOGI_SENT_COUNT, + DRV_TLV_PLOGI_ACCS_RECEIVED, + DRV_TLV_PLOGI_RJTS_RECEIVED, + DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_1_TIMESTAMP, + DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_2_TIMESTAMP, + DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_3_TIMESTAMP, + DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_4_TIMESTAMP, + DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID, + DRV_TLV_PLOGI_5_TIMESTAMP, + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_1_ACC_TIMESTAMP, + DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_2_ACC_TIMESTAMP, + DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_3_ACC_TIMESTAMP, + DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_4_ACC_TIMESTAMP, + DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID, + DRV_TLV_PLOGI_5_ACC_TIMESTAMP, + DRV_TLV_LOGOS_ISSUED, + DRV_TLV_LOGO_ACCS_RECEIVED, + DRV_TLV_LOGO_RJTS_RECEIVED, + DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_1_TIMESTAMP, + DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_2_TIMESTAMP, + DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_3_TIMESTAMP, + DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_4_TIMESTAMP, + DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID, + DRV_TLV_LOGO_5_TIMESTAMP, + DRV_TLV_LOGOS_RECEIVED, + DRV_TLV_ACCS_ISSUED, + DRV_TLV_PRLIS_ISSUED, + DRV_TLV_ACCS_RECEIVED, + DRV_TLV_ABTS_SENT_COUNT, + DRV_TLV_ABTS_ACCS_RECEIVED, + DRV_TLV_ABTS_RJTS_RECEIVED, + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_1_TIMESTAMP, + DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_2_TIMESTAMP, + DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_3_TIMESTAMP, + DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_4_TIMESTAMP, + DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID, + DRV_TLV_ABTS_5_TIMESTAMP, + DRV_TLV_RSCNS_RECEIVED, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3, + DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4, + DRV_TLV_LUN_RESETS_ISSUED, + DRV_TLV_ABORT_TASK_SETS_ISSUED, + DRV_TLV_TPRLOS_SENT, + DRV_TLV_NOS_SENT_COUNT, + DRV_TLV_NOS_RECEIVED_COUNT, + 
DRV_TLV_OLS_COUNT, + DRV_TLV_LR_COUNT, + DRV_TLV_LRR_COUNT, + DRV_TLV_LIP_SENT_COUNT, + DRV_TLV_LIP_RECEIVED_COUNT, + DRV_TLV_EOFA_COUNT, + DRV_TLV_EOFNI_COUNT, + DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT, + DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_BUSY_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT, + DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT, + DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT, + DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT, + DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT, + DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT, + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_1_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_2_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_3_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_4_TIMESTAMP, + DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ, + DRV_TLV_SCSI_CHECK_5_TIMESTAMP, + /* Category 30: iSCSI Function Data */ + DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH, + DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH, + DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED, + DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED, + DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT, + DRV_TLV_ISCSI_PDU_TX_BYTES_SENT, + DRV_TLV_RDMA_DRV_VERSION +}; + +#define I2C_DEV_ADDR_A2 0xa2 +#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60 +#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2 +#define SFP_EEPROM_A2_VCC_ADDR 0x62 +#define SFP_EEPROM_A2_VCC_SIZE 2 +#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64 +#define SFP_EEPROM_A2_TX_BIAS_SIZE 2 +#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66 +#define SFP_EEPROM_A2_TX_POWER_SIZE 2 +#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68 +#define SFP_EEPROM_A2_RX_POWER_SIZE 2 + +#define I2C_DEV_ADDR_A0 0xa0 +#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16 +#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2 +#define QSFP_EEPROM_A0_VCC_ADDR 0x1a +#define QSFP_EEPROM_A0_VCC_SIZE 2 +#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a +#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2 +#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32 +#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2 +#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22 +#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2 + +struct nvm_cfg_mac_address { + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + + u32 mac_addr_lo; +}; + +struct nvm_cfg1_glob { + u32 generic_cont0; +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 + + u32 engineering_change[3]; + u32 manufacturing_id; + u32 serial_number[4]; + u32 pcie_cfg; + u32 mgmt_traffic; + + u32 core_cfg; +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd +#define 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15 + + u32 e_lane_cfg1; + u32 e_lane_cfg2; + u32 f_lane_cfg1; + u32 f_lane_cfg2; + u32 mps10_preemphasis; + u32 mps10_driver_current; + u32 mps25_preemphasis; + u32 mps25_driver_current; + u32 pci_id; + u32 pci_subsys_id; + u32 bar; + u32 mps10_txfir_main; + u32 mps10_txfir_post; + u32 mps25_txfir_main; + u32 mps25_txfir_post; + u32 manufacture_ver; + u32 manufacture_time; + u32 led_global_settings; + u32 generic_cont1; + + u32 mbi_version; +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + + u32 mbi_date; + u32 misc_sig; + + u32 device_capabilities; +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8 +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10 + + u32 power_dissipated; + u32 power_consumed; + u32 efi_version; + u32 multi_network_modes_capability; + u32 nvm_cfg_version; + u32 nvm_cfg_new_option_seq; + u32 nvm_cfg_removed_option_seq; + u32 nvm_cfg_updated_value_seq; + u32 extended_serial_number[8]; + u32 option_kit_pn[8]; + u32 spare_pn[8]; + u32 mps25_active_txfir_pre; + u32 mps25_active_txfir_main; + u32 mps25_active_txfir_post; + u32 features; + u32 tx_rx_eq_25g_hlpc; + u32 tx_rx_eq_25g_llpc; + u32 tx_rx_eq_25g_ac; + u32 tx_rx_eq_10g_pc; + u32 tx_rx_eq_10g_ac; + u32 tx_rx_eq_1g; + u32 tx_rx_eq_25g_bt; + u32 tx_rx_eq_10g_bt; + u32 generic_cont4; + u32 preboot_debug_mode_std; + u32 preboot_debug_mode_ext; + u32 ext_phy_cfg1; + u32 clocks; + u32 pre2_generic_cont_1; + u32 pre2_generic_cont_2; + u32 pre2_generic_cont_3; + u32 tx_rx_eq_50g_hlpc; + u32 tx_rx_eq_50g_mlpc; + u32 tx_rx_eq_50g_llpc; + u32 tx_rx_eq_50g_ac; + u32 trace_modules; + u32 pcie_class_code_fcoe; + u32 pcie_class_code_iscsi; + u32 no_provisioned_mac; + u32 lowest_mbi_version; + u32 generic_cont5; + u32 pre2_generic_cont_4; + u32 reserved[40]; +}; + +struct nvm_cfg1_path { + u32 reserved[1]; +}; + +struct nvm_cfg1_port { + u32 rel_to_opt123; + u32 rel_to_opt124; + + u32 generic_cont0; +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4 + + u32 pcie_cfg; + u32 features; + + u32 speed_cap_mask; +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40 + + u32 link_settings; +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7 + + u32 phy_cfg; + u32 mgmt_traffic; + + u32 ext_phy; + /* EEE power saving mode */ +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2 +#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3 + + u32 mba_cfg1; + u32 mba_cfg2; + u32 vf_cfg; + struct nvm_cfg_mac_address lldp_mac_address; + u32 led_port_settings; + u32 transceiver_00; + u32 device_ids; + + u32 board_cfg; +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + + u32 mnm_10g_cap; + u32 mnm_10g_ctrl; + u32 mnm_10g_misc; + u32 mnm_25g_cap; + u32 mnm_25g_ctrl; + u32 mnm_25g_misc; + u32 mnm_40g_cap; + u32 mnm_40g_ctrl; + u32 mnm_40g_misc; + u32 mnm_50g_cap; + u32 mnm_50g_ctrl; + u32 mnm_50g_misc; + u32 mnm_100g_cap; + u32 mnm_100g_ctrl; + u32 mnm_100g_misc; + + u32 temperature; + u32 ext_phy_cfg1; + + u32 extended_speed; +#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff +#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16 +#define 
NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200 +#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400 + + u32 extended_fec_mode; + u32 port_generic_cont_01; + u32 port_generic_cont_02; + u32 phy_temp_monitor; + u32 reserved[109]; +}; + +struct nvm_cfg1_func { + struct nvm_cfg_mac_address mac_address; + u32 rsrv1; + u32 rsrv2; + u32 device_id; + u32 cmn_cfg; + u32 pci_cfg; + struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; + struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; + u32 preboot_generic_cfg; + u32 features; + u32 mf_mode_feature; + u32 reserved[6]; +}; + +struct nvm_cfg1 { + struct nvm_cfg1_glob glob; + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; +}; + +struct board_info { + u16 vendor_id; + u16 eth_did_suffix; + u16 sub_vendor_id; + u16 sub_device_id; + char *board_name; + char *friendly_name; +}; + +struct trace_module_info { + char *module_name; +}; + +#define NUM_TRACE_MODULES 25 + +enum nvm_cfg_sections { + NVM_CFG_SECTION_NVM_CFG1, + NVM_CFG_SECTION_MAX +}; + +struct nvm_cfg { + u32 num_sections; + u32 sections_offset[NVM_CFG_SECTION_MAX]; + struct nvm_cfg1 cfg1; +}; + +#define PORT_0 0 +#define PORT_1 1 +#define PORT_2 2 +#define PORT_3 3 + +extern struct spad_layout g_spad; +struct spad_layout { + struct nvm_cfg nvm_cfg; + struct mcp_public_data public_data; +}; + +#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */ + +#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE)) + +#define TO_OFFSIZE(_offset, _size) \ + ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \ + (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET))) + +enum spad_sections { + SPAD_SECTION_TRACE, + SPAD_SECTION_NVM_CFG, + SPAD_SECTION_PUBLIC, + SPAD_SECTION_PRIVATE, + SPAD_SECTION_MAX +}; + +#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \ + __builtin_offsetof(struct static_init, f)) + +/* This section is located at a fixed location in the beginning of the + * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade. + * All the rest of data has a floating location which differs from version to + * version, and is pointed by the mcp_meta_data below. + * Moreover, the spad_layout section is part of the MFW firmware, and is loaded + * with it from nvram in order to clear this portion. 
+ */ +struct static_init { + u32 num_sections; + offsize_t sections[SPAD_SECTION_MAX]; +#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_])))) + + u32 tim_hash[8]; +#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash))) + u32 tpu_hash[8]; +#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash))) + u32 secure_pcie_fw_ver; +#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver)))) + u32 secure_running_mfw; +#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw)))) + struct mcp_trace trace; +}; + +#define CRC_MAGIC_VALUE 0xDEBB20E3 +#define CRC32_POLYNOMIAL 0xEDB88320 +#define _KB(x) ((x) * 1024) +#define _MB(x) (_KB(x) * 1024) +#define NVM_CRC_SIZE (sizeof(u32)) +enum nvm_sw_arbitrator { + NVM_SW_ARB_HOST, + NVM_SW_ARB_MCP, + NVM_SW_ARB_UART, + NVM_SW_ARB_RESERVED +}; + +struct legacy_bootstrap_region { + u32 magic_value; +#define NVM_MAGIC_VALUE 0x669955aa + u32 sram_start_addr; + u32 code_len; + u32 code_start_addr; + u32 crc; +}; + +struct nvm_code_entry { + u32 image_type; + u32 nvm_start_addr; + u32 len; + u32 sram_start_addr; + u32 sram_run_addr; +}; + +enum nvm_image_type { + NVM_TYPE_TIM1 = 0x01, + NVM_TYPE_TIM2 = 0x02, + NVM_TYPE_MIM1 = 0x03, + NVM_TYPE_MIM2 = 0x04, + NVM_TYPE_MBA = 0x05, + NVM_TYPE_MODULES_PN = 0x06, + NVM_TYPE_VPD = 0x07, + NVM_TYPE_MFW_TRACE1 = 0x08, + NVM_TYPE_MFW_TRACE2 = 0x09, + NVM_TYPE_NVM_CFG1 = 0x0a, + NVM_TYPE_L2B = 0x0b, + NVM_TYPE_DIR1 = 0x0c, + NVM_TYPE_EAGLE_FW1 = 0x0d, + NVM_TYPE_FALCON_FW1 = 0x0e, + NVM_TYPE_PCIE_FW1 = 0x0f, + NVM_TYPE_HW_SET = 0x10, + NVM_TYPE_LIM = 0x11, + NVM_TYPE_AVS_FW1 = 0x12, + NVM_TYPE_DIR2 = 0x13, + NVM_TYPE_CCM = 0x14, + NVM_TYPE_EAGLE_FW2 = 0x15, + NVM_TYPE_FALCON_FW2 = 0x16, + NVM_TYPE_PCIE_FW2 = 0x17, + NVM_TYPE_AVS_FW2 = 0x18, + NVM_TYPE_INIT_HW = 0x19, + NVM_TYPE_DEFAULT_CFG = 0x1a, + NVM_TYPE_MDUMP = 0x1b, + NVM_TYPE_NVM_META = 0x1c, + NVM_TYPE_ISCSI_CFG = 0x1d, + NVM_TYPE_FCOE_CFG = 0x1f, + NVM_TYPE_ETH_PHY_FW1 = 0x20, + NVM_TYPE_ETH_PHY_FW2 = 0x21, + NVM_TYPE_BDN = 0x22, + NVM_TYPE_8485X_PHY_FW = 0x23, + NVM_TYPE_PUB_KEY = 0x24, + NVM_TYPE_RECOVERY = 0x25, + NVM_TYPE_PLDM = 0x26, + NVM_TYPE_UPK1 = 0x27, + NVM_TYPE_UPK2 = 0x28, + NVM_TYPE_MASTER_KC = 0x29, + NVM_TYPE_BACKUP_KC = 0x2a, + NVM_TYPE_HW_DUMP = 0x2b, + NVM_TYPE_HW_DUMP_OUT = 0x2c, + NVM_TYPE_BIN_NVM_META = 0x30, + NVM_TYPE_ROM_TEST = 0xf0, + NVM_TYPE_88X33X0_PHY_FW = 0x31, + NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32, + NVM_TYPE_IDLE_CHK = 0x33, + NVM_TYPE_MAX, +}; + +#define MAX_NVM_DIR_ENTRIES 100 + +struct nvm_dir_meta { + u32 dir_id; + u32 nvm_dir_addr; + u32 num_images; + u32 next_mfw_to_run; +}; + +struct nvm_dir { + s32 seq; +#define NVM_DIR_NEXT_MFW_MASK 0x00000001 +#define NVM_DIR_SEQ_MASK 0xfffffffe +#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK) +#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\ + ({ \ + _seq = (((_seq + 2) & \ + NVM_DIR_SEQ_MASK) | \ + (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\ + }) + +#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \ + NVM_DIR_SEQ_MASK) + + u32 num_images; + u32 rsrv; + struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */ +}; + +#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \ + ((_num_images) - 1) *\ + sizeof(struct nvm_code_entry) +\ + NVM_CRC_SIZE) + +struct nvm_vpd_image { + u32 format_revision; +#define VPD_IMAGE_VERSION 1 + + u8 vpd_data[1]; +}; + +#define DIR_ID_1 (0) +#define DIR_ID_2 (1) +#define MAX_DIR_IDS (2) + +#define MFW_BUNDLE_1 (0) +#define MFW_BUNDLE_2 (1) +#define MAX_MFW_BUNDLES (2) + 
+#define FLASH_PAGE_SIZE 0x1000 +#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) +#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200)) + +#define FPGA_MIM_MAX_SIZE (0x40000) + +#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \ + sizeof(struct legacy_bootstrap_region) \ + - NVM_RSV_SIZE) +#define LIM_OFFSET (NVM_OFFSET(lim_image)) +#define NVM_RSV_SIZE (44) +#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE) +#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\ + + (((idx) == NVM_TYPE_MIM2) ? \ + GET_MIM_MAX_SIZE(is_asic, is_e4)\ + : 0)) +#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \ + GET_MIM_MAX_SIZE(is_asic,\ + is_e4) * 2) + +union nvm_dir_union { + struct nvm_dir dir; + u8 page[FLASH_PAGE_SIZE]; +}; + +struct nvm_image { + struct legacy_bootstrap_region bootstrap; + u8 rsrv[NVM_RSV_SIZE]; + u8 lim_image[LIM_MAX_SIZE]; + union nvm_dir_union dir[MAX_MFW_BUNDLES]; +}; + +#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->(f))))) + +struct hw_set_info { + u32 reg_type; +#define GRC_REG_TYPE 1 +#define PHY_REG_TYPE 2 +#define PCI_REG_TYPE 4 + + u32 bank_num; + u32 pf_num; + u32 operation; +#define READ_OP 1 +#define WRITE_OP 2 +#define RMW_SET_OP 3 +#define RMW_CLR_OP 4 + + u32 reg_addr; + u32 reg_data; + + u32 reset_type; +#define POR_RESET_TYPE BIT(0) +#define HARD_RESET_TYPE BIT(1) +#define CORE_RESET_TYPE BIT(2) +#define MCP_RESET_TYPE BIT(3) +#define PERSET_ASSERT BIT(4) +#define PERSET_DEASSERT BIT(5) +}; + +struct hw_set_image { + u32 format_version; +#define HW_SET_IMAGE_VERSION 1 + u32 no_hw_sets; + struct hw_set_info hw_sets[1]; +}; + +#define MAX_SUPPORTED_NVM_OPTIONS 1000 + +#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff +#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0 +#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000 +#define NVM_META_BIN_OPTION_LEN_OFFSET 16 +#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000 +#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24 +#define NVM_META_BIN_OPTION_ENTITY_GLOB 0 +#define NVM_META_BIN_OPTION_ENTITY_PORT 1 +#define NVM_META_BIN_OPTION_ENTITY_FUNC 2 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1 +#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2 + +struct nvm_meta_bin_t { + u32 magic; +#define NVM_META_BIN_MAGIC 0x669955bb + u32 version; +#define NVM_META_BIN_VERSION 1 + u32 num_options; + u32 options[]; +}; +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c new file mode 100644 index 0000000000..f55eed092f --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -0,0 +1,1340 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright (c) 2019-2020 Marvell International Ltd. 
*/ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bug.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/vmalloc.h> +#include "qed.h" +#include "qed_hw.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" + +#define TLV_TYPE(p) (p[0]) +#define TLV_LENGTH(p) (p[1]) +#define TLV_FLAGS(p) (p[3]) + +#define QED_TLV_DATA_MAX (14) +struct qed_tlv_parsed_buf { + /* To be filled with the address to set in Value field */ + void *p_val; + + /* To be used internally in case the value has to be modified */ + u8 data[QED_TLV_DATA_MAX]; +}; + +static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group) +{ + switch (tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + case DRV_TLV_OS_DRIVER_STATES: + case DRV_TLV_PXE_BOOT_PROGRESS: + case DRV_TLV_RX_FRAMES_RECEIVED: + case DRV_TLV_RX_BYTES_RECEIVED: + case DRV_TLV_TX_FRAMES_SENT: + case DRV_TLV_TX_BYTES_SENT: + case DRV_TLV_NPIV_ENABLED: + case DRV_TLV_PCIE_BUS_RX_UTILIZATION: + case DRV_TLV_PCIE_BUS_TX_UTILIZATION: + case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION: + case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED: + case DRV_TLV_NCSI_RX_BYTES_RECEIVED: + case DRV_TLV_NCSI_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_GENERIC; + break; + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + case DRV_TLV_PROMISCUOUS_MODE: + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_IOV_OFFLOAD: + case DRV_TLV_TX_QUEUES_EMPTY: + case DRV_TLV_RX_QUEUES_EMPTY: + case DRV_TLV_TX_QUEUES_FULL: + case DRV_TLV_RX_QUEUES_FULL: + *tlv_group |= QED_MFW_TLV_ETH; + break; + case DRV_TLV_SCSI_TO: + case DRV_TLV_R_T_TOV: + case DRV_TLV_R_A_TOV: + case DRV_TLV_E_D_TOV: + case DRV_TLV_CR_TOV: + case DRV_TLV_BOOT_TYPE: + case DRV_TLV_NPIV_STATE: + case DRV_TLV_NUM_OF_NPIV_IDS: + case DRV_TLV_SWITCH_NAME: + case DRV_TLV_SWITCH_PORT_NUM: + case DRV_TLV_SWITCH_PORT_ID: + case DRV_TLV_VENDOR_NAME: + case DRV_TLV_SWITCH_MODEL: + case DRV_TLV_SWITCH_FW_VER: + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + case DRV_TLV_PORT_ALIAS: + case DRV_TLV_PORT_STATE: + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_LINK_FAILURE_COUNT: + case DRV_TLV_FCOE_BOOT_PROGRESS: + case DRV_TLV_RX_BROADCAST_PACKETS: + case DRV_TLV_TX_BROADCAST_PACKETS: + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + case DRV_TLV_FCOE_TX_FRAMES_SENT: + case DRV_TLV_FCOE_TX_BYTES_SENT: + case DRV_TLV_CRC_ERROR_COUNT: + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + case 
DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + case DRV_TLV_DISPARITY_ERROR_COUNT: + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + case DRV_TLV_LAST_FLOGI_RJT: + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + case DRV_TLV_FDISCS_SENT_COUNT: + case DRV_TLV_FDISC_ACCS_RECEIVED: + case DRV_TLV_FDISC_RJTS_RECEIVED: + case DRV_TLV_PLOGI_SENT_COUNT: + case DRV_TLV_PLOGI_ACCS_RECEIVED: + case DRV_TLV_PLOGI_RJTS_RECEIVED: + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_5_TIMESTAMP: + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + case DRV_TLV_LOGOS_ISSUED: + case DRV_TLV_LOGO_ACCS_RECEIVED: + case DRV_TLV_LOGO_RJTS_RECEIVED: + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_TIMESTAMP: + case DRV_TLV_LOGOS_RECEIVED: + case DRV_TLV_ACCS_ISSUED: + case DRV_TLV_PRLIS_ISSUED: + case DRV_TLV_ACCS_RECEIVED: + case DRV_TLV_ABTS_SENT_COUNT: + case DRV_TLV_ABTS_ACCS_RECEIVED: + case DRV_TLV_ABTS_RJTS_RECEIVED: + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_TIMESTAMP: + case DRV_TLV_RSCNS_RECEIVED: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + case DRV_TLV_LUN_RESETS_ISSUED: + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + case DRV_TLV_TPRLOS_SENT: + case DRV_TLV_NOS_SENT_COUNT: + case DRV_TLV_NOS_RECEIVED_COUNT: + case DRV_TLV_OLS_COUNT: + case DRV_TLV_LR_COUNT: + case DRV_TLV_LRR_COUNT: + case DRV_TLV_LIP_SENT_COUNT: + case DRV_TLV_LIP_RECEIVED_COUNT: + case DRV_TLV_EOFA_COUNT: + case DRV_TLV_EOFNI_COUNT: + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + case 
DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + *tlv_group = QED_MFW_TLV_FCOE; + break; + case DRV_TLV_TARGET_LLMNR_ENABLED: + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + case DRV_TLV_AUTHENTICATION_METHOD: + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + case DRV_TLV_MAX_FRAME_SIZE: + case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + case DRV_TLV_ISCSI_BOOT_PROGRESS: + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + *tlv_group |= QED_MFW_TLV_ISCSI; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* Returns size of the data buffer or, -1 in case TLV data is not available. */ +static int +qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_generic *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_FEATURE_FLAGS: + if (p_drv_buf->flags.b_set) { + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ? + 1 : 0; + p_buf->data[0] |= (p_drv_buf->flags.lso_supported ? 
+ 1 : 0) << 1; + p_buf->p_val = p_buf->data; + return QED_MFW_TLV_FLAGS_SIZE; + } + break; + + case DRV_TLV_LOCAL_ADMIN_ADDR: + case DRV_TLV_ADDITIONAL_MAC_ADDR_1: + case DRV_TLV_ADDITIONAL_MAC_ADDR_2: + { + int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR; + + if (p_drv_buf->mac_set[idx]) { + p_buf->p_val = p_drv_buf->mac[idx]; + return ETH_ALEN; + } + break; + } + + case DRV_TLV_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_eth *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_LSO_MAX_OFFLOAD_SIZE: + if (p_drv_buf->lso_maxoff_size_set) { + p_buf->p_val = &p_drv_buf->lso_maxoff_size; + return sizeof(p_drv_buf->lso_maxoff_size); + } + break; + case DRV_TLV_LSO_MIN_SEGMENT_COUNT: + if (p_drv_buf->lso_minseg_size_set) { + p_buf->p_val = &p_drv_buf->lso_minseg_size; + return sizeof(p_drv_buf->lso_minseg_size); + } + break; + case DRV_TLV_PROMISCUOUS_MODE: + if (p_drv_buf->prom_mode_set) { + p_buf->p_val = &p_drv_buf->prom_mode; + return sizeof(p_drv_buf->prom_mode); + } + break; + case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->tx_descr_size; + return sizeof(p_drv_buf->tx_descr_size); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->rx_descr_size; + return sizeof(p_drv_buf->rx_descr_size); + } + break; + case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG: + if (p_drv_buf->netq_count_set) { + p_buf->p_val = &p_drv_buf->netq_count; + return sizeof(p_drv_buf->netq_count); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4: + if (p_drv_buf->tcp4_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp4_offloads; + return sizeof(p_drv_buf->tcp4_offloads); + } + break; + case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6: + if (p_drv_buf->tcp6_offloads_set) { + p_buf->p_val = &p_drv_buf->tcp6_offloads; + return sizeof(p_drv_buf->tcp6_offloads); + } + break; + case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_descr_qdepth; + return sizeof(p_drv_buf->tx_descr_qdepth); + } + break; + case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_descr_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_descr_qdepth; + return sizeof(p_drv_buf->rx_descr_qdepth); + } + break; + case DRV_TLV_IOV_OFFLOAD: + if (p_drv_buf->iov_offload_set) { + p_buf->p_val = &p_drv_buf->iov_offload; + return sizeof(p_drv_buf->iov_offload); + } + break; + case DRV_TLV_TX_QUEUES_EMPTY: + if (p_drv_buf->txqs_empty_set) { + p_buf->p_val = &p_drv_buf->txqs_empty; + return sizeof(p_drv_buf->txqs_empty); + } + break; + case DRV_TLV_RX_QUEUES_EMPTY: + if (p_drv_buf->rxqs_empty_set) { + p_buf->p_val = &p_drv_buf->rxqs_empty; + return sizeof(p_drv_buf->rxqs_empty); + } + break; + case DRV_TLV_TX_QUEUES_FULL: + if 
(p_drv_buf->num_txqs_full_set) { + p_buf->p_val = &p_drv_buf->num_txqs_full; + return sizeof(p_drv_buf->num_txqs_full); + } + break; + case DRV_TLV_RX_QUEUES_FULL: + if (p_drv_buf->num_rxqs_full_set) { + p_buf->p_val = &p_drv_buf->num_rxqs_full; + return sizeof(p_drv_buf->num_rxqs_full); + } + break; + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time, + struct qed_tlv_parsed_buf *p_buf) +{ + if (!p_time->b_set) + return -1; + + /* Validate numbers */ + if (p_time->month > 12) + p_time->month = 0; + if (p_time->day > 31) + p_time->day = 0; + if (p_time->hour > 23) + p_time->hour = 0; + if (p_time->min > 59) + p_time->min = 0; + if (p_time->msec > 999) + p_time->msec = 0; + if (p_time->usec > 999) + p_time->usec = 0; + + memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX); + snprintf(p_buf->data, 14, "%d%d%d%d%d%d", + p_time->month, p_time->day, + p_time->hour, p_time->min, p_time->msec, p_time->usec); + + p_buf->p_val = p_buf->data; + + return QED_MFW_TLV_TIME_SIZE; +} + +static int +qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_fcoe *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + struct qed_mfw_tlv_time *p_time; + u8 idx; + + switch (p_tlv->tlv_type) { + case DRV_TLV_SCSI_TO: + if (p_drv_buf->scsi_timeout_set) { + p_buf->p_val = &p_drv_buf->scsi_timeout; + return sizeof(p_drv_buf->scsi_timeout); + } + break; + case DRV_TLV_R_T_TOV: + if (p_drv_buf->rt_tov_set) { + p_buf->p_val = &p_drv_buf->rt_tov; + return sizeof(p_drv_buf->rt_tov); + } + break; + case DRV_TLV_R_A_TOV: + if (p_drv_buf->ra_tov_set) { + p_buf->p_val = &p_drv_buf->ra_tov; + return sizeof(p_drv_buf->ra_tov); + } + break; + case DRV_TLV_E_D_TOV: + if (p_drv_buf->ed_tov_set) { + p_buf->p_val = &p_drv_buf->ed_tov; + return sizeof(p_drv_buf->ed_tov); + } + break; + case DRV_TLV_CR_TOV: + if (p_drv_buf->cr_tov_set) { + p_buf->p_val = &p_drv_buf->cr_tov; + return sizeof(p_drv_buf->cr_tov); + } + break; + case DRV_TLV_BOOT_TYPE: + if (p_drv_buf->boot_type_set) { + p_buf->p_val = &p_drv_buf->boot_type; + return sizeof(p_drv_buf->boot_type); + } + break; + case DRV_TLV_NPIV_STATE: + if (p_drv_buf->npiv_state_set) { + p_buf->p_val = &p_drv_buf->npiv_state; + return sizeof(p_drv_buf->npiv_state); + } + break; + case DRV_TLV_NUM_OF_NPIV_IDS: + if (p_drv_buf->num_npiv_ids_set) { + p_buf->p_val = &p_drv_buf->num_npiv_ids; + return sizeof(p_drv_buf->num_npiv_ids); + } + break; + case DRV_TLV_SWITCH_NAME: + if (p_drv_buf->switch_name_set) { + p_buf->p_val = &p_drv_buf->switch_name; + return sizeof(p_drv_buf->switch_name); + } + break; + case DRV_TLV_SWITCH_PORT_NUM: + if (p_drv_buf->switch_portnum_set) { + p_buf->p_val = &p_drv_buf->switch_portnum; + return sizeof(p_drv_buf->switch_portnum); + } + break; + case DRV_TLV_SWITCH_PORT_ID: + if (p_drv_buf->switch_portid_set) { + p_buf->p_val = &p_drv_buf->switch_portid; + return sizeof(p_drv_buf->switch_portid); + } + break; + case DRV_TLV_VENDOR_NAME: + if (p_drv_buf->vendor_name_set) { + p_buf->p_val = &p_drv_buf->vendor_name; + return sizeof(p_drv_buf->vendor_name); + } + break; + case DRV_TLV_SWITCH_MODEL: + if (p_drv_buf->switch_model_set) { + p_buf->p_val = &p_drv_buf->switch_model; + return sizeof(p_drv_buf->switch_model); + } + break; + case DRV_TLV_SWITCH_FW_VER: + if (p_drv_buf->switch_fw_version_set) { + p_buf->p_val = &p_drv_buf->switch_fw_version; + return sizeof(p_drv_buf->switch_fw_version); + } + break; + case DRV_TLV_QOS_PRIORITY_PER_802_1P: + if (p_drv_buf->qos_pri_set) { + 
p_buf->p_val = &p_drv_buf->qos_pri; + return sizeof(p_drv_buf->qos_pri); + } + break; + case DRV_TLV_PORT_ALIAS: + if (p_drv_buf->port_alias_set) { + p_buf->p_val = &p_drv_buf->port_alias; + return sizeof(p_drv_buf->port_alias); + } + break; + case DRV_TLV_PORT_STATE: + if (p_drv_buf->port_state_set) { + p_buf->p_val = &p_drv_buf->port_state; + return sizeof(p_drv_buf->port_state); + } + break; + case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_tx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_tx_descr_size; + return sizeof(p_drv_buf->fip_tx_descr_size); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->fip_rx_descr_size_set) { + p_buf->p_val = &p_drv_buf->fip_rx_descr_size; + return sizeof(p_drv_buf->fip_rx_descr_size); + } + break; + case DRV_TLV_LINK_FAILURE_COUNT: + if (p_drv_buf->link_failures_set) { + p_buf->p_val = &p_drv_buf->link_failures; + return sizeof(p_drv_buf->link_failures); + } + break; + case DRV_TLV_FCOE_BOOT_PROGRESS: + if (p_drv_buf->fcoe_boot_progress_set) { + p_buf->p_val = &p_drv_buf->fcoe_boot_progress; + return sizeof(p_drv_buf->fcoe_boot_progress); + } + break; + case DRV_TLV_RX_BROADCAST_PACKETS: + if (p_drv_buf->rx_bcast_set) { + p_buf->p_val = &p_drv_buf->rx_bcast; + return sizeof(p_drv_buf->rx_bcast); + } + break; + case DRV_TLV_TX_BROADCAST_PACKETS: + if (p_drv_buf->tx_bcast_set) { + p_buf->p_val = &p_drv_buf->tx_bcast; + return sizeof(p_drv_buf->tx_bcast); + } + break; + case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_txq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_txq_depth; + return sizeof(p_drv_buf->fcoe_txq_depth); + } + break; + case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->fcoe_rxq_depth_set) { + p_buf->p_val = &p_drv_buf->fcoe_rxq_depth; + return sizeof(p_drv_buf->fcoe_rxq_depth); + } + break; + case DRV_TLV_FCOE_RX_FRAMES_RECEIVED: + if (p_drv_buf->fcoe_rx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_frames; + return sizeof(p_drv_buf->fcoe_rx_frames); + } + break; + case DRV_TLV_FCOE_RX_BYTES_RECEIVED: + if (p_drv_buf->fcoe_rx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_rx_bytes; + return sizeof(p_drv_buf->fcoe_rx_bytes); + } + break; + case DRV_TLV_FCOE_TX_FRAMES_SENT: + if (p_drv_buf->fcoe_tx_frames_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_frames; + return sizeof(p_drv_buf->fcoe_tx_frames); + } + break; + case DRV_TLV_FCOE_TX_BYTES_SENT: + if (p_drv_buf->fcoe_tx_bytes_set) { + p_buf->p_val = &p_drv_buf->fcoe_tx_bytes; + return sizeof(p_drv_buf->fcoe_tx_bytes); + } + break; + case DRV_TLV_CRC_ERROR_COUNT: + if (p_drv_buf->crc_count_set) { + p_buf->p_val = &p_drv_buf->crc_count; + return sizeof(p_drv_buf->crc_count); + } + break; + case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->crc_err_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx]; + return sizeof(p_drv_buf->crc_err_src_fcid[idx]); + } + break; + case DRV_TLV_CRC_ERROR_1_TIMESTAMP: + case DRV_TLV_CRC_ERROR_2_TIMESTAMP: + case DRV_TLV_CRC_ERROR_3_TIMESTAMP: + case DRV_TLV_CRC_ERROR_4_TIMESTAMP: + case DRV_TLV_CRC_ERROR_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx], + p_buf); + case 
DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT: + if (p_drv_buf->losync_err_set) { + p_buf->p_val = &p_drv_buf->losync_err; + return sizeof(p_drv_buf->losync_err); + } + break; + case DRV_TLV_LOSS_OF_SIGNAL_ERRORS: + if (p_drv_buf->losig_err_set) { + p_buf->p_val = &p_drv_buf->losig_err; + return sizeof(p_drv_buf->losig_err); + } + break; + case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT: + if (p_drv_buf->primtive_err_set) { + p_buf->p_val = &p_drv_buf->primtive_err; + return sizeof(p_drv_buf->primtive_err); + } + break; + case DRV_TLV_DISPARITY_ERROR_COUNT: + if (p_drv_buf->disparity_err_set) { + p_buf->p_val = &p_drv_buf->disparity_err; + return sizeof(p_drv_buf->disparity_err); + } + break; + case DRV_TLV_CODE_VIOLATION_ERROR_COUNT: + if (p_drv_buf->code_violation_err_set) { + p_buf->p_val = &p_drv_buf->code_violation_err; + return sizeof(p_drv_buf->code_violation_err); + } + break; + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1; + if (p_drv_buf->flogi_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_param[idx]; + return sizeof(p_drv_buf->flogi_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3: + case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4: + idx = p_tlv->tlv_type - + DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1; + + if (p_drv_buf->flogi_acc_param_set[idx]) { + p_buf->p_val = &p_drv_buf->flogi_acc_param[idx]; + return sizeof(p_drv_buf->flogi_acc_param[idx]); + } + break; + case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp, + p_buf); + case DRV_TLV_LAST_FLOGI_RJT: + if (p_drv_buf->flogi_rjt_set) { + p_buf->p_val = &p_drv_buf->flogi_rjt; + return sizeof(p_drv_buf->flogi_rjt); + } + break; + case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP: + return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp, + p_buf); + case DRV_TLV_FDISCS_SENT_COUNT: + if (p_drv_buf->fdiscs_set) { + p_buf->p_val = &p_drv_buf->fdiscs; + return sizeof(p_drv_buf->fdiscs); + } + break; + case DRV_TLV_FDISC_ACCS_RECEIVED: + if (p_drv_buf->fdisc_acc_set) { + p_buf->p_val = &p_drv_buf->fdisc_acc; + return sizeof(p_drv_buf->fdisc_acc); + } + break; + case DRV_TLV_FDISC_RJTS_RECEIVED: + if (p_drv_buf->fdisc_rjt_set) { + p_buf->p_val = &p_drv_buf->fdisc_rjt; + return sizeof(p_drv_buf->fdisc_rjt); + } + break; + case DRV_TLV_PLOGI_SENT_COUNT: + if (p_drv_buf->plogi_set) { + p_buf->p_val = &p_drv_buf->plogi; + return sizeof(p_drv_buf->plogi); + } + break; + case DRV_TLV_PLOGI_ACCS_RECEIVED: + if (p_drv_buf->plogi_acc_set) { + p_buf->p_val = &p_drv_buf->plogi_acc; + return sizeof(p_drv_buf->plogi_acc); + } + break; + case DRV_TLV_PLOGI_RJTS_RECEIVED: + if (p_drv_buf->plogi_rjt_set) { + p_buf->p_val = &p_drv_buf->plogi_rjt; + return sizeof(p_drv_buf->plogi_rjt); + } + break; + case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + 
DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->plogi_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx]; + return sizeof(p_drv_buf->plogi_dst_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_TIMESTAMP: + case DRV_TLV_PLOGI_2_TIMESTAMP: + case DRV_TLV_PLOGI_3_TIMESTAMP: + case DRV_TLV_PLOGI_4_TIMESTAMP: + case DRV_TLV_PLOGI_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx], + p_buf); + case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2; + + if (p_drv_buf->plogi_acc_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx]; + return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]); + } + break; + case DRV_TLV_PLOGI_1_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_2_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_3_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_4_ACC_TIMESTAMP: + case DRV_TLV_PLOGI_5_ACC_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2; + p_time = &p_drv_buf->plogi_acc_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + case DRV_TLV_LOGOS_ISSUED: + if (p_drv_buf->tx_plogos_set) { + p_buf->p_val = &p_drv_buf->tx_plogos; + return sizeof(p_drv_buf->tx_plogos); + } + break; + case DRV_TLV_LOGO_ACCS_RECEIVED: + if (p_drv_buf->plogo_acc_set) { + p_buf->p_val = &p_drv_buf->plogo_acc; + return sizeof(p_drv_buf->plogo_acc); + } + break; + case DRV_TLV_LOGO_RJTS_RECEIVED: + if (p_drv_buf->plogo_rjt_set) { + p_buf->p_val = &p_drv_buf->plogo_rjt; + return sizeof(p_drv_buf->plogo_rjt); + } + break; + case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID: + case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) / + 2; + + if (p_drv_buf->plogo_src_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx]; + return sizeof(p_drv_buf->plogo_src_fcid[idx]); + } + break; + case DRV_TLV_LOGO_1_TIMESTAMP: + case DRV_TLV_LOGO_2_TIMESTAMP: + case DRV_TLV_LOGO_3_TIMESTAMP: + case DRV_TLV_LOGO_4_TIMESTAMP: + case DRV_TLV_LOGO_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx], + p_buf); + case DRV_TLV_LOGOS_RECEIVED: + if (p_drv_buf->rx_logos_set) { + p_buf->p_val = &p_drv_buf->rx_logos; + return sizeof(p_drv_buf->rx_logos); + } + break; + case DRV_TLV_ACCS_ISSUED: + if (p_drv_buf->tx_accs_set) { + p_buf->p_val = &p_drv_buf->tx_accs; + return sizeof(p_drv_buf->tx_accs); + } + break; + case DRV_TLV_PRLIS_ISSUED: + if (p_drv_buf->tx_prlis_set) { + p_buf->p_val = &p_drv_buf->tx_prlis; + return sizeof(p_drv_buf->tx_prlis); + } + break; + case DRV_TLV_ACCS_RECEIVED: + if (p_drv_buf->rx_accs_set) { + p_buf->p_val = &p_drv_buf->rx_accs; + return sizeof(p_drv_buf->rx_accs); + } + break; + case DRV_TLV_ABTS_SENT_COUNT: + if (p_drv_buf->tx_abts_set) { + p_buf->p_val = &p_drv_buf->tx_abts; + return sizeof(p_drv_buf->tx_abts); + } + break; + case DRV_TLV_ABTS_ACCS_RECEIVED: + if (p_drv_buf->rx_abts_acc_set) { + p_buf->p_val = &p_drv_buf->rx_abts_acc; + return sizeof(p_drv_buf->rx_abts_acc); + } + break; + case DRV_TLV_ABTS_RJTS_RECEIVED: + 
if (p_drv_buf->rx_abts_rjt_set) { + p_buf->p_val = &p_drv_buf->rx_abts_rjt; + return sizeof(p_drv_buf->rx_abts_rjt); + } + break; + case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID: + case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID: + idx = (p_tlv->tlv_type - + DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2; + + if (p_drv_buf->abts_dst_fcid_set[idx]) { + p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx]; + return sizeof(p_drv_buf->abts_dst_fcid[idx]); + } + break; + case DRV_TLV_ABTS_1_TIMESTAMP: + case DRV_TLV_ABTS_2_TIMESTAMP: + case DRV_TLV_ABTS_3_TIMESTAMP: + case DRV_TLV_ABTS_4_TIMESTAMP: + case DRV_TLV_ABTS_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2; + + return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx], + p_buf); + case DRV_TLV_RSCNS_RECEIVED: + if (p_drv_buf->rx_rscn_set) { + p_buf->p_val = &p_drv_buf->rx_rscn; + return sizeof(p_drv_buf->rx_rscn); + } + break; + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3: + case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4: + idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1; + + if (p_drv_buf->rx_rscn_nport_set[idx]) { + p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx]; + return sizeof(p_drv_buf->rx_rscn_nport[idx]); + } + break; + case DRV_TLV_LUN_RESETS_ISSUED: + if (p_drv_buf->tx_lun_rst_set) { + p_buf->p_val = &p_drv_buf->tx_lun_rst; + return sizeof(p_drv_buf->tx_lun_rst); + } + break; + case DRV_TLV_ABORT_TASK_SETS_ISSUED: + if (p_drv_buf->abort_task_sets_set) { + p_buf->p_val = &p_drv_buf->abort_task_sets; + return sizeof(p_drv_buf->abort_task_sets); + } + break; + case DRV_TLV_TPRLOS_SENT: + if (p_drv_buf->tx_tprlos_set) { + p_buf->p_val = &p_drv_buf->tx_tprlos; + return sizeof(p_drv_buf->tx_tprlos); + } + break; + case DRV_TLV_NOS_SENT_COUNT: + if (p_drv_buf->tx_nos_set) { + p_buf->p_val = &p_drv_buf->tx_nos; + return sizeof(p_drv_buf->tx_nos); + } + break; + case DRV_TLV_NOS_RECEIVED_COUNT: + if (p_drv_buf->rx_nos_set) { + p_buf->p_val = &p_drv_buf->rx_nos; + return sizeof(p_drv_buf->rx_nos); + } + break; + case DRV_TLV_OLS_COUNT: + if (p_drv_buf->ols_set) { + p_buf->p_val = &p_drv_buf->ols; + return sizeof(p_drv_buf->ols); + } + break; + case DRV_TLV_LR_COUNT: + if (p_drv_buf->lr_set) { + p_buf->p_val = &p_drv_buf->lr; + return sizeof(p_drv_buf->lr); + } + break; + case DRV_TLV_LRR_COUNT: + if (p_drv_buf->lrr_set) { + p_buf->p_val = &p_drv_buf->lrr; + return sizeof(p_drv_buf->lrr); + } + break; + case DRV_TLV_LIP_SENT_COUNT: + if (p_drv_buf->tx_lip_set) { + p_buf->p_val = &p_drv_buf->tx_lip; + return sizeof(p_drv_buf->tx_lip); + } + break; + case DRV_TLV_LIP_RECEIVED_COUNT: + if (p_drv_buf->rx_lip_set) { + p_buf->p_val = &p_drv_buf->rx_lip; + return sizeof(p_drv_buf->rx_lip); + } + break; + case DRV_TLV_EOFA_COUNT: + if (p_drv_buf->eofa_set) { + p_buf->p_val = &p_drv_buf->eofa; + return sizeof(p_drv_buf->eofa); + } + break; + case DRV_TLV_EOFNI_COUNT: + if (p_drv_buf->eofni_set) { + p_buf->p_val = &p_drv_buf->eofni; + return sizeof(p_drv_buf->eofni); + } + break; + case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT: + if (p_drv_buf->scsi_chks_set) { + p_buf->p_val = &p_drv_buf->scsi_chks; + return sizeof(p_drv_buf->scsi_chks); + } + break; + case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_cond_met_set) { + p_buf->p_val = &p_drv_buf->scsi_cond_met; + return 
sizeof(p_drv_buf->scsi_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_BUSY_COUNT: + if (p_drv_buf->scsi_busy_set) { + p_buf->p_val = &p_drv_buf->scsi_busy; + return sizeof(p_drv_buf->scsi_busy); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT: + if (p_drv_buf->scsi_inter_set) { + p_buf->p_val = &p_drv_buf->scsi_inter; + return sizeof(p_drv_buf->scsi_inter); + } + break; + case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT: + if (p_drv_buf->scsi_inter_cond_met_set) { + p_buf->p_val = &p_drv_buf->scsi_inter_cond_met; + return sizeof(p_drv_buf->scsi_inter_cond_met); + } + break; + case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT: + if (p_drv_buf->scsi_rsv_conflicts_set) { + p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts; + return sizeof(p_drv_buf->scsi_rsv_conflicts); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT: + if (p_drv_buf->scsi_tsk_full_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_full; + return sizeof(p_drv_buf->scsi_tsk_full); + } + break; + case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT: + if (p_drv_buf->scsi_aca_active_set) { + p_buf->p_val = &p_drv_buf->scsi_aca_active; + return sizeof(p_drv_buf->scsi_aca_active); + } + break; + case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT: + if (p_drv_buf->scsi_tsk_abort_set) { + p_buf->p_val = &p_drv_buf->scsi_tsk_abort; + return sizeof(p_drv_buf->scsi_tsk_abort); + } + break; + case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ: + case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ: + idx = (p_tlv->tlv_type - + DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2; + + if (p_drv_buf->scsi_rx_chk_set[idx]) { + p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx]; + return sizeof(p_drv_buf->scsi_rx_chk[idx]); + } + break; + case DRV_TLV_SCSI_CHECK_1_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_2_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_3_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_4_TIMESTAMP: + case DRV_TLV_SCSI_CHECK_5_TIMESTAMP: + idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2; + p_time = &p_drv_buf->scsi_chk_tstamp[idx]; + + return qed_mfw_get_tlv_time_value(p_time, p_buf); + default: + break; + } + + return -1; +} + +static int +qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv, + struct qed_mfw_tlv_iscsi *p_drv_buf, + struct qed_tlv_parsed_buf *p_buf) +{ + switch (p_tlv->tlv_type) { + case DRV_TLV_TARGET_LLMNR_ENABLED: + if (p_drv_buf->target_llmnr_set) { + p_buf->p_val = &p_drv_buf->target_llmnr; + return sizeof(p_drv_buf->target_llmnr); + } + break; + case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED: + if (p_drv_buf->header_digest_set) { + p_buf->p_val = &p_drv_buf->header_digest; + return sizeof(p_drv_buf->header_digest); + } + break; + case DRV_TLV_DATA_DIGEST_FLAG_ENABLED: + if (p_drv_buf->data_digest_set) { + p_buf->p_val = &p_drv_buf->data_digest; + return sizeof(p_drv_buf->data_digest); + } + break; + case DRV_TLV_AUTHENTICATION_METHOD: + if (p_drv_buf->auth_method_set) { + p_buf->p_val = &p_drv_buf->auth_method; + return sizeof(p_drv_buf->auth_method); + } + break; + case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL: + if (p_drv_buf->boot_taget_portal_set) { + p_buf->p_val = &p_drv_buf->boot_taget_portal; + return sizeof(p_drv_buf->boot_taget_portal); + } + break; + case DRV_TLV_MAX_FRAME_SIZE: + if (p_drv_buf->frame_size_set) { + p_buf->p_val = &p_drv_buf->frame_size; + return sizeof(p_drv_buf->frame_size); + } + break; + case 
DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->tx_desc_size_set) { + p_buf->p_val = &p_drv_buf->tx_desc_size; + return sizeof(p_drv_buf->tx_desc_size); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE: + if (p_drv_buf->rx_desc_size_set) { + p_buf->p_val = &p_drv_buf->rx_desc_size; + return sizeof(p_drv_buf->rx_desc_size); + } + break; + case DRV_TLV_ISCSI_BOOT_PROGRESS: + if (p_drv_buf->boot_progress_set) { + p_buf->p_val = &p_drv_buf->boot_progress; + return sizeof(p_drv_buf->boot_progress); + } + break; + case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH: + if (p_drv_buf->tx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->tx_desc_qdepth; + return sizeof(p_drv_buf->tx_desc_qdepth); + } + break; + case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH: + if (p_drv_buf->rx_desc_qdepth_set) { + p_buf->p_val = &p_drv_buf->rx_desc_qdepth; + return sizeof(p_drv_buf->rx_desc_qdepth); + } + break; + case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED: + if (p_drv_buf->rx_frames_set) { + p_buf->p_val = &p_drv_buf->rx_frames; + return sizeof(p_drv_buf->rx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED: + if (p_drv_buf->rx_bytes_set) { + p_buf->p_val = &p_drv_buf->rx_bytes; + return sizeof(p_drv_buf->rx_bytes); + } + break; + case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT: + if (p_drv_buf->tx_frames_set) { + p_buf->p_val = &p_drv_buf->tx_frames; + return sizeof(p_drv_buf->tx_frames); + } + break; + case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT: + if (p_drv_buf->tx_bytes_set) { + p_buf->p_val = &p_drv_buf->tx_bytes; + return sizeof(p_drv_buf->tx_bytes); + } + break; + default: + break; + } + + return -1; +} + +static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn, + u8 tlv_group, u8 *p_mfw_buf, u32 size) +{ + union qed_mfw_tlv_data *p_tlv_data; + struct qed_tlv_parsed_buf buffer; + struct qed_drv_tlv_hdr tlv; + int len = 0; + u32 offset; + u8 *p_tlv; + + p_tlv_data = vzalloc(sizeof(*p_tlv_data)); + if (!p_tlv_data) + return -ENOMEM; + + if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) { + vfree(p_tlv_data); + return -EINVAL; + } + + memset(&tlv, 0, sizeof(tlv)); + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_tlv = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_tlv); + tlv.tlv_length = TLV_LENGTH(p_tlv); + tlv.tlv_flags = TLV_FLAGS(p_tlv); + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Type %d length = %d flags = 0x%x\n", tlv.tlv_type, + tlv.tlv_length, tlv.tlv_flags); + + if (tlv_group == QED_MFW_TLV_GENERIC) + len = qed_mfw_get_gen_tlv_value(&tlv, + &p_tlv_data->generic, + &buffer); + else if (tlv_group == QED_MFW_TLV_ETH) + len = qed_mfw_get_eth_tlv_value(&tlv, + &p_tlv_data->eth, + &buffer); + else if (tlv_group == QED_MFW_TLV_FCOE) + len = qed_mfw_get_fcoe_tlv_value(&tlv, + &p_tlv_data->fcoe, + &buffer); + else + len = qed_mfw_get_iscsi_tlv_value(&tlv, + &p_tlv_data->iscsi, + &buffer); + + if (len > 0) { + WARN(len > 4 * tlv.tlv_length, + "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n", + len, 4 * tlv.tlv_length); + len = min_t(int, len, 4 * tlv.tlv_length); + tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED; + TLV_FLAGS(p_tlv) = tlv.tlv_flags; + memcpy(p_mfw_buf + offset + sizeof(tlv), + buffer.p_val, len); + } + } + + vfree(p_tlv_data); + + return 0; +} + +int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 addr, size, offset, resp, param, val, global_offsize, global_addr; + u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp; + struct qed_drv_tlv_hdr tlv; + int rc; + + addr = 
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_GLOBAL); + global_offsize = qed_rd(p_hwfn, p_ptt, addr); + global_addr = SECTION_ADDR(global_offsize, 0); + addr = global_addr + offsetof(struct public_global, data_ptr); + addr = qed_rd(p_hwfn, p_ptt, addr); + size = qed_rd(p_hwfn, p_ptt, global_addr + + offsetof(struct public_global, data_size)); + + if (!size) { + DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size); + goto drv_done; + } + + p_mfw_buf = vzalloc(size); + if (!p_mfw_buf) { + DP_NOTICE(p_hwfn, "Failed allocate memory for p_mfw_buf\n"); + goto drv_done; + } + + /* Read the TLV request to local buffer. MFW represents the TLV in + * little endian format and mcp returns it bigendian format. Hence + * driver need to convert data to little endian first and then do the + * memcpy (casting) to preserve the MFW TLV format in the driver buffer. + * + */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + val = qed_rd(p_hwfn, p_ptt, addr + offset); + val = be32_to_cpu((__force __be32)val); + memcpy(&p_mfw_buf[offset], &val, sizeof(u32)); + } + + /* Parse the headers to enumerate the requested TLV groups */ + for (offset = 0; offset < size; + offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) { + p_temp = &p_mfw_buf[offset]; + tlv.tlv_type = TLV_TYPE(p_temp); + tlv.tlv_length = TLV_LENGTH(p_temp); + if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group)) + DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, + "Un recognized TLV %d\n", tlv.tlv_type); + } + + /* Sanitize the TLV groups according to personality */ + if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping L2 TLVs for non-L2 function\n"); + tlv_group &= ~QED_MFW_TLV_ETH; + } + + if ((tlv_group & QED_MFW_TLV_FCOE) && + p_hwfn->hw_info.personality != QED_PCI_FCOE) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping FCoE TLVs for non-FCoE function\n"); + tlv_group &= ~QED_MFW_TLV_FCOE; + } + + if ((tlv_group & QED_MFW_TLV_ISCSI) && + p_hwfn->hw_info.personality != QED_PCI_ISCSI && + p_hwfn->hw_info.personality != QED_PCI_NVMETCP) { + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "Skipping iSCSI TLVs for non-iSCSI function\n"); + tlv_group &= ~QED_MFW_TLV_ISCSI; + } + + /* Update the TLV values in the local buffer */ + for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) { + if (tlv_group & id) + if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size)) + goto drv_done; + } + + /* Write the TLV data to shared memory. The stream of 4 bytes first need + * to be mem-copied to u32 element to make it as LSB format. And then + * converted to big endian as required by mcp-write. + */ + for (offset = 0; offset < size; offset += sizeof(u32)) { + memcpy(&val, &p_mfw_buf[offset], sizeof(u32)); + val = (__force u32)cpu_to_be32(val); + qed_wr(p_hwfn, p_ptt, addr + offset, val); + } + +drv_done: + rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp, + ¶m); + + vfree(p_mfw_buf); + + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c new file mode 100644 index 0000000000..f19128c8d9 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.c @@ -0,0 +1,829 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright 2021 Marvell. All rights reserved. 
*/ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <asm/param.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/stddef.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/list.h> +#include <linux/qed/qed_nvmetcp_if.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_nvmetcp.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_sp.h" +#include "qed_reg_addr.h" +#include "qed_nvmetcp_fw_funcs.h" + +static int qed_nvmetcp_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + u16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + if (p_hwfn->p_nvmetcp_info->event_cb) { + struct qed_nvmetcp_info *p_nvmetcp = p_hwfn->p_nvmetcp_info; + + return p_nvmetcp->event_cb(p_nvmetcp->event_context, + fw_event_code, data); + } else { + DP_NOTICE(p_hwfn, "nvmetcp async completion is not set\n"); + + return -EINVAL; + } +} + +static int qed_sp_nvmetcp_func_start(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr, + void *event_context, + nvmetcp_event_cb_t async_event_cb) +{ + struct nvmetcp_init_ramrod_params *p_ramrod = NULL; + struct qed_nvmetcp_pf_params *p_params = NULL; + struct scsi_init_func_queues *p_queue = NULL; + struct nvmetcp_spe_func_init *p_init = NULL; + struct qed_sp_init_data init_data = {}; + struct qed_spq_entry *p_ent = NULL; + int rc = 0; + u16 val; + u8 i; + + /* Get SPQ entry */ + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + rc = qed_sp_init_request(p_hwfn, &p_ent, + NVMETCP_RAMROD_CMD_ID_INIT_FUNC, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.nvmetcp_init; + p_init = &p_ramrod->nvmetcp_init_spe; + p_params = &p_hwfn->pf_params.nvmetcp_pf_params; + p_queue = &p_init->q_params; + p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring; + p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring; + p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring; + p_init->ll2_rx_queue_id = RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) + + p_params->ll2_ooo_queue_id; + SET_FIELD(p_init->flags, NVMETCP_SPE_FUNC_INIT_NVMETCP_MODE, 1); + p_init->func_params.log_page_size = ilog2(PAGE_SIZE); + p_init->func_params.num_tasks = cpu_to_le16(p_params->num_tasks); + p_init->debug_flags = p_params->debug_mode; + DMA_REGPAIR_LE(p_queue->glbl_q_params_addr, + p_params->glbl_q_params_addr); + p_queue->cq_num_entries = cpu_to_le16(QED_NVMETCP_FW_CQ_SIZE); + p_queue->num_queues = p_params->num_queues; + val = RESC_START(p_hwfn, QED_CMDQS_CQS); + p_queue->queue_relative_offset = cpu_to_le16((u16)val); + p_queue->cq_sb_pi = p_params->gl_rq_pi; + + for (i = 0; i < p_params->num_queues; i++) { + val = qed_get_igu_sb_id(p_hwfn, i); + p_queue->cq_cmdq_sb_num_arr[i] = cpu_to_le16(val); + } + + SET_FIELD(p_queue->q_validity, + SCSI_INIT_FUNC_QUEUES_CMD_VALID, 0); + p_queue->cmdq_num_entries = 0; + p_queue->bdq_resource_id = (u8)RESC_START(p_hwfn, QED_BDQ); + p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(QED_TCP_TWO_MSL_TIMER); + p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(QED_TCP_SWS_TIMER); + p_init->half_way_close_timeout = cpu_to_le16(QED_TCP_HALF_WAY_CLOSE_TIMEOUT); + 
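/* Descriptive note (editor addition): the same QED_TCP_MAX_FIN_RT value is programmed both as the FIN retransmission limit and, just below, as the SYN retransmission limit (NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT). */ +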
p_ramrod->tcp_init.max_fin_rt = QED_TCP_MAX_FIN_RT; + SET_FIELD(p_ramrod->nvmetcp_init_spe.params, + NVMETCP_SPE_FUNC_INIT_MAX_SYN_RT, QED_TCP_MAX_FIN_RT); + p_hwfn->p_nvmetcp_info->event_context = event_context; + p_hwfn->p_nvmetcp_info->event_cb = async_event_cb; + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_TCP_ULP, + qed_nvmetcp_async_event); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_nvmetcp_func_stop(struct qed_hwfn *p_hwfn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + rc = qed_sp_init_request(p_hwfn, &p_ent, + NVMETCP_RAMROD_CMD_ID_DESTROY_FUNC, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_TCP_ULP); + + return rc; +} + +static int qed_fill_nvmetcp_dev_info(struct qed_dev *cdev, + struct qed_dev_nvmetcp_info *info) +{ + struct qed_hwfn *hwfn = QED_AFFIN_HWFN(cdev); + int rc; + + memset(info, 0, sizeof(*info)); + rc = qed_fill_dev_info(cdev, &info->common); + info->port_id = MFW_PORT(hwfn); + info->num_cqs = FEAT_NUM(hwfn, QED_NVMETCP_CQ); + + return rc; +} + +static void qed_register_nvmetcp_ops(struct qed_dev *cdev, + struct qed_nvmetcp_cb_ops *ops, + void *cookie) +{ + cdev->protocol_ops.nvmetcp = ops; + cdev->ops_cookie = cookie; +} + +static int qed_nvmetcp_stop(struct qed_dev *cdev) +{ + int rc; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) { + DP_NOTICE(cdev, "nvmetcp already stopped\n"); + + return 0; + } + + if (!hash_empty(cdev->connections)) { + DP_NOTICE(cdev, + "Can't stop nvmetcp - not all connections were returned\n"); + + return -EINVAL; + } + + /* Stop the nvmetcp */ + rc = qed_sp_nvmetcp_func_stop(QED_AFFIN_HWFN(cdev), QED_SPQ_MODE_EBLOCK, + NULL); + cdev->flags &= ~QED_FLAG_STORAGE_STARTED; + + return rc; +} + +static int qed_nvmetcp_start(struct qed_dev *cdev, + struct qed_nvmetcp_tid *tasks, + void *event_context, + nvmetcp_event_cb_t async_event_cb) +{ + struct qed_tid_mem *tid_info; + int rc; + + if (cdev->flags & QED_FLAG_STORAGE_STARTED) { + DP_NOTICE(cdev, "nvmetcp already started;\n"); + + return 0; + } + + rc = qed_sp_nvmetcp_func_start(QED_AFFIN_HWFN(cdev), + QED_SPQ_MODE_EBLOCK, NULL, + event_context, async_event_cb); + if (rc) { + DP_NOTICE(cdev, "Failed to start nvmetcp\n"); + + return rc; + } + + cdev->flags |= QED_FLAG_STORAGE_STARTED; + hash_init(cdev->connections); + + if (!tasks) + return 0; + + tid_info = kzalloc(sizeof(*tid_info), GFP_KERNEL); + if (!tid_info) { + qed_nvmetcp_stop(cdev); + + return -ENOMEM; + } + + rc = qed_cxt_get_tid_mem_info(QED_AFFIN_HWFN(cdev), tid_info); + if (rc) { + DP_NOTICE(cdev, "Failed to gather task information\n"); + qed_nvmetcp_stop(cdev); + kfree(tid_info); + + return rc; + } + + /* Fill task information */ + tasks->size = tid_info->tid_size; + tasks->num_tids_per_block = tid_info->num_tids_per_block; + memcpy(tasks->blocks, tid_info->blocks, + MAX_TID_BLOCKS_NVMETCP * sizeof(u8 *)); + kfree(tid_info); + + return 0; +} + +static struct qed_hash_nvmetcp_con *qed_nvmetcp_get_hash(struct qed_dev *cdev, + u32 handle) +{ + struct qed_hash_nvmetcp_con *hash_con = NULL; + + if (!(cdev->flags & QED_FLAG_STORAGE_STARTED)) + return NULL; + + 
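/* Descriptive note (editor addition): the connection hash is keyed by icid; walk the bucket for this handle and re-check the icid after the loop, since a bucket may hold more than one entry. */ +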
hash_for_each_possible(cdev->connections, hash_con, node, handle) { + if (hash_con->con->icid == handle) + break; + } + + if (!hash_con || hash_con->con->icid != handle) + return NULL; + + return hash_con; +} + +static int qed_sp_nvmetcp_conn_offload(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct nvmetcp_spe_conn_offload *p_ramrod = NULL; + struct tcp_offload_params_opt2 *p_tcp = NULL; + struct qed_sp_init_data init_data = { 0 }; + struct qed_spq_entry *p_ent = NULL; + dma_addr_t r2tq_pbl_addr; + dma_addr_t xhq_pbl_addr; + dma_addr_t uhq_pbl_addr; + u16 physical_q; + int rc = 0; + u8 i; + + /* Get SPQ entry */ + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + rc = qed_sp_init_request(p_hwfn, &p_ent, + NVMETCP_RAMROD_CMD_ID_OFFLOAD_CONN, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.nvmetcp_conn_offload; + + /* Transmission PQ is the first of the PF */ + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD); + p_conn->physical_q0 = cpu_to_le16(physical_q); + p_ramrod->nvmetcp.physical_q0 = cpu_to_le16(physical_q); + + /* nvmetcp Pure-ACK PQ */ + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_ACK); + p_conn->physical_q1 = cpu_to_le16(physical_q); + p_ramrod->nvmetcp.physical_q1 = cpu_to_le16(physical_q); + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + DMA_REGPAIR_LE(p_ramrod->nvmetcp.sq_pbl_addr, p_conn->sq_pbl_addr); + r2tq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->r2tq); + DMA_REGPAIR_LE(p_ramrod->nvmetcp.r2tq_pbl_addr, r2tq_pbl_addr); + xhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->xhq); + DMA_REGPAIR_LE(p_ramrod->nvmetcp.xhq_pbl_addr, xhq_pbl_addr); + uhq_pbl_addr = qed_chain_get_pbl_phys(&p_conn->uhq); + DMA_REGPAIR_LE(p_ramrod->nvmetcp.uhq_pbl_addr, uhq_pbl_addr); + p_ramrod->nvmetcp.flags = p_conn->offl_flags; + p_ramrod->nvmetcp.default_cq = p_conn->default_cq; + p_ramrod->nvmetcp.initial_ack = 0; + DMA_REGPAIR_LE(p_ramrod->nvmetcp.nvmetcp.cccid_itid_table_addr, + p_conn->nvmetcp_cccid_itid_table_addr); + p_ramrod->nvmetcp.nvmetcp.cccid_max_range = + cpu_to_le16(p_conn->nvmetcp_cccid_max_range); + p_tcp = &p_ramrod->tcp; + qed_set_fw_mac_addr(&p_tcp->remote_mac_addr_hi, + &p_tcp->remote_mac_addr_mid, + &p_tcp->remote_mac_addr_lo, p_conn->remote_mac); + qed_set_fw_mac_addr(&p_tcp->local_mac_addr_hi, + &p_tcp->local_mac_addr_mid, + &p_tcp->local_mac_addr_lo, p_conn->local_mac); + p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id); + p_tcp->flags = cpu_to_le16(p_conn->tcp_flags); + p_tcp->ip_version = p_conn->ip_version; + if (p_tcp->ip_version == TCP_IPV6) { + for (i = 0; i < 4; i++) { + p_tcp->remote_ip[i] = cpu_to_le32(p_conn->remote_ip[i]); + p_tcp->local_ip[i] = cpu_to_le32(p_conn->local_ip[i]); + } + } else { + p_tcp->remote_ip[0] = cpu_to_le32(p_conn->remote_ip[0]); + p_tcp->local_ip[0] = cpu_to_le32(p_conn->local_ip[0]); + } + + p_tcp->flow_label = cpu_to_le32(p_conn->flow_label); + p_tcp->ttl = p_conn->ttl; + p_tcp->tos_or_tc = p_conn->tos_or_tc; + p_tcp->remote_port = cpu_to_le16(p_conn->remote_port); + p_tcp->local_port = cpu_to_le16(p_conn->local_port); + p_tcp->mss = cpu_to_le16(p_conn->mss); + p_tcp->rcv_wnd_scale = p_conn->rcv_wnd_scale; + p_tcp->connect_mode = p_conn->connect_mode; + p_tcp->cwnd = cpu_to_le32(p_conn->cwnd); + p_tcp->ka_max_probe_cnt = p_conn->ka_max_probe_cnt; + p_tcp->ka_timeout = 
cpu_to_le32(p_conn->ka_timeout); + p_tcp->max_rt_time = cpu_to_le32(p_conn->max_rt_time); + p_tcp->ka_interval = cpu_to_le32(p_conn->ka_interval); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_nvmetcp_conn_update(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct nvmetcp_conn_update_ramrod_params *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u32 dval; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + NVMETCP_RAMROD_CMD_ID_UPDATE_CONN, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.nvmetcp_conn_update; + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->flags = p_conn->update_flag; + p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size); + dval = p_conn->max_recv_pdu_length; + p_ramrod->max_recv_pdu_length = cpu_to_le32(dval); + dval = p_conn->max_send_pdu_length; + p_ramrod->max_send_pdu_length = cpu_to_le32(dval); + p_ramrod->first_seq_length = cpu_to_le32(p_conn->first_seq_length); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_nvmetcp_conn_terminate(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct nvmetcp_spe_conn_termination *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + rc = qed_sp_init_request(p_hwfn, &p_ent, + NVMETCP_RAMROD_CMD_ID_TERMINATION_CONN, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.nvmetcp_conn_terminate; + p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id); + p_ramrod->abortive = p_conn->abortive_dsconnect; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_nvmetcp_conn_clear_sq(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn *p_conn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_addr) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_conn->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_addr; + rc = qed_sp_init_request(p_hwfn, &p_ent, + NVMETCP_RAMROD_CMD_ID_CLEAR_SQ, + PROTOCOLID_TCP_ULP, &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static void __iomem *qed_nvmetcp_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid) +{ + return (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr(cid, DQ_DEMS_LEGACY); +} + +static int qed_nvmetcp_allocate_connection(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn **p_out_conn) +{ + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + }; + struct qed_nvmetcp_pf_params *p_params = NULL; + struct qed_nvmetcp_conn *p_conn = NULL; + int rc = 0; + + /* Try 
finding a free connection that can be used */ + spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); + if (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) + p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list, + struct qed_nvmetcp_conn, list_entry); + if (p_conn) { + list_del(&p_conn->list_entry); + spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); + *p_out_conn = p_conn; + + return 0; + } + spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); + + /* Need to allocate a new connection */ + p_params = &p_hwfn->pf_params.nvmetcp_pf_params; + p_conn = kzalloc(sizeof(*p_conn), GFP_KERNEL); + if (!p_conn) + return -ENOMEM; + + params.num_elems = p_params->num_r2tq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct nvmetcp_wqe); + params.elem_size = sizeof(struct nvmetcp_wqe); + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->r2tq, ¶ms); + if (rc) + goto nomem_r2tq; + + params.num_elems = p_params->num_uhq_pages_in_ring * + QED_CHAIN_PAGE_SIZE / sizeof(struct iscsi_uhqe); + params.elem_size = sizeof(struct iscsi_uhqe); + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->uhq, ¶ms); + if (rc) + goto nomem_uhq; + + params.elem_size = sizeof(struct iscsi_xhqe); + rc = qed_chain_alloc(p_hwfn->cdev, &p_conn->xhq, ¶ms); + if (rc) + goto nomem; + + p_conn->free_on_delete = true; + *p_out_conn = p_conn; + + return 0; + +nomem: + qed_chain_free(p_hwfn->cdev, &p_conn->uhq); +nomem_uhq: + qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); +nomem_r2tq: + kfree(p_conn); + + return -ENOMEM; +} + +static int qed_nvmetcp_acquire_connection(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn **p_out_conn) +{ + struct qed_nvmetcp_conn *p_conn = NULL; + int rc = 0; + u32 icid; + + spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); + rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_TCP_ULP, &icid); + spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); + + if (rc) + return rc; + + rc = qed_nvmetcp_allocate_connection(p_hwfn, &p_conn); + if (rc) { + spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); + qed_cxt_release_cid(p_hwfn, icid); + spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); + + return rc; + } + + p_conn->icid = icid; + p_conn->conn_id = (u16)icid; + p_conn->fw_cid = (p_hwfn->hw_info.opaque_fid << 16) | icid; + *p_out_conn = p_conn; + + return rc; +} + +static void qed_nvmetcp_release_connection(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn *p_conn) +{ + spin_lock_bh(&p_hwfn->p_nvmetcp_info->lock); + list_add_tail(&p_conn->list_entry, &p_hwfn->p_nvmetcp_info->free_list); + qed_cxt_release_cid(p_hwfn, p_conn->icid); + spin_unlock_bh(&p_hwfn->p_nvmetcp_info->lock); +} + +static void qed_nvmetcp_free_connection(struct qed_hwfn *p_hwfn, + struct qed_nvmetcp_conn *p_conn) +{ + qed_chain_free(p_hwfn->cdev, &p_conn->xhq); + qed_chain_free(p_hwfn->cdev, &p_conn->uhq); + qed_chain_free(p_hwfn->cdev, &p_conn->r2tq); + kfree(p_conn); +} + +int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_nvmetcp_info *p_nvmetcp_info; + + p_nvmetcp_info = kzalloc(sizeof(*p_nvmetcp_info), GFP_KERNEL); + if (!p_nvmetcp_info) + return -ENOMEM; + + INIT_LIST_HEAD(&p_nvmetcp_info->free_list); + p_hwfn->p_nvmetcp_info = p_nvmetcp_info; + + return 0; +} + +void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn) +{ + spin_lock_init(&p_hwfn->p_nvmetcp_info->lock); +} + +void qed_nvmetcp_free(struct qed_hwfn *p_hwfn) +{ + struct qed_nvmetcp_conn *p_conn = NULL; + + if (!p_hwfn->p_nvmetcp_info) + return; + + while (!list_empty(&p_hwfn->p_nvmetcp_info->free_list)) { + p_conn = list_first_entry(&p_hwfn->p_nvmetcp_info->free_list, + struct qed_nvmetcp_conn, list_entry); + if (p_conn) 
{ + list_del(&p_conn->list_entry); + qed_nvmetcp_free_connection(p_hwfn, p_conn); + } + } + + kfree(p_hwfn->p_nvmetcp_info); + p_hwfn->p_nvmetcp_info = NULL; +} + +static int qed_nvmetcp_acquire_conn(struct qed_dev *cdev, + u32 *handle, + u32 *fw_cid, void __iomem **p_doorbell) +{ + struct qed_hash_nvmetcp_con *hash_con; + int rc; + + /* Allocate a hashed connection */ + hash_con = kzalloc(sizeof(*hash_con), GFP_ATOMIC); + if (!hash_con) + return -ENOMEM; + + /* Acquire the connection */ + rc = qed_nvmetcp_acquire_connection(QED_AFFIN_HWFN(cdev), + &hash_con->con); + if (rc) { + DP_NOTICE(cdev, "Failed to acquire Connection\n"); + kfree(hash_con); + + return rc; + } + + /* Added the connection to hash table */ + *handle = hash_con->con->icid; + *fw_cid = hash_con->con->fw_cid; + hash_add(cdev->connections, &hash_con->node, *handle); + if (p_doorbell) + *p_doorbell = qed_nvmetcp_get_db_addr(QED_AFFIN_HWFN(cdev), + *handle); + + return 0; +} + +static int qed_nvmetcp_release_conn(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_nvmetcp_con *hash_con; + + hash_con = qed_nvmetcp_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + + return -EINVAL; + } + + hlist_del(&hash_con->node); + qed_nvmetcp_release_connection(QED_AFFIN_HWFN(cdev), hash_con->con); + kfree(hash_con); + + return 0; +} + +static int qed_nvmetcp_offload_conn(struct qed_dev *cdev, u32 handle, + struct qed_nvmetcp_params_offload *conn_info) +{ + struct qed_hash_nvmetcp_con *hash_con; + struct qed_nvmetcp_conn *con; + + hash_con = qed_nvmetcp_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + + /* FW initializations */ + con->layer_code = NVMETCP_SLOW_PATH_LAYER_CODE; + con->sq_pbl_addr = conn_info->sq_pbl_addr; + con->nvmetcp_cccid_max_range = conn_info->nvmetcp_cccid_max_range; + con->nvmetcp_cccid_itid_table_addr = conn_info->nvmetcp_cccid_itid_table_addr; + con->default_cq = conn_info->default_cq; + SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TARGET_MODE, 0); + SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_NVMETCP_MODE, 1); + SET_FIELD(con->offl_flags, NVMETCP_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B, 1); + + /* Networking and TCP stack initializations */ + ether_addr_copy(con->local_mac, conn_info->src.mac); + ether_addr_copy(con->remote_mac, conn_info->dst.mac); + memcpy(con->local_ip, conn_info->src.ip, sizeof(con->local_ip)); + memcpy(con->remote_ip, conn_info->dst.ip, sizeof(con->remote_ip)); + con->local_port = conn_info->src.port; + con->remote_port = conn_info->dst.port; + con->vlan_id = conn_info->vlan_id; + + if (conn_info->timestamp_en) + SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_TS_EN, 1); + + if (conn_info->delayed_ack_en) + SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_DA_EN, 1); + + if (conn_info->tcp_keep_alive_en) + SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_KA_EN, 1); + + if (conn_info->ecn_en) + SET_FIELD(con->tcp_flags, TCP_OFFLOAD_PARAMS_OPT2_ECN_EN, 1); + + con->ip_version = conn_info->ip_version; + con->flow_label = QED_TCP_FLOW_LABEL; + con->ka_max_probe_cnt = conn_info->ka_max_probe_cnt; + con->ka_timeout = conn_info->ka_timeout; + con->ka_interval = conn_info->ka_interval; + con->max_rt_time = conn_info->max_rt_time; + con->ttl = conn_info->ttl; + con->tos_or_tc = conn_info->tos_or_tc; + con->mss = conn_info->mss; + 
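/* Descriptive note (editor addition): remaining TCP parameters are copied from conn_info; connect_mode is forced to 0, i.e. an active (host/initiator-side) TCP open. */ +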
con->cwnd = conn_info->cwnd; + con->rcv_wnd_scale = conn_info->rcv_wnd_scale; + con->connect_mode = 0; + + return qed_sp_nvmetcp_conn_offload(QED_AFFIN_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_nvmetcp_update_conn(struct qed_dev *cdev, + u32 handle, + struct qed_nvmetcp_params_update *conn_info) +{ + struct qed_hash_nvmetcp_con *hash_con; + struct qed_nvmetcp_conn *con; + + hash_con = qed_nvmetcp_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + + return -EINVAL; + } + + /* Update the connection with information from the params */ + con = hash_con->con; + SET_FIELD(con->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T, 0); + SET_FIELD(con->update_flag, + ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA, 1); + if (conn_info->hdr_digest_en) + SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN, 1); + + if (conn_info->data_digest_en) + SET_FIELD(con->update_flag, ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN, 1); + + /* Placeholder - initialize pfv, cpda, hpda */ + + con->max_seq_size = conn_info->max_io_size; + con->max_recv_pdu_length = conn_info->max_recv_pdu_length; + con->max_send_pdu_length = conn_info->max_send_pdu_length; + con->first_seq_length = conn_info->max_io_size; + + return qed_sp_nvmetcp_conn_update(QED_AFFIN_HWFN(cdev), con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_nvmetcp_clear_conn_sq(struct qed_dev *cdev, u32 handle) +{ + struct qed_hash_nvmetcp_con *hash_con; + + hash_con = qed_nvmetcp_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + + return -EINVAL; + } + + return qed_sp_nvmetcp_conn_clear_sq(QED_AFFIN_HWFN(cdev), hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static int qed_nvmetcp_destroy_conn(struct qed_dev *cdev, + u32 handle, u8 abrt_conn) +{ + struct qed_hash_nvmetcp_con *hash_con; + + hash_con = qed_nvmetcp_get_hash(cdev, handle); + if (!hash_con) { + DP_NOTICE(cdev, "Failed to find connection for handle %d\n", + handle); + + return -EINVAL; + } + + hash_con->con->abortive_dsconnect = abrt_conn; + + return qed_sp_nvmetcp_conn_terminate(QED_AFFIN_HWFN(cdev), hash_con->con, + QED_SPQ_MODE_EBLOCK, NULL); +} + +static const struct qed_nvmetcp_ops qed_nvmetcp_ops_pass = { + .common = &qed_common_ops_pass, + .ll2 = &qed_ll2_ops_pass, + .fill_dev_info = &qed_fill_nvmetcp_dev_info, + .register_ops = &qed_register_nvmetcp_ops, + .start = &qed_nvmetcp_start, + .stop = &qed_nvmetcp_stop, + .acquire_conn = &qed_nvmetcp_acquire_conn, + .release_conn = &qed_nvmetcp_release_conn, + .offload_conn = &qed_nvmetcp_offload_conn, + .update_conn = &qed_nvmetcp_update_conn, + .destroy_conn = &qed_nvmetcp_destroy_conn, + .clear_sq = &qed_nvmetcp_clear_conn_sq, + .add_src_tcp_port_filter = &qed_llh_add_src_tcp_port_filter, + .remove_src_tcp_port_filter = &qed_llh_remove_src_tcp_port_filter, + .add_dst_tcp_port_filter = &qed_llh_add_dst_tcp_port_filter, + .remove_dst_tcp_port_filter = &qed_llh_remove_dst_tcp_port_filter, + .clear_all_filters = &qed_llh_clear_all_filters, + .init_read_io = &init_nvmetcp_host_read_task, + .init_write_io = &init_nvmetcp_host_write_task, + .init_icreq_exchange = &init_nvmetcp_init_conn_req_task, + .init_task_cleanup = &init_cleanup_task_nvmetcp +}; + +const struct qed_nvmetcp_ops *qed_get_nvmetcp_ops(void) +{ + return &qed_nvmetcp_ops_pass; +} +EXPORT_SYMBOL(qed_get_nvmetcp_ops); + +void qed_put_nvmetcp_ops(void) +{ +} +EXPORT_SYMBOL(qed_put_nvmetcp_ops); diff --git 
a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h new file mode 100644 index 0000000000..e5e9d075bf --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright 2021 Marvell. All rights reserved. */ + +#ifndef _QED_NVMETCP_H +#define _QED_NVMETCP_H + +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/tcp_common.h> +#include <linux/qed/qed_nvmetcp_if.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" +#include "qed_mcp.h" +#include "qed_sp.h" + +#define QED_NVMETCP_FW_CQ_SIZE (4 * 1024) + +/* tcp parameters */ +#define QED_TCP_FLOW_LABEL 0 +#define QED_TCP_TWO_MSL_TIMER 4000 +#define QED_TCP_HALF_WAY_CLOSE_TIMEOUT 10 +#define QED_TCP_MAX_FIN_RT 2 +#define QED_TCP_SWS_TIMER 5000 + +struct qed_nvmetcp_info { + spinlock_t lock; /* Connection resources. */ + struct list_head free_list; + u16 max_num_outstanding_tasks; + void *event_context; + nvmetcp_event_cb_t event_cb; +}; + +struct qed_hash_nvmetcp_con { + struct hlist_node node; + struct qed_nvmetcp_conn *con; +}; + +struct qed_nvmetcp_conn { + struct list_head list_entry; + bool free_on_delete; + u16 conn_id; + u32 icid; + u32 fw_cid; + u8 layer_code; + u8 offl_flags; + u8 connect_mode; + dma_addr_t sq_pbl_addr; + struct qed_chain r2tq; + struct qed_chain xhq; + struct qed_chain uhq; + u8 local_mac[6]; + u8 remote_mac[6]; + u8 ip_version; + u8 ka_max_probe_cnt; + u16 vlan_id; + u16 tcp_flags; + u32 remote_ip[4]; + u32 local_ip[4]; + u32 flow_label; + u32 ka_timeout; + u32 ka_interval; + u32 max_rt_time; + u8 ttl; + u8 tos_or_tc; + u16 remote_port; + u16 local_port; + u16 mss; + u8 rcv_wnd_scale; + u32 rcv_wnd; + u32 cwnd; + u8 update_flag; + u8 default_cq; + u8 abortive_dsconnect; + u32 max_seq_size; + u32 max_recv_pdu_length; + u32 max_send_pdu_length; + u32 first_seq_length; + u16 physical_q0; + u16 physical_q1; + u16 nvmetcp_cccid_max_range; + dma_addr_t nvmetcp_cccid_itid_table_addr; +}; + +#if IS_ENABLED(CONFIG_QED_NVMETCP) +int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn); +void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn); +void qed_nvmetcp_free(struct qed_hwfn *p_hwfn); + +#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */ +static inline int qed_nvmetcp_alloc(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void qed_nvmetcp_setup(struct qed_hwfn *p_hwfn) {} +static inline void qed_nvmetcp_free(struct qed_hwfn *p_hwfn) {} + +#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */ + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c new file mode 100644 index 0000000000..3b84d00cf9 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c @@ -0,0 +1,375 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* Copyright 2021 Marvell. All rights reserved. 
*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/qed/common_hsi.h> +#include <linux/qed/storage_common.h> +#include <linux/qed/nvmetcp_common.h> +#include <linux/qed/qed_nvmetcp_if.h> +#include "qed_nvmetcp_fw_funcs.h" + +#define NVMETCP_NUM_SGES_IN_CACHE 0x4 + +bool nvmetcp_is_slow_sgl(u16 num_sges, bool small_mid_sge) +{ + return (num_sges > SCSI_NUM_SGES_SLOW_SGL_THR && small_mid_sge); +} + +void init_scsi_sgl_context(struct scsi_sgl_params *ctx_sgl_params, + struct scsi_cached_sges *ctx_data_desc, + struct storage_sgl_task_params *sgl_params) +{ + u8 num_sges_to_init = (u8)(sgl_params->num_sges > NVMETCP_NUM_SGES_IN_CACHE ? + NVMETCP_NUM_SGES_IN_CACHE : sgl_params->num_sges); + u8 sge_index; + + /* sgl params */ + ctx_sgl_params->sgl_addr.lo = cpu_to_le32(sgl_params->sgl_phys_addr.lo); + ctx_sgl_params->sgl_addr.hi = cpu_to_le32(sgl_params->sgl_phys_addr.hi); + ctx_sgl_params->sgl_total_length = cpu_to_le32(sgl_params->total_buffer_size); + ctx_sgl_params->sgl_num_sges = cpu_to_le16(sgl_params->num_sges); + + for (sge_index = 0; sge_index < num_sges_to_init; sge_index++) { + ctx_data_desc->sge[sge_index].sge_addr.lo = + cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.lo); + ctx_data_desc->sge[sge_index].sge_addr.hi = + cpu_to_le32(sgl_params->sgl[sge_index].sge_addr.hi); + ctx_data_desc->sge[sge_index].sge_len = + cpu_to_le32(sgl_params->sgl[sge_index].sge_len); + } +} + +static inline u32 calc_rw_task_size(struct nvmetcp_task_params *task_params, + enum nvmetcp_task_type task_type) +{ + u32 io_size; + + if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) + io_size = task_params->tx_io_size; + else + io_size = task_params->rx_io_size; + + if (unlikely(!io_size)) + return 0; + + return io_size; +} + +static inline void init_sqe(struct nvmetcp_task_params *task_params, + struct storage_sgl_task_params *sgl_task_params, + enum nvmetcp_task_type task_type) +{ + if (!task_params->sqe) + return; + + memset(task_params->sqe, 0, sizeof(*task_params->sqe)); + task_params->sqe->task_id = cpu_to_le16(task_params->itid); + + switch (task_type) { + case NVMETCP_TASK_TYPE_HOST_WRITE: { + u32 buf_size = 0; + u32 num_sges = 0; + + SET_FIELD(task_params->sqe->contlen_cdbsize, + NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1); + SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, + NVMETCP_WQE_TYPE_NORMAL); + if (task_params->tx_io_size) { + if (task_params->send_write_incapsule) + buf_size = calc_rw_task_size(task_params, task_type); + + if (nvmetcp_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge)) + num_sges = NVMETCP_WQE_NUM_SGES_SLOWIO; + else + num_sges = min((u16)sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR); + } + SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_NUM_SGES, num_sges); + SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, buf_size); + } break; + + case NVMETCP_TASK_TYPE_HOST_READ: { + SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, + NVMETCP_WQE_TYPE_NORMAL); + SET_FIELD(task_params->sqe->contlen_cdbsize, + NVMETCP_WQE_CDB_SIZE_OR_NVMETCP_CMD, 1); + } break; + + case NVMETCP_TASK_TYPE_INIT_CONN_REQUEST: { + SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, + NVMETCP_WQE_TYPE_MIDDLE_PATH); + + if (task_params->tx_io_size) { + SET_FIELD(task_params->sqe->contlen_cdbsize, NVMETCP_WQE_CONT_LEN, + task_params->tx_io_size); + SET_FIELD(task_params->sqe->flags, 
NVMETCP_WQE_NUM_SGES, + min((u16)sgl_task_params->num_sges, + (u16)SCSI_NUM_SGES_SLOW_SGL_THR)); + } + } break; + + case NVMETCP_TASK_TYPE_CLEANUP: + SET_FIELD(task_params->sqe->flags, NVMETCP_WQE_WQE_TYPE, + NVMETCP_WQE_TYPE_TASK_CLEANUP); + + default: + break; + } +} + +/* The following function initializes of NVMeTCP task params */ +static inline void +init_nvmetcp_task_params(struct e5_nvmetcp_task_context *context, + struct nvmetcp_task_params *task_params, + enum nvmetcp_task_type task_type) +{ + context->ystorm_st_context.state.cccid = task_params->host_cccid; + SET_FIELD(context->ustorm_st_context.error_flags, USTORM_NVMETCP_TASK_ST_CTX_NVME_TCP, 1); + context->ustorm_st_context.nvme_tcp_opaque_lo = cpu_to_le32(task_params->opq.lo); + context->ustorm_st_context.nvme_tcp_opaque_hi = cpu_to_le32(task_params->opq.hi); +} + +/* The following function initializes default values to all tasks */ +static inline void +init_default_nvmetcp_task(struct nvmetcp_task_params *task_params, + void *pdu_header, void *nvme_cmd, + enum nvmetcp_task_type task_type) +{ + struct e5_nvmetcp_task_context *context = task_params->context; + const u8 val_byte = context->mstorm_ag_context.cdu_validation; + u8 dw_index; + + memset(context, 0, sizeof(*context)); + init_nvmetcp_task_params(context, task_params, + (enum nvmetcp_task_type)task_type); + + /* Swapping requirements used below, will be removed in future FW versions */ + if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE || + task_type == NVMETCP_TASK_TYPE_HOST_READ) { + for (dw_index = 0; + dw_index < QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32); + dw_index++) + context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = + cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index])); + + for (dw_index = QED_NVMETCP_CMN_HDR_SIZE / sizeof(u32); + dw_index < QED_NVMETCP_CMD_HDR_SIZE / sizeof(u32); + dw_index++) + context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = + cpu_to_le32(__swab32(((u32 *)nvme_cmd)[dw_index - 2])); + } else { + for (dw_index = 0; + dw_index < QED_NVMETCP_NON_IO_HDR_SIZE / sizeof(u32); + dw_index++) + context->ystorm_st_context.pdu_hdr.task_hdr.reg[dw_index] = + cpu_to_le32(__swab32(((u32 *)pdu_header)[dw_index])); + } + + /* M-Storm Context: */ + context->mstorm_ag_context.cdu_validation = val_byte; + context->mstorm_st_context.task_type = (u8)(task_type); + context->mstorm_ag_context.task_cid = cpu_to_le16(task_params->conn_icid); + + /* Ustorm Context: */ + SET_FIELD(context->ustorm_ag_context.flags1, E5_USTORM_NVMETCP_TASK_AG_CTX_R2T2RECV, 1); + context->ustorm_st_context.task_type = (u8)(task_type); + context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number; + context->ustorm_ag_context.icid = cpu_to_le16(task_params->conn_icid); +} + +/* The following function initializes the U-Storm Task Contexts */ +static inline void +init_ustorm_task_contexts(struct ustorm_nvmetcp_task_st_ctx *ustorm_st_context, + struct e5_ustorm_nvmetcp_task_ag_ctx *ustorm_ag_context, + u32 remaining_recv_len, + u32 expected_data_transfer_len, u8 num_sges, + bool tx_dif_conn_err_en) +{ + /* Remaining data to be received in bytes. Used in validations*/ + ustorm_st_context->rem_rcv_len = cpu_to_le32(remaining_recv_len); + ustorm_ag_context->exp_data_acked = cpu_to_le32(expected_data_transfer_len); + ustorm_st_context->exp_data_transfer_len = cpu_to_le32(expected_data_transfer_len); + SET_FIELD(ustorm_st_context->reg1_map, REG1_NUM_SGES, num_sges); + SET_FIELD(ustorm_ag_context->flags2, E5_USTORM_NVMETCP_TASK_AG_CTX_DIF_ERROR_CF_EN, + tx_dif_conn_err_en ? 
1 : 0); +} + +/* The following function initializes Local Completion Contexts: */ +static inline void +set_local_completion_context(struct e5_nvmetcp_task_context *context) +{ + SET_FIELD(context->ystorm_st_context.state.flags, + YSTORM_NVMETCP_TASK_STATE_LOCAL_COMP, 1); + SET_FIELD(context->ustorm_st_context.flags, + USTORM_NVMETCP_TASK_ST_CTX_LOCAL_COMP, 1); +} + +/* Common Fastpath task init function: */ +static inline void +init_rw_nvmetcp_task(struct nvmetcp_task_params *task_params, + enum nvmetcp_task_type task_type, + void *pdu_header, void *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params) +{ + struct e5_nvmetcp_task_context *context = task_params->context; + u32 task_size = calc_rw_task_size(task_params, task_type); + bool slow_io = false; + u8 num_sges = 0; + + init_default_nvmetcp_task(task_params, pdu_header, nvme_cmd, task_type); + + /* Tx/Rx: */ + if (task_params->tx_io_size) { + /* if data to transmit: */ + init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params, + &context->ystorm_st_context.state.data_desc, + sgl_task_params); + slow_io = nvmetcp_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge); + num_sges = + (u8)(!slow_io ? min((u32)sgl_task_params->num_sges, + (u32)SCSI_NUM_SGES_SLOW_SGL_THR) : + NVMETCP_WQE_NUM_SGES_SLOWIO); + if (slow_io) { + SET_FIELD(context->ystorm_st_context.state.flags, + YSTORM_NVMETCP_TASK_STATE_SLOW_IO, 1); + } + } else if (task_params->rx_io_size) { + /* if data to receive: */ + init_scsi_sgl_context(&context->mstorm_st_context.sgl_params, + &context->mstorm_st_context.data_desc, + sgl_task_params); + num_sges = + (u8)(!nvmetcp_is_slow_sgl(sgl_task_params->num_sges, + sgl_task_params->small_mid_sge) ? + min((u32)sgl_task_params->num_sges, + (u32)SCSI_NUM_SGES_SLOW_SGL_THR) : + NVMETCP_WQE_NUM_SGES_SLOWIO); + context->mstorm_st_context.rem_task_size = cpu_to_le32(task_size); + } + + /* Ustorm context: */ + init_ustorm_task_contexts(&context->ustorm_st_context, + &context->ustorm_ag_context, + /* Remaining Receive length is the Task Size */ + task_size, + /* The size of the transmitted task */ + task_size, + /* num_sges */ + num_sges, + false); + + /* Set exp_data_acked */ + if (task_type == NVMETCP_TASK_TYPE_HOST_WRITE) { + if (task_params->send_write_incapsule) + context->ustorm_ag_context.exp_data_acked = task_size; + else + context->ustorm_ag_context.exp_data_acked = 0; + } else if (task_type == NVMETCP_TASK_TYPE_HOST_READ) { + context->ustorm_ag_context.exp_data_acked = 0; + } + + context->ustorm_ag_context.exp_cont_len = 0; + init_sqe(task_params, sgl_task_params, task_type); +} + +static void +init_common_initiator_read_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params) +{ + init_rw_nvmetcp_task(task_params, NVMETCP_TASK_TYPE_HOST_READ, + cmd_pdu_header, nvme_cmd, sgl_task_params); +} + +void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params) +{ + init_common_initiator_read_task(task_params, (void *)cmd_pdu_header, + (void *)nvme_cmd, sgl_task_params); +} + +static void +init_common_initiator_write_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params) +{ + init_rw_nvmetcp_task(task_params, 
NVMETCP_TASK_TYPE_HOST_WRITE, + cmd_pdu_header, nvme_cmd, sgl_task_params); +} + +void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params) +{ + init_common_initiator_write_task(task_params, (void *)cmd_pdu_header, + (void *)nvme_cmd, sgl_task_params); +} + +static void +init_common_login_request_task(struct nvmetcp_task_params *task_params, + void *login_req_pdu_header, + struct storage_sgl_task_params *tx_sgl_task_params, + struct storage_sgl_task_params *rx_sgl_task_params) +{ + struct e5_nvmetcp_task_context *context = task_params->context; + + init_default_nvmetcp_task(task_params, (void *)login_req_pdu_header, NULL, + NVMETCP_TASK_TYPE_INIT_CONN_REQUEST); + + /* Ustorm Context: */ + init_ustorm_task_contexts(&context->ustorm_st_context, + &context->ustorm_ag_context, + + /* Remaining Receive length is the Task Size */ + task_params->rx_io_size ? + rx_sgl_task_params->total_buffer_size : 0, + + /* The size of the transmitted task */ + task_params->tx_io_size ? + tx_sgl_task_params->total_buffer_size : 0, + 0, /* num_sges */ + 0); /* tx_dif_conn_err_en */ + + /* SGL context: */ + if (task_params->tx_io_size) + init_scsi_sgl_context(&context->ystorm_st_context.state.sgl_params, + &context->ystorm_st_context.state.data_desc, + tx_sgl_task_params); + if (task_params->rx_io_size) + init_scsi_sgl_context(&context->mstorm_st_context.sgl_params, + &context->mstorm_st_context.data_desc, + rx_sgl_task_params); + + context->mstorm_st_context.rem_task_size = + cpu_to_le32(task_params->rx_io_size ? + rx_sgl_task_params->total_buffer_size : 0); + init_sqe(task_params, tx_sgl_task_params, NVMETCP_TASK_TYPE_INIT_CONN_REQUEST); +} + +/* The following function initializes Login task in Host mode: */ +void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr, + struct storage_sgl_task_params *tx_sgl_task_params, + struct storage_sgl_task_params *rx_sgl_task_params) +{ + init_common_login_request_task(task_params, init_conn_req_pdu_hdr, + tx_sgl_task_params, rx_sgl_task_params); +} + +void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params) +{ + init_sqe(task_params, NULL, NVMETCP_TASK_TYPE_CLEANUP); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h new file mode 100644 index 0000000000..1d5ddc217b --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright 2021 Marvell. All rights reserved. 
*/ + +#ifndef _QED_NVMETCP_FW_FUNCS_H +#define _QED_NVMETCP_FW_FUNCS_H + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/list.h> +#include <linux/mm.h> +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/qed/common_hsi.h> +#include <linux/qed/storage_common.h> +#include <linux/qed/nvmetcp_common.h> +#include <linux/qed/qed_nvmetcp_if.h> + +#if IS_ENABLED(CONFIG_QED_NVMETCP) + +void init_nvmetcp_host_read_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params); +void init_nvmetcp_host_write_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_cmd_pdu *cmd_pdu_header, + struct nvme_command *nvme_cmd, + struct storage_sgl_task_params *sgl_task_params); +void init_nvmetcp_init_conn_req_task(struct nvmetcp_task_params *task_params, + struct nvme_tcp_icreq_pdu *init_conn_req_pdu_hdr, + struct storage_sgl_task_params *tx_sgl_task_params, + struct storage_sgl_task_params *rx_sgl_task_params); +void init_cleanup_task_nvmetcp(struct nvmetcp_task_params *task_params); + +#else /* IS_ENABLED(CONFIG_QED_NVMETCP) */ + +#endif /* IS_ENABLED(CONFIG_QED_NVMETCP) */ + +#endif /* _QED_NVMETCP_FW_FUNCS_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c new file mode 100644 index 0000000000..5d725f59db --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/types.h> +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_iscsi.h" +#include "qed_ll2.h" +#include "qed_ooo.h" +#include "qed_cxt.h" +#include "qed_nvmetcp.h" +static struct qed_ooo_archipelago +*qed_ooo_seek_archipelago(struct qed_hwfn *p_hwfn, + struct qed_ooo_info + *p_ooo_info, + u32 cid) +{ + u32 idx = (cid & 0xffff) - p_ooo_info->cid_base; + struct qed_ooo_archipelago *p_archipelago; + + if (unlikely(idx >= p_ooo_info->max_num_archipelagos)) + return NULL; + + p_archipelago = &p_ooo_info->p_archipelagos_mem[idx]; + + if (unlikely(list_empty(&p_archipelago->isles_list))) + return NULL; + + return p_archipelago; +} + +static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 isle) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_isle = NULL; + u8 the_num_of_isle = 1; + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (unlikely(!p_archipelago)) { + DP_NOTICE(p_hwfn, + "Connection %d is not found in OOO list\n", cid); + return NULL; + } + + list_for_each_entry(p_isle, &p_archipelago->isles_list, list_entry) { + if (the_num_of_isle == isle) + return p_isle; + the_num_of_isle++; + } + + return NULL; +} + +void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe) +{ + struct qed_ooo_history *p_history = &p_ooo_info->ooo_history; + + if (p_history->head_idx == p_history->num_of_cqes) + p_history->head_idx = 0; + p_history->p_cqes[p_history->head_idx] = *p_cqe; + p_history->head_idx++; +} + +int qed_ooo_alloc(struct qed_hwfn *p_hwfn) +{ + u16 max_num_archipelagos = 0, 
cid_base; + struct qed_ooo_info *p_ooo_info; + enum protocol_type proto; + u16 max_num_isles = 0; + u32 i; + + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ISCSI: + case QED_PCI_NVMETCP: + proto = PROTOCOLID_TCP_ULP; + break; + case QED_PCI_ETH_RDMA: + case QED_PCI_ETH_IWARP: + proto = PROTOCOLID_IWARP; + break; + default: + DP_NOTICE(p_hwfn, + "Failed to allocate qed_ooo_info: unknown personality\n"); + return -EINVAL; + } + + max_num_archipelagos = (u16)qed_cxt_get_proto_cid_count(p_hwfn, proto, + NULL); + max_num_isles = QED_MAX_NUM_ISLES + max_num_archipelagos; + cid_base = (u16)qed_cxt_get_proto_cid_start(p_hwfn, proto); + + if (!max_num_archipelagos) { + DP_NOTICE(p_hwfn, + "Failed to allocate qed_ooo_info: unknown amount of connections\n"); + return -EINVAL; + } + + p_ooo_info = kzalloc(sizeof(*p_ooo_info), GFP_KERNEL); + if (!p_ooo_info) + return -ENOMEM; + + p_ooo_info->cid_base = cid_base; + p_ooo_info->max_num_archipelagos = max_num_archipelagos; + + INIT_LIST_HEAD(&p_ooo_info->free_buffers_list); + INIT_LIST_HEAD(&p_ooo_info->ready_buffers_list); + INIT_LIST_HEAD(&p_ooo_info->free_isles_list); + + p_ooo_info->p_isles_mem = kcalloc(max_num_isles, + sizeof(struct qed_ooo_isle), + GFP_KERNEL); + if (!p_ooo_info->p_isles_mem) + goto no_isles_mem; + + for (i = 0; i < max_num_isles; i++) { + INIT_LIST_HEAD(&p_ooo_info->p_isles_mem[i].buffers_list); + list_add_tail(&p_ooo_info->p_isles_mem[i].list_entry, + &p_ooo_info->free_isles_list); + } + + p_ooo_info->p_archipelagos_mem = + kcalloc(max_num_archipelagos, + sizeof(struct qed_ooo_archipelago), + GFP_KERNEL); + if (!p_ooo_info->p_archipelagos_mem) + goto no_archipelagos_mem; + + for (i = 0; i < max_num_archipelagos; i++) + INIT_LIST_HEAD(&p_ooo_info->p_archipelagos_mem[i].isles_list); + + p_ooo_info->ooo_history.p_cqes = + kcalloc(QED_MAX_NUM_OOO_HISTORY_ENTRIES, + sizeof(struct ooo_opaque), + GFP_KERNEL); + if (!p_ooo_info->ooo_history.p_cqes) + goto no_history_mem; + + p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES; + + p_hwfn->p_ooo_info = p_ooo_info; + return 0; + +no_history_mem: + kfree(p_ooo_info->p_archipelagos_mem); +no_archipelagos_mem: + kfree(p_ooo_info->p_isles_mem); +no_isles_mem: + kfree(p_ooo_info); + return -ENOMEM; +} + +void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid) +{ + struct qed_ooo_archipelago *p_archipelago; + struct qed_ooo_buffer *p_buffer; + struct qed_ooo_isle *p_isle; + + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (!p_archipelago) + return; + + while (!list_empty(&p_archipelago->isles_list)) { + p_isle = list_first_entry(&p_archipelago->isles_list, + struct qed_ooo_isle, list_entry); + + list_del(&p_isle->list_entry); + + while (!list_empty(&p_isle->buffers_list)) { + p_buffer = list_first_entry(&p_isle->buffers_list, + struct qed_ooo_buffer, + list_entry); + + if (!p_buffer) + break; + + list_move_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + } + list_add_tail(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + } +} + +void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_archipelago *p_archipelago; + struct qed_ooo_buffer *p_buffer; + struct qed_ooo_isle *p_isle; + u32 i; + + for (i = 0; i < p_ooo_info->max_num_archipelagos; i++) { + p_archipelago = &(p_ooo_info->p_archipelagos_mem[i]); + + while (!list_empty(&p_archipelago->isles_list)) { + p_isle = list_first_entry(&p_archipelago->isles_list, + struct 
qed_ooo_isle, + list_entry); + + list_del(&p_isle->list_entry); + + while (!list_empty(&p_isle->buffers_list)) { + p_buffer = + list_first_entry(&p_isle->buffers_list, + struct qed_ooo_buffer, + list_entry); + + if (!p_buffer) + break; + + list_move_tail(&p_buffer->list_entry, + &p_ooo_info->free_buffers_list); + } + list_add_tail(&p_isle->list_entry, + &p_ooo_info->free_isles_list); + } + } + if (!list_empty(&p_ooo_info->ready_buffers_list)) + list_splice_tail_init(&p_ooo_info->ready_buffers_list, + &p_ooo_info->free_buffers_list); +} + +void qed_ooo_setup(struct qed_hwfn *p_hwfn) +{ + qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info); + memset(p_hwfn->p_ooo_info->ooo_history.p_cqes, 0, + p_hwfn->p_ooo_info->ooo_history.num_of_cqes * + sizeof(struct ooo_opaque)); + p_hwfn->p_ooo_info->ooo_history.head_idx = 0; +} + +void qed_ooo_free(struct qed_hwfn *p_hwfn) +{ + struct qed_ooo_info *p_ooo_info = p_hwfn->p_ooo_info; + struct qed_ooo_buffer *p_buffer; + + if (!p_ooo_info) + return; + + qed_ooo_release_all_isles(p_hwfn, p_ooo_info); + while (!list_empty(&p_ooo_info->free_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, + struct qed_ooo_buffer, list_entry); + + if (!p_buffer) + break; + + list_del(&p_buffer->list_entry); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_buffer->rx_buffer_size, + p_buffer->rx_buffer_virt_addr, + p_buffer->rx_buffer_phys_addr); + kfree(p_buffer); + } + + kfree(p_ooo_info->p_isles_mem); + kfree(p_ooo_info->p_archipelagos_mem); + kfree(p_ooo_info->ooo_history.p_cqes); + kfree(p_ooo_info); + p_hwfn->p_ooo_info = NULL; +} + +void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer) +{ + list_add_tail(&p_buffer->list_entry, &p_ooo_info->free_buffers_list); +} + +struct qed_ooo_buffer *qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer = NULL; + + if (!list_empty(&p_ooo_info->free_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->free_buffers_list, + struct qed_ooo_buffer, list_entry); + + list_del(&p_buffer->list_entry); + } + + return p_buffer; +} + +void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, u8 on_tail) +{ + if (on_tail) + list_add_tail(&p_buffer->list_entry, + &p_ooo_info->ready_buffers_list); + else + list_add(&p_buffer->list_entry, + &p_ooo_info->ready_buffers_list); +} + +struct qed_ooo_buffer *qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) +{ + struct qed_ooo_buffer *p_buffer = NULL; + + if (!list_empty(&p_ooo_info->ready_buffers_list)) { + p_buffer = list_first_entry(&p_ooo_info->ready_buffers_list, + struct qed_ooo_buffer, list_entry); + + list_del(&p_buffer->list_entry); + } + + return p_buffer; +} + +void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size) +{ + struct qed_ooo_isle *p_isle = NULL; + u8 isle_idx; + + for (isle_idx = 0; isle_idx < drop_size; isle_idx++) { + p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, drop_isle); + if (!p_isle) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", + drop_isle, cid); + return; + } + if (list_empty(&p_isle->buffers_list)) + DP_NOTICE(p_hwfn, + "Isle %d is empty(cid %d)\n", drop_isle, cid); + else + list_splice_tail_init(&p_isle->buffers_list, + &p_ooo_info->free_buffers_list); + + list_del(&p_isle->list_entry); + 
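
Editorial aside: the four buffer helpers above (qed_ooo_put_free_buffer, qed_ooo_get_free_buffer, qed_ooo_put_ready_buffer, qed_ooo_get_ready_buffer) show the fast-path discipline of this file. Out-of-order Rx buffers are never allocated or freed per packet; they only circulate between free_buffers_list and ready_buffers_list, and isles merely borrow them before splicing them back to the free pool. The following is a minimal, self-contained user-space sketch of that recycling pattern. It is illustrative only, not driver code: the toy_* names are invented, and it uses a LIFO list for brevity where the driver keeps FIFO order with list_add_tail().

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for struct qed_ooo_buffer and the two driver lists
     * (not part of the driver; names are invented for this sketch). */
    struct toy_buf {
            struct toy_buf *next;
            int id;
    };

    static void toy_put(struct toy_buf **list, struct toy_buf *b)
    {
            b->next = *list;        /* LIFO for brevity; the driver is FIFO */
            *list = b;
    }

    static struct toy_buf *toy_get(struct toy_buf **list)
    {
            struct toy_buf *b = *list;

            if (b)
                    *list = b->next;
            return b;
    }

    int main(void)
    {
            struct toy_buf pool[3] = { { NULL, 0 }, { NULL, 1 }, { NULL, 2 } };
            struct toy_buf *free_list = NULL, *ready_list = NULL, *b;
            int i;

            /* Pre-populate the free pool once, as the LL2/OOO setup path
             * does via qed_ooo_put_free_buffer(). */
            for (i = 0; i < 3; i++)
                    toy_put(&free_list, &pool[i]);

            /* Fast path: take a free buffer, hand it over as "ready",
             * then recycle it back into the free pool after delivery. */
            b = toy_get(&free_list);        /* qed_ooo_get_free_buffer()  */
            toy_put(&ready_list, b);        /* qed_ooo_put_ready_buffer() */

            while ((b = toy_get(&ready_list)))
                    toy_put(&free_list, b); /* back to the free pool      */

            printf("buffer %d returned to the free pool\n", free_list->id);
            return 0;
    }
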
p_ooo_info->cur_isles_number--; + list_add(&p_isle->list_entry, &p_ooo_info->free_isles_list); + } +} + +void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer) +{ + struct qed_ooo_archipelago *p_archipelago = NULL; + struct qed_ooo_isle *p_prev_isle = NULL; + struct qed_ooo_isle *p_isle = NULL; + + if (ooo_isle > 1) { + p_prev_isle = qed_ooo_seek_isle(p_hwfn, + p_ooo_info, cid, ooo_isle - 1); + if (unlikely(!p_prev_isle)) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", + ooo_isle - 1, cid); + return; + } + } + p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid); + if (unlikely(!p_archipelago && ooo_isle != 1)) { + DP_NOTICE(p_hwfn, + "Connection %d is not found in OOO list\n", cid); + return; + } + + if (!list_empty(&p_ooo_info->free_isles_list)) { + p_isle = list_first_entry(&p_ooo_info->free_isles_list, + struct qed_ooo_isle, list_entry); + + list_del(&p_isle->list_entry); + if (unlikely(!list_empty(&p_isle->buffers_list))) { + DP_NOTICE(p_hwfn, "Free isle is not empty\n"); + INIT_LIST_HEAD(&p_isle->buffers_list); + } + } else { + DP_NOTICE(p_hwfn, "No more free isles\n"); + return; + } + + if (!p_archipelago) { + u32 idx = (cid & 0xffff) - p_ooo_info->cid_base; + + p_archipelago = &p_ooo_info->p_archipelagos_mem[idx]; + } + + list_add(&p_buffer->list_entry, &p_isle->buffers_list); + p_ooo_info->cur_isles_number++; + p_ooo_info->gen_isles_number++; + + if (p_ooo_info->cur_isles_number > p_ooo_info->max_isles_number) + p_ooo_info->max_isles_number = p_ooo_info->cur_isles_number; + + if (!p_prev_isle) + list_add(&p_isle->list_entry, &p_archipelago->isles_list); + else + list_add(&p_isle->list_entry, &p_prev_isle->list_entry); +} + +void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, u8 buffer_side) +{ + struct qed_ooo_isle *p_isle = NULL; + + p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle); + if (unlikely(!p_isle)) { + DP_NOTICE(p_hwfn, + "Isle %d is not found(cid %d)\n", ooo_isle, cid); + return; + } + + if (unlikely(buffer_side == QED_OOO_LEFT_BUF)) + list_add(&p_buffer->list_entry, &p_isle->buffers_list); + else + list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list); +} + +void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, u8 left_isle) +{ + struct qed_ooo_isle *p_right_isle = NULL; + struct qed_ooo_isle *p_left_isle = NULL; + + p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, + left_isle + 1); + if (unlikely(!p_right_isle)) { + DP_NOTICE(p_hwfn, + "Right isle %d is not found(cid %d)\n", + left_isle + 1, cid); + return; + } + + list_del(&p_right_isle->list_entry); + p_ooo_info->cur_isles_number--; + if (left_isle) { + p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, + left_isle); + if (unlikely(!p_left_isle)) { + DP_NOTICE(p_hwfn, + "Left isle %d is not found(cid %d)\n", + left_isle, cid); + return; + } + list_splice_tail_init(&p_right_isle->buffers_list, + &p_left_isle->buffers_list); + } else { + list_splice_tail_init(&p_right_isle->buffers_list, + &p_ooo_info->ready_buffers_list); + } + list_add_tail(&p_right_isle->list_entry, &p_ooo_info->free_isles_list); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.h b/drivers/net/ethernet/qlogic/qed/qed_ooo.h new file mode 100644 index 0000000000..3a7e1b59d6 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.h @@ -0,0 +1,172 @@ +/* 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_OOO_H +#define _QED_OOO_H +#include <linux/types.h> +#include <linux/list.h> +#include <linux/slab.h> +#include "qed.h" + +#define QED_MAX_NUM_ISLES 256 +#define QED_MAX_NUM_OOO_HISTORY_ENTRIES 512 + +#define QED_OOO_LEFT_BUF 0 +#define QED_OOO_RIGHT_BUF 1 + +struct qed_ooo_buffer { + struct list_head list_entry; + void *rx_buffer_virt_addr; + dma_addr_t rx_buffer_phys_addr; + u32 rx_buffer_size; + u16 packet_length; + u16 parse_flags; + u16 vlan; + u8 placement_offset; +}; + +struct qed_ooo_isle { + struct list_head list_entry; + struct list_head buffers_list; +}; + +struct qed_ooo_archipelago { + struct list_head isles_list; +}; + +struct qed_ooo_history { + struct ooo_opaque *p_cqes; + u32 head_idx; + u32 num_of_cqes; +}; + +struct qed_ooo_info { + struct list_head free_buffers_list; + struct list_head ready_buffers_list; + struct list_head free_isles_list; + struct qed_ooo_archipelago *p_archipelagos_mem; + struct qed_ooo_isle *p_isles_mem; + struct qed_ooo_history ooo_history; + u32 cur_isles_number; + u32 max_isles_number; + u32 gen_isles_number; + u16 max_num_archipelagos; + u16 cid_base; +}; + +#if IS_ENABLED(CONFIG_QED_OOO) +void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe); + +int qed_ooo_alloc(struct qed_hwfn *p_hwfn); + +void qed_ooo_setup(struct qed_hwfn *p_hwfn); + +void qed_ooo_free(struct qed_hwfn *p_hwfn); + +void qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid); + +void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer); + +struct qed_ooo_buffer * +qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, u8 on_tail); + +struct qed_ooo_buffer * +qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info); + +void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size); + +void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, struct qed_ooo_buffer *p_buffer); + +void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, + u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, u8 buffer_side); + +void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, + u8 left_isle); +#else /* IS_ENABLED(CONFIG_QED_ISCSI) */ +static inline void qed_ooo_save_history_entry(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct ooo_opaque *p_cqe) {} + +static inline int qed_ooo_alloc(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void qed_ooo_setup(struct qed_hwfn *p_hwfn) {} + +static inline void qed_ooo_free(struct qed_hwfn *p_hwfn) {} + +static inline void +qed_ooo_release_connection_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid) {} + +static inline void qed_ooo_release_all_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) + {} + +static inline void 
qed_ooo_put_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer) {} + +static inline struct qed_ooo_buffer * +qed_ooo_get_free_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) { return NULL; } + +static inline void qed_ooo_put_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + struct qed_ooo_buffer *p_buffer, + u8 on_tail) {} + +static inline struct qed_ooo_buffer * +qed_ooo_get_ready_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info) { return NULL; } + +static inline void qed_ooo_delete_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 drop_isle, u8 drop_size) {} + +static inline void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer) {} + +static inline void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, + u32 cid, u8 ooo_isle, + struct qed_ooo_buffer *p_buffer, + u8 buffer_side) {} + +static inline void qed_ooo_join_isles(struct qed_hwfn *p_hwfn, + struct qed_ooo_info *p_ooo_info, u32 cid, + u8 left_isle) {} +#endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c new file mode 100644 index 0000000000..295ce435a1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/types.h> +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_hw.h" +#include "qed_l2.h" +#include "qed_mcp.h" +#include "qed_ptp.h" +#include "qed_reg_addr.h" + +/* 16 nano second time quantas to wait before making a Drift adjustment */ +#define QED_DRIFT_CNTR_TIME_QUANTA_SHIFT 0 +/* Nano seconds to add/subtract when making a Drift adjustment */ +#define QED_DRIFT_CNTR_ADJUSTMENT_SHIFT 28 +/* Add/subtract the Adjustment_Value when making a Drift adjustment */ +#define QED_DRIFT_CNTR_DIRECTION_SHIFT 31 +#define QED_TIMESTAMP_MASK BIT(16) +/* Param mask for Hardware to detect/timestamp the L2/L4 unicast PTP packets */ +#define QED_PTP_UCAST_PARAM_MASK 0x70F + +static enum qed_resc_lock qed_ptcdev_to_resc(struct qed_hwfn *p_hwfn) +{ + switch (MFW_PORT(p_hwfn)) { + case 0: + return QED_RESC_LOCK_PTP_PORT0; + case 1: + return QED_RESC_LOCK_PTP_PORT1; + case 2: + return QED_RESC_LOCK_PTP_PORT2; + case 3: + return QED_RESC_LOCK_PTP_PORT3; + default: + return QED_RESC_LOCK_RESC_INVALID; + } +} + +static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_lock_params params; + enum qed_resc_lock resource; + int rc; + + resource = qed_ptcdev_to_resc(p_hwfn); + if (resource == QED_RESC_LOCK_RESC_INVALID) + return -EINVAL; + + qed_mcp_resc_lock_default_init(¶ms, NULL, resource, true); + + rc = qed_mcp_resc_lock(p_hwfn, p_ptt, ¶ms); + if (rc && rc != -EINVAL) { + return rc; + } else if (rc == -EINVAL) { + /* MFW doesn't support resource locking, first PF on the port + * has lock ownership. 
+ */ + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) + return 0; + + DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); + return -EBUSY; + } else if (!params.b_granted) { + DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n"); + return -EBUSY; + } + + return 0; +} + +static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + struct qed_resc_unlock_params params; + enum qed_resc_lock resource; + int rc; + + resource = qed_ptcdev_to_resc(p_hwfn); + if (resource == QED_RESC_LOCK_RESC_INVALID) + return -EINVAL; + + qed_mcp_resc_lock_default_init(NULL, ¶ms, resource, true); + + rc = qed_mcp_resc_unlock(p_hwfn, p_ptt, ¶ms); + if (rc == -EINVAL) { + /* MFW doesn't support locking, first PF has lock ownership */ + if (p_hwfn->abs_pf_id < p_hwfn->cdev->num_ports_in_engine) { + rc = 0; + } else { + DP_INFO(p_hwfn, "PF doesn't have lock ownership\n"); + return -EINVAL; + } + } else if (rc) { + DP_INFO(p_hwfn, "Failed to release the ptp resource lock\n"); + } + + return rc; +} + +/* Read Rx timestamp */ +static int qed_ptp_hw_read_rx_ts(struct qed_dev *cdev, u64 *timestamp) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 val; + + *timestamp = 0; + val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID); + if (!(val & QED_TIMESTAMP_MASK)) { + DP_INFO(p_hwfn, "Invalid Rx timestamp, buf_seqid = %d\n", val); + return -EINVAL; + } + + val = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_LSB); + *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_TS_MSB); + *timestamp <<= 32; + *timestamp |= val; + + /* Reset timestamp register to allow new timestamp */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + + return 0; +} + +/* Read Tx timestamp */ +static int qed_ptp_hw_read_tx_ts(struct qed_dev *cdev, u64 *timestamp) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 val; + + *timestamp = 0; + val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID); + if (!(val & QED_TIMESTAMP_MASK)) { + DP_VERBOSE(p_hwfn, QED_MSG_DEBUG, + "Invalid Tx timestamp, buf_seqid = %08x\n", val); + return -EINVAL; + } + + val = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_LSB); + *timestamp = qed_rd(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_TS_MSB); + *timestamp <<= 32; + *timestamp |= val; + + /* Reset timestamp register to allow new timestamp */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); + + return 0; +} + +/* Read Phy Hardware Clock */ +static int qed_ptp_hw_read_cc(struct qed_dev *cdev, u64 *phc_cycles) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 temp = 0; + + temp = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_LSB); + *phc_cycles = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_SYNC_TIME_MSB); + *phc_cycles <<= 32; + *phc_cycles |= temp; + + return 0; +} + +/* Filter PTP protocol packets that need to be timestamped */ +static int qed_ptp_hw_cfg_filters(struct qed_dev *cdev, + enum qed_ptp_filter_type rx_type, + enum qed_ptp_hwtstamp_tx_type tx_type) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 rule_mask, enable_cfg = 0x0; + + switch (rx_type) { + case QED_PTP_FILTER_NONE: + enable_cfg = 0x0; + rule_mask = 0x3FFF; + break; + case QED_PTP_FILTER_ALL: + enable_cfg = 0x7; + rule_mask = 0x3CAA; + break; + case QED_PTP_FILTER_V1_L4_EVENT: + enable_cfg = 0x3; + rule_mask = 0x3FFA; + break; + 
case QED_PTP_FILTER_V1_L4_GEN: + enable_cfg = 0x3; + rule_mask = 0x3FFE; + break; + case QED_PTP_FILTER_V2_L4_EVENT: + enable_cfg = 0x5; + rule_mask = 0x3FAA; + break; + case QED_PTP_FILTER_V2_L4_GEN: + enable_cfg = 0x5; + rule_mask = 0x3FEE; + break; + case QED_PTP_FILTER_V2_L2_EVENT: + enable_cfg = 0x5; + rule_mask = 0x3CFF; + break; + case QED_PTP_FILTER_V2_L2_GEN: + enable_cfg = 0x5; + rule_mask = 0x3EFF; + break; + case QED_PTP_FILTER_V2_EVENT: + enable_cfg = 0x5; + rule_mask = 0x3CAA; + break; + case QED_PTP_FILTER_V2_GEN: + enable_cfg = 0x5; + rule_mask = 0x3EEE; + break; + default: + DP_INFO(p_hwfn, "Invalid PTP filter type %d\n", rx_type); + return -EINVAL; + } + + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, + QED_PTP_UCAST_PARAM_MASK); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, rule_mask); + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, enable_cfg); + + if (tx_type == QED_PTP_HWTSTAMP_TX_OFF) { + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + } else { + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, enable_cfg); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, + QED_PTP_UCAST_PARAM_MASK); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, rule_mask); + } + + /* Reset possibly old timestamps */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + + return 0; +} + +/* Adjust the HW clock by a rate given in parts-per-billion (ppb) units. + * FW/HW accepts the adjustment value in terms of 3 parameters: + * Drift period - adjustment happens once in certain number of nano seconds. + * Drift value - time is adjusted by a certain value, for example by 5 ns. + * Drift direction - add or subtract the adjustment value. + * The routine translates ppb into the adjustment triplet in an optimal manner. + */ +static int qed_ptp_hw_adjfreq(struct qed_dev *cdev, s32 ppb) +{ + s64 best_val = 0, val, best_period = 0, period, approx_dev, dif, dif2; + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + u32 drift_ctr_cfg = 0, drift_state; + int drift_dir = 1; + + if (ppb < 0) { + ppb = -ppb; + drift_dir = 0; + } + + if (ppb > 1) { + s64 best_dif = ppb, best_approx_dev = 1; + + /* Adjustment value is up to +/-7ns, find an optimal value in + * this range. + */ + for (val = 7; val > 0; val--) { + period = div_s64(val * 1000000000, ppb); + period -= 8; + period >>= 4; + if (period < 1) + period = 1; + if (period > 0xFFFFFFE) + period = 0xFFFFFFE; + + /* Check both rounding ends for approximate error */ + approx_dev = period * 16 + 8; + dif = ppb * approx_dev - val * 1000000000; + dif2 = dif + 16 * ppb; + + if (dif < 0) + dif = -dif; + if (dif2 < 0) + dif2 = -dif2; + + /* Determine which end gives better approximation */ + if (dif * (approx_dev + 16) > dif2 * approx_dev) { + period++; + approx_dev += 16; + dif = dif2; + } + + /* Track best approximation found so far */ + if (best_dif * approx_dev > dif * best_approx_dev) { + best_dif = dif; + best_val = val; + best_period = period; + best_approx_dev = approx_dev; + } + } + } else if (ppb == 1) { + /* This is a special case as its the only value which wouldn't + * fit in a s64 variable. In order to prevent castings simple + * handle it seperately. 
+ */ + best_val = 4; + best_period = 0xee6b27f; + } else { + best_val = 0; + best_period = 0xFFFFFFF; + } + + drift_ctr_cfg = (best_period << QED_DRIFT_CNTR_TIME_QUANTA_SHIFT) | + (((int)best_val) << QED_DRIFT_CNTR_ADJUSTMENT_SHIFT) | + (((int)drift_dir) << QED_DRIFT_CNTR_DIRECTION_SHIFT); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x1); + + drift_state = qed_rd(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR); + if (drift_state & 1) { + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, + drift_ctr_cfg); + } else { + DP_INFO(p_hwfn, "Drift counter is not reset\n"); + return -EINVAL; + } + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); + + return 0; +} + +static int qed_ptp_hw_enable(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt; + int rc; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, "Failed to acquire PTT for PTP\n"); + return -EBUSY; + } + + p_hwfn->p_ptp_ptt = p_ptt; + + rc = qed_ptp_res_lock(p_hwfn, p_ptt); + if (rc) { + DP_INFO(p_hwfn, + "Couldn't acquire the resource lock, skip ptp enable for this PF\n"); + qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_ptp_ptt = NULL; + return rc; + } + + /* Reset PTP event detection rules - will be configured in the IOCTL */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 7); + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 7); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TS_OUTPUT_ENABLE_PDA, 0x1); + + /* Pause free running counter */ + if (QED_IS_BB_B0(p_hwfn->cdev)) + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 2); + if (QED_IS_AH(p_hwfn->cdev)) + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 2); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_LSB, 0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREE_CNT_VALUE_MSB, 0); + /* Resume free running counter */ + if (QED_IS_BB_B0(p_hwfn->cdev)) + qed_wr(p_hwfn, p_ptt, NIG_REG_TIMESYNC_GEN_REG_BB, 4); + if (QED_IS_AH(p_hwfn->cdev)) { + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_FREECNT_UPDATE_K2, 4); + qed_wr(p_hwfn, p_ptt, NIG_REG_PTP_LATCH_OSTS_PKT_TIME, 1); + } + + /* Disable drift register */ + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_DRIFT_CNTR_CONF, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TSGEN_RST_DRIFT_CNTR, 0x0); + + /* Reset possibly old timestamps */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_HOST_BUF_SEQID, + QED_TIMESTAMP_MASK); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_BUF_SEQID, QED_TIMESTAMP_MASK); + + return 0; +} + +static int qed_ptp_hw_disable(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = p_hwfn->p_ptp_ptt; + + qed_ptp_res_unlock(p_hwfn, p_ptt); + + /* Reset PTP event detection rules */ + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_PTP_RULE_MASK, 0x3FFF); + + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_PARAM_MASK, 0x7FF); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_LLH_PTP_RULE_MASK, 0x3FFF); + + /* Disable the PTP feature */ + qed_wr(p_hwfn, p_ptt, NIG_REG_RX_PTP_EN, 0x0); + qed_wr(p_hwfn, p_ptt, NIG_REG_TX_PTP_EN, 0x0); + + qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_ptp_ptt = NULL; + + return 0; +} + +const struct qed_eth_ptp_ops qed_ptp_ops_pass = { + .cfg_filters = qed_ptp_hw_cfg_filters, + .read_rx_ts = qed_ptp_hw_read_rx_ts, + .read_tx_ts = 
qed_ptp_hw_read_tx_ts, + .read_cc = qed_ptp_hw_read_cc, + .adjfreq = qed_ptp_hw_adjfreq, + .disable = qed_ptp_hw_disable, + .enable = qed_ptp_hw_enable, +}; diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.h b/drivers/net/ethernet/qlogic/qed/qed_ptp.h new file mode 100644 index 0000000000..40a11c0e11 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2020 Marvell International Ltd. */ + +#ifndef __QED_PTP_H +#define __QED_PTP_H + +extern const struct qed_eth_ptp_ops qed_ptp_ops_pass; + +#endif /* __QED_PTP_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c new file mode 100644 index 0000000000..5a5dbbb8d8 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -0,0 +1,2041 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <net/addrconf.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_hsi.h" +#include "qed_iro_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include <linux/qed/qed_rdma_if.h> +#include "qed_rdma.h" +#include "qed_roce.h" +#include "qed_sp.h" + +int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 max_count, char *name) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "max_count = %08x\n", max_count); + + bmap->max_count = max_count; + + bmap->bitmap = bitmap_zalloc(max_count, GFP_KERNEL); + if (!bmap->bitmap) + return -ENOMEM; + + snprintf(bmap->name, QED_RDMA_MAX_BMAP_NAME, "%s", name); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); + return 0; +} + +int qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 *id_num) +{ + *id_num = find_first_zero_bit(bmap->bitmap, bmap->max_count); + if (*id_num >= bmap->max_count) + return -EINVAL; + + __set_bit(*id_num, bmap->bitmap); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: allocated id %d\n", + bmap->name, *id_num); + + return 0; +} + +void qed_bmap_set_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + if (id_num >= bmap->max_count) + return; + + __set_bit(id_num, bmap->bitmap); +} + +void qed_bmap_release_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + bool b_acquired; + + if (id_num >= bmap->max_count) + return; + + b_acquired = test_and_clear_bit(id_num, bmap->bitmap); + if (!b_acquired) { + DP_NOTICE(p_hwfn, "%s bitmap: id %d already released\n", + bmap->name, id_num); + return; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "%s bitmap: released id %d\n", + bmap->name, id_num); +} + +int qed_bmap_test_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 id_num) +{ + if (id_num >= bmap->max_count) + return -1; + + return test_bit(id_num, bmap->bitmap); +} + +static bool qed_bmap_is_empty(struct qed_bmap *bmap) +{ + return bitmap_empty(bmap->bitmap, bmap->max_count); +} + +static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 
rel_sb_id) +{ + /* First sb id for RoCE is after all the l2 sb */ + return FEAT_NUM((struct qed_hwfn *)p_hwfn, QED_PF_L2_QUE) + rel_sb_id; +} + +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info; + + p_rdma_info = kzalloc(sizeof(*p_rdma_info), GFP_KERNEL); + if (!p_rdma_info) + return -ENOMEM; + + spin_lock_init(&p_rdma_info->lock); + + p_hwfn->p_rdma_info = p_rdma_info; + return 0; +} + +void qed_rdma_info_free(struct qed_hwfn *p_hwfn) +{ + kfree(p_hwfn->p_rdma_info); + p_hwfn->p_rdma_info = NULL; +} + +static int qed_rdma_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + u32 num_cons, num_tasks; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocating RDMA\n"); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + p_rdma_info->proto = PROTOCOLID_IWARP; + else + p_rdma_info->proto = PROTOCOLID_ROCE; + + num_cons = qed_cxt_get_proto_cid_count(p_hwfn, p_rdma_info->proto, + NULL); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + p_rdma_info->num_qps = num_cons; + else + p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */ + + num_tasks = qed_cxt_get_proto_tid_count(p_hwfn, PROTOCOLID_ROCE); + + /* Each MR uses a single task */ + p_rdma_info->num_mrs = num_tasks; + + /* Queue zone lines are shared between RoCE and L2 in such a way that + * they can be used by each without obstructing the other. + */ + p_rdma_info->queue_zone_base = (u16)RESC_START(p_hwfn, QED_L2_QUEUE); + p_rdma_info->max_queue_zones = (u16)RESC_NUM(p_hwfn, QED_L2_QUEUE); + + /* Allocate a struct with device params and fill it */ + p_rdma_info->dev = kzalloc(sizeof(*p_rdma_info->dev), GFP_KERNEL); + if (!p_rdma_info->dev) + return rc; + + /* Allocate a struct with port params and fill it */ + p_rdma_info->port = kzalloc(sizeof(*p_rdma_info->port), GFP_KERNEL); + if (!p_rdma_info->port) + goto free_rdma_dev; + + /* Allocate bit map for pd's */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->pd_map, RDMA_MAX_PDS, + "PD"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate pd_map, rc = %d\n", + rc); + goto free_rdma_port; + } + + /* Allocate bit map for XRC Domains */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->xrcd_map, + QED_RDMA_MAX_XRCDS, "XRCD"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate xrcd_map,rc = %d\n", rc); + goto free_pd_map; + } + + /* Allocate DPI bitmap */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->dpi_map, + p_hwfn->dpi_count, "DPI"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate DPI bitmap, rc = %d\n", rc); + goto free_xrcd_map; + } + + /* Allocate bitmap for cq's. The maximum number of CQs is bound to + * the number of connections we support. (num_qps in iWARP or + * num_qps/2 in RoCE). + */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cq_map, num_cons, "CQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate cq bitmap, rc = %d\n", rc); + goto free_dpi_map; + } + + /* Allocate bitmap for toggle bit for cq icids + * We toggle the bit every time we create or resize cq for a given icid. + * Size needs to equal the size of the cq bmap. 
+ */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->toggle_bits, + num_cons, "Toggle"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate toggle bits, rc = %d\n", rc); + goto free_cq_map; + } + + /* Allocate bitmap for itids */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->tid_map, + p_rdma_info->num_mrs, "MR"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate itids bitmaps, rc = %d\n", rc); + goto free_toggle_map; + } + + /* Allocate bitmap for cids used for qps. */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->cid_map, num_cons, + "CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate cid bitmap, rc = %d\n", rc); + goto free_tid_map; + } + + /* Allocate bitmap for cids used for responders/requesters. */ + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->real_cid_map, num_cons, + "REAL_CID"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate real cid bitmap, rc = %d\n", rc); + goto free_cid_map; + } + + /* The first SRQ follows the last XRC SRQ. This means that the + * SRQ IDs start from an offset equals to max_xrc_srqs. + */ + p_rdma_info->srq_id_offset = p_hwfn->p_cxt_mngr->xrc_srq_count; + rc = qed_rdma_bmap_alloc(p_hwfn, + &p_rdma_info->xrc_srq_map, + p_hwfn->p_cxt_mngr->xrc_srq_count, "XRC SRQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate xrc srq bitmap, rc = %d\n", rc); + goto free_real_cid_map; + } + + /* Allocate bitmap for srqs */ + p_rdma_info->num_srqs = p_hwfn->p_cxt_mngr->srq_count; + rc = qed_rdma_bmap_alloc(p_hwfn, &p_rdma_info->srq_map, + p_rdma_info->num_srqs, "SRQ"); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to allocate srq bitmap, rc = %d\n", rc); + goto free_xrc_srq_map; + } + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + rc = qed_iwarp_alloc(p_hwfn); + + if (rc) + goto free_srq_map; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocation successful\n"); + return 0; + +free_srq_map: + kfree(p_rdma_info->srq_map.bitmap); +free_xrc_srq_map: + kfree(p_rdma_info->xrc_srq_map.bitmap); +free_real_cid_map: + kfree(p_rdma_info->real_cid_map.bitmap); +free_cid_map: + kfree(p_rdma_info->cid_map.bitmap); +free_tid_map: + kfree(p_rdma_info->tid_map.bitmap); +free_toggle_map: + kfree(p_rdma_info->toggle_bits.bitmap); +free_cq_map: + kfree(p_rdma_info->cq_map.bitmap); +free_dpi_map: + kfree(p_rdma_info->dpi_map.bitmap); +free_xrcd_map: + kfree(p_rdma_info->xrcd_map.bitmap); +free_pd_map: + kfree(p_rdma_info->pd_map.bitmap); +free_rdma_port: + kfree(p_rdma_info->port); +free_rdma_dev: + kfree(p_rdma_info->dev); + + return rc; +} + +void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, bool check) +{ + unsigned int bit, weight, nbits; + unsigned long *b; + + if (!check) + goto end; + + weight = bitmap_weight(bmap->bitmap, bmap->max_count); + if (!weight) + goto end; + + DP_NOTICE(p_hwfn, + "%s bitmap not free - size=%d, weight=%d, 512 bits per line\n", + bmap->name, bmap->max_count, weight); + + for (bit = 0; bit < bmap->max_count; bit += 512) { + b = bmap->bitmap + BITS_TO_LONGS(bit); + nbits = min(bmap->max_count - bit, 512U); + + if (!bitmap_empty(b, nbits)) + DP_NOTICE(p_hwfn, + "line 0x%04x: %*pb\n", bit / 512, nbits, b); + } + +end: + bitmap_free(bmap->bitmap); + bmap->bitmap = NULL; +} + +static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_resc_free(p_hwfn); + + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1); + 
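
Editorial aside: qed_rdma_bmap_alloc_id() and qed_bmap_release_id() earlier in this file implement a simple ID allocator that the allocation cascade above relies on. An ID is just the index of the first clear bit; allocation sets that bit, release clears it, so freed IDs are reused in order. Below is a self-contained user-space sketch of the same idea, with an invented toy_* API and a single 64-bit word standing in for bitmap_zalloc(), find_first_zero_bit() and test_and_clear_bit().

    #include <stdio.h>

    #define TOY_MAX_IDS 64          /* one 64-bit word keeps the sketch tiny */

    /* Minimal model of qed_rdma_bmap_alloc_id()/qed_bmap_release_id()
     * (illustrative only; names are invented for this sketch). */
    static int toy_alloc_id(unsigned long long *bmap, unsigned int *id)
    {
            unsigned int i;

            for (i = 0; i < TOY_MAX_IDS; i++) {
                    if (!(*bmap & (1ULL << i))) {
                            *bmap |= 1ULL << i;     /* mark ID as in use */
                            *id = i;
                            return 0;
                    }
            }
            return -1;              /* bitmap exhausted, like -EINVAL above */
    }

    static void toy_release_id(unsigned long long *bmap, unsigned int id)
    {
            if (id < TOY_MAX_IDS)
                    *bmap &= ~(1ULL << id);
    }

    int main(void)
    {
            unsigned long long bmap = 0;
            unsigned int id;

            toy_alloc_id(&bmap, &id);       /* returns id 0 */
            toy_alloc_id(&bmap, &id);       /* returns id 1 */
            toy_release_id(&bmap, 0);
            toy_alloc_id(&bmap, &id);       /* reuses 0, the first clear bit */
            printf("allocated id %u\n", id);
            return 0;
    }
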
qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->toggle_bits, 0); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->tid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->srq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrc_srq_map, 1); + qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, 1); + + kfree(p_rdma_info->port); + kfree(p_rdma_info->dev); +} + +static void qed_rdma_free_tid(void *rdma_cxt, u32 itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static void qed_rdma_free_reserved_lkey(struct qed_hwfn *p_hwfn) +{ + qed_rdma_free_tid(p_hwfn, p_hwfn->p_rdma_info->dev->reserved_lkey); +} + +static void qed_rdma_free(struct qed_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); + + qed_rdma_free_reserved_lkey(p_hwfn); + qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto); + qed_rdma_resc_free(p_hwfn); +} + +static void qed_rdma_init_events(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params) +{ + struct qed_rdma_events *events; + + events = &p_hwfn->p_rdma_info->events; + + events->unaffiliated_event = params->events->unaffiliated_event; + events->affiliated_event = params->events->affiliated_event; + events->context = params->events->context; +} + +static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + struct qed_dev *cdev = p_hwfn->cdev; + u32 pci_status_control; + u32 num_qps; + + /* Vendor specific information */ + dev->vendor_id = cdev->vendor_id; + dev->vendor_part_id = cdev->device_id; + dev->hw_ver = cdev->chip_rev; + dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | + (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION); + + addrconf_addr_eui48((u8 *)&dev->sys_image_guid, + p_hwfn->hw_info.hw_mac_addr); + + dev->node_guid = dev->sys_image_guid; + + dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE, + RDMA_MAX_SGE_PER_RQ_WQE); + + if (cdev->rdma_max_sge) + dev->max_sge = min_t(u32, cdev->rdma_max_sge, dev->max_sge); + + dev->max_srq_sge = QED_RDMA_MAX_SGE_PER_SRQ_WQE; + if (p_hwfn->cdev->rdma_max_srq_sge) { + dev->max_srq_sge = min_t(u32, + p_hwfn->cdev->rdma_max_srq_sge, + dev->max_srq_sge); + } + dev->max_inline = ROCE_REQ_MAX_INLINE_DATA_SIZE; + + dev->max_inline = (cdev->rdma_max_inline) ? + min_t(u32, cdev->rdma_max_inline, dev->max_inline) : + dev->max_inline; + + dev->max_wqe = QED_RDMA_MAX_WQE; + dev->max_cnq = (u8)FEAT_NUM(p_hwfn, QED_RDMA_CNQ); + + /* The number of QPs may be higher than QED_ROCE_MAX_QPS, because + * it is up-aligned to 16 and then to ILT page size within qed cxt. + * This is OK in terms of ILT but we don't want to configure the FW + * above its abilities + */ + num_qps = ROCE_MAX_QPS; + num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps); + dev->max_qp = num_qps; + + /* CQs uses the same icids that QPs use hence they are limited by the + * number of icids. There are two icids per QP. 
+ */ + dev->max_cq = num_qps * 2; + + /* The number of mrs is smaller by 1 since the first is reserved */ + dev->max_mr = p_hwfn->p_rdma_info->num_mrs - 1; + dev->max_mr_size = QED_RDMA_MAX_MR_SIZE; + + /* The maximum CQE capacity per CQ supported. + * max number of cqes will be in two layer pbl, + * 8 is the pointer size in bytes + * 32 is the size of cq element in bytes + */ + if (params->cq_mode == QED_RDMA_CQ_MODE_32_BITS) + dev->max_cqe = QED_RDMA_MAX_CQE_32_BIT; + else + dev->max_cqe = QED_RDMA_MAX_CQE_16_BIT; + + dev->max_mw = 0; + dev->max_mr_mw_fmr_pbl = (PAGE_SIZE / 8) * (PAGE_SIZE / 8); + dev->max_mr_mw_fmr_size = dev->max_mr_mw_fmr_pbl * PAGE_SIZE; + if (QED_IS_ROCE_PERSONALITY(p_hwfn)) + dev->max_pkey = QED_RDMA_MAX_P_KEY; + + dev->max_srq = p_hwfn->p_rdma_info->num_srqs; + dev->max_srq_wr = QED_RDMA_MAX_SRQ_WQE_ELEM; + dev->max_qp_resp_rd_atomic_resc = RDMA_RING_PAGE_SIZE / + (RDMA_RESP_RD_ATOMIC_ELM_SIZE * 2); + dev->max_qp_req_rd_atomic_resc = RDMA_RING_PAGE_SIZE / + RDMA_REQ_RD_ATOMIC_ELM_SIZE; + dev->max_dev_resp_rd_atomic_resc = dev->max_qp_resp_rd_atomic_resc * + p_hwfn->p_rdma_info->num_qps; + dev->page_size_caps = QED_RDMA_PAGE_SIZE_CAPS; + dev->dev_ack_delay = QED_RDMA_ACK_DELAY; + dev->max_pd = RDMA_MAX_PDS; + dev->max_ah = p_hwfn->p_rdma_info->num_qps; + dev->max_stats_queues = (u8)RESC_NUM(p_hwfn, QED_RDMA_STATS_QUEUE); + + /* Set capablities */ + dev->dev_caps = 0; + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RNR_NAK, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_ACTIVE_EVENT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_PORT_CHANGE_EVENT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_RESIZE_CQ, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_MEMORY_EXT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_BASE_QUEUE_EXT, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ZBVA, 1); + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_LOCAL_INV_FENCE, 1); + + /* Check atomic operations support in PCI configuration space. */ + pcie_capability_read_dword(cdev->pdev, PCI_EXP_DEVCTL2, + &pci_status_control); + + if (pci_status_control & PCI_EXP_DEVCTL2_LTR_EN) + SET_FIELD(dev->dev_caps, QED_RDMA_DEV_CAP_ATOMIC_OP, 1); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_init_devinfo(p_hwfn); +} + +static void qed_rdma_init_port(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_port *port = p_hwfn->p_rdma_info->port; + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + + port->port_state = p_hwfn->mcp_info->link_output.link_up ? 
+ QED_RDMA_PORT_UP : QED_RDMA_PORT_DOWN; + + port->max_msg_size = min_t(u64, + (dev->max_mr_mw_fmr_size * + p_hwfn->cdev->rdma_max_sge), + BIT(31)); + + port->pkey_bad_counter = 0; +} + +static int qed_rdma_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW\n"); + p_hwfn->b_rdma_enabled_in_prs = false; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_init_hw(p_hwfn, p_ptt); + else + rc = qed_roce_init_hw(p_hwfn, p_ptt); + + return rc; +} + +static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn, + struct qed_rdma_start_in_params *params, + struct qed_ptt *p_ptt) +{ + struct rdma_init_func_ramrod_data *p_ramrod; + struct qed_rdma_cnq_params *p_cnq_pbl_list; + struct rdma_init_func_hdr *p_params_header; + struct rdma_cnq_params *p_cnq_params; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u32 cnq_id, sb_id; + u16 igu_sb_id; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Starting FW\n"); + + /* Save the number of cnqs for the function close ramrod */ + p_hwfn->p_rdma_info->num_cnqs = params->desired_cnq; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_INIT, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + qed_iwarp_init_fw_ramrod(p_hwfn, + &p_ent->ramrod.iwarp_init_func); + p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma; + } else { + p_ramrod = &p_ent->ramrod.roce_init_func.rdma; + } + + p_params_header = &p_ramrod->params_header; + p_params_header->cnq_start_offset = (u8)RESC_START(p_hwfn, + QED_RDMA_CNQ_RAM); + p_params_header->num_cnqs = params->desired_cnq; + p_params_header->first_reg_srq_id = + cpu_to_le16(p_hwfn->p_rdma_info->srq_id_offset); + p_params_header->reg_srq_base_addr = + cpu_to_le32(qed_cxt_get_ilt_page_size(p_hwfn, ILT_CLI_TSDM)); + if (params->cq_mode == QED_RDMA_CQ_MODE_16_BITS) + p_params_header->cq_ring_mode = 1; + else + p_params_header->cq_ring_mode = 0; + + for (cnq_id = 0; cnq_id < params->desired_cnq; cnq_id++) { + sb_id = qed_rdma_get_sb_id(p_hwfn, cnq_id); + igu_sb_id = qed_get_igu_sb_id(p_hwfn, sb_id); + p_ramrod->cnq_params[cnq_id].sb_num = cpu_to_le16(igu_sb_id); + p_cnq_params = &p_ramrod->cnq_params[cnq_id]; + p_cnq_pbl_list = ¶ms->cnq_pbl_list[cnq_id]; + + p_cnq_params->sb_index = p_hwfn->pf_params.rdma_pf_params.gl_pi; + p_cnq_params->num_pbl_pages = p_cnq_pbl_list->num_pbl_pages; + + DMA_REGPAIR_LE(p_cnq_params->pbl_base_addr, + p_cnq_pbl_list->pbl_ptr); + + /* we assume here that cnq_id and qz_offset are the same */ + p_cnq_params->queue_zone_num = + cpu_to_le16(p_hwfn->p_rdma_info->queue_zone_base + + cnq_id); + } + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_rdma_alloc_tid(void *rdma_cxt, u32 *itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID\n"); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->tid_map, itid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) + goto out; + + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_TASK, *itid); +out: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Allocate TID - done, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_reserve_lkey(struct qed_hwfn *p_hwfn) +{ + struct qed_rdma_device *dev = p_hwfn->p_rdma_info->dev; + 
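
Editorial aside: as a worked example of the max_mr_mw_fmr_pbl / max_mr_mw_fmr_size arithmetic in qed_rdma_init_devinfo() above: each PBL page holds PAGE_SIZE / 8 64-bit page pointers, so a two-level PBL covers (PAGE_SIZE / 8)^2 data pages. Assuming a 4 KiB PAGE_SIZE (architecture dependent, so this value is only an assumption here), that is 512 * 512 = 262,144 pages, or 1 GiB of registerable memory per MR. A small standalone program that reproduces the numbers:

    #include <stdio.h>

    /* Worked example of the two-level PBL sizing; 4 KiB PAGE_SIZE is an
     * assumption for illustration, PAGE_SIZE is arch-dependent. */
    int main(void)
    {
            unsigned long long page_size = 4096;   /* assumed PAGE_SIZE */
            unsigned long long ptrs_per_pbl_page = page_size / 8;
            unsigned long long max_pbl_entries =
                    ptrs_per_pbl_page * ptrs_per_pbl_page;
            unsigned long long max_mr_size = max_pbl_entries * page_size;

            printf("pointers per PBL page : %llu\n", ptrs_per_pbl_page);
            printf("max_mr_mw_fmr_pbl     : %llu pages\n", max_pbl_entries);
            printf("max_mr_mw_fmr_size    : %llu bytes (%llu GiB)\n",
                   max_mr_size, max_mr_size >> 30);
            return 0;
    }
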
+ /* Tid 0 will be used as the key for "reserved MR". + * The driver should allocate memory for it so it can be loaded but no + * ramrod should be passed on it. + */ + qed_rdma_alloc_tid(p_hwfn, &dev->reserved_lkey); + if (dev->reserved_lkey != RDMA_RESERVED_LKEY) { + DP_NOTICE(p_hwfn, + "Reserved lkey should be equal to RDMA_RESERVED_LKEY\n"); + return -EINVAL; + } + + return 0; +} + +static int qed_rdma_setup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_rdma_start_in_params *params) +{ + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA setup\n"); + + qed_rdma_init_devinfo(p_hwfn, params); + qed_rdma_init_port(p_hwfn); + qed_rdma_init_events(p_hwfn, params); + + rc = qed_rdma_reserve_lkey(p_hwfn); + if (rc) + return rc; + + rc = qed_rdma_init_hw(p_hwfn, p_ptt); + if (rc) + return rc; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_setup(p_hwfn, params); + if (rc) + return rc; + } else { + rc = qed_roce_setup(p_hwfn); + if (rc) + return rc; + } + + return qed_rdma_start_fw(p_hwfn, params, p_ptt); +} + +static int qed_rdma_stop(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_close_func_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_ptt *p_ptt; + u32 ll2_ethertype_en; + int rc = -EBUSY; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop\n"); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Failed to acquire PTT\n"); + return rc; + } + + /* Disable RoCE search */ + qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 0); + p_hwfn->b_rdma_enabled_in_prs = false; + p_hwfn->p_rdma_info->active = 0; + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); + + ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); + + qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, + (ll2_ethertype_en & 0xFFFE)); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_stop(p_hwfn); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + return rc; + } + } else { + qed_roce_stop(p_hwfn); + } + + qed_ptt_release(p_hwfn, p_ptt); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + /* Stop RoCE */ + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_FUNC_CLOSE, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto out; + + p_ramrod = &p_ent->ramrod.rdma_close_func; + + p_ramrod->num_cnqs = p_hwfn->p_rdma_info->num_cnqs; + p_ramrod->cnq_start_offset = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + +out: + qed_rdma_free(p_hwfn); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA stop done, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_add_user(void *rdma_cxt, + struct qed_rdma_add_user_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + u32 dpi_start_offset; + u32 returned_id = 0; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding User\n"); + + /* Allocate DPI */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, + &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + out_params->dpi = (u16)returned_id; + + /* Calculate the corresponding DPI address */ + dpi_start_offset = p_hwfn->dpi_start_offset; + + out_params->dpi_addr = p_hwfn->doorbells + dpi_start_offset + + out_params->dpi * p_hwfn->dpi_size; + + out_params->dpi_phys_addr = p_hwfn->db_phys_addr + + 
dpi_start_offset + + ((out_params->dpi) * p_hwfn->dpi_size); + + out_params->dpi_size = p_hwfn->dpi_size; + out_params->wid_count = p_hwfn->wid_count; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Adding user - done, rc = %d\n", rc); + return rc; +} + +static struct qed_rdma_port *qed_rdma_query_port(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_rdma_port *p_port = p_hwfn->p_rdma_info->port; + struct qed_mcp_link_state *p_link_output; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA Query port\n"); + + /* The link state is saved only for the leading hwfn */ + p_link_output = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output; + + p_port->port_state = p_link_output->link_up ? QED_RDMA_PORT_UP + : QED_RDMA_PORT_DOWN; + + p_port->link_speed = p_link_output->speed; + + p_port->max_msg_size = RDMA_MAX_DATA_SIZE_IN_WQE; + + return p_port; +} + +static struct qed_rdma_device *qed_rdma_query_device(void *rdma_cxt) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query device\n"); + + /* Return struct with device parameters */ + return p_hwfn->p_rdma_info->dev; +} + +static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod) +{ + struct qed_hwfn *p_hwfn; + u16 qz_num; + u32 addr; + + p_hwfn = (struct qed_hwfn *)rdma_cxt; + + if (qz_offset > p_hwfn->p_rdma_info->max_queue_zones) { + DP_NOTICE(p_hwfn, + "queue zone offset %d is too large (max is %d)\n", + qz_offset, p_hwfn->p_rdma_info->max_queue_zones); + return; + } + + qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset; + addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_COMMON_QUEUE_CONS, qz_num); + + REG_WR16(p_hwfn, addr, prod); + + /* keep prod updates ordered */ + wmb(); +} + +static int qed_fill_rdma_dev_info(struct qed_dev *cdev, + struct qed_dev_rdma_info *info) +{ + struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev); + + memset(info, 0, sizeof(*info)); + + info->rdma_type = QED_IS_ROCE_PERSONALITY(p_hwfn) ? + QED_RDMA_TYPE_ROCE : QED_RDMA_TYPE_IWARP; + + info->user_dpm_enabled = (p_hwfn->db_bar_no_edpm == 0); + + qed_fill_dev_info(cdev, &info->common); + + return 0; +} + +static int qed_rdma_get_sb_start(struct qed_dev *cdev) +{ + int feat_num; + + if (cdev->num_hwfns > 1) + feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE); + else + feat_num = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_PF_L2_QUE) * + cdev->num_hwfns; + + return feat_num; +} + +static int qed_rdma_get_min_cnq_msix(struct qed_dev *cdev) +{ + int n_cnq = FEAT_NUM(QED_AFFIN_HWFN(cdev), QED_RDMA_CNQ); + int n_msix = cdev->int_params.rdma_msix_cnt; + + return min_t(int, n_cnq, n_msix); +} + +static int qed_rdma_set_int(struct qed_dev *cdev, u16 cnt) +{ + int limit = 0; + + /* Mark the fastpath as free/used */ + cdev->int_params.fp_initialized = cnt ? 
true : false; + + if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) { + DP_ERR(cdev, + "qed roce supports only MSI-X interrupts (detected %d).\n", + cdev->int_params.out.int_mode); + return -EINVAL; + } else if (cdev->int_params.fp_msix_cnt) { + limit = cdev->int_params.rdma_msix_cnt; + } + + if (!limit) + return -ENOMEM; + + return min_t(int, cnt, limit); +} + +static int qed_rdma_get_int(struct qed_dev *cdev, struct qed_int_info *info) +{ + memset(info, 0, sizeof(*info)); + + if (!cdev->int_params.fp_initialized) { + DP_INFO(cdev, + "Protocol driver requested interrupt information, but its support is not yet configured\n"); + return -EINVAL; + } + + if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { + int msix_base = cdev->int_params.rdma_msix_base; + + info->msix_cnt = cdev->int_params.rdma_msix_cnt; + info->msix = &cdev->int_params.msix_table[msix_base]; + + DP_VERBOSE(cdev, QED_MSG_RDMA, "msix_cnt = %d msix_base=%d\n", + info->msix_cnt, msix_base); + } + + return 0; +} + +static int qed_rdma_alloc_pd(void *rdma_cxt, u16 *pd) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + u32 returned_id; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD\n"); + + /* Allocates an unused protection domain */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->pd_map, &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + *pd = (u16)returned_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc PD - done, rc = %d\n", rc); + return rc; +} + +static void qed_rdma_free_pd(void *rdma_cxt, u16 pd) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "pd = %08x\n", pd); + + /* Returns a previously allocated protection domain for reuse */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->pd_map, pd); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static int qed_rdma_alloc_xrcd(void *rdma_cxt, u16 *xrcd_id) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + u32 returned_id; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD\n"); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, + &p_hwfn->p_rdma_info->xrcd_map, + &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + if (rc) { + DP_NOTICE(p_hwfn, "Failed in allocating xrcd id\n"); + return rc; + } + + *xrcd_id = (u16)returned_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Alloc XRCD - done, rc = %d\n", rc); + return rc; +} + +static void qed_rdma_free_xrcd(void *rdma_cxt, u16 xrcd_id) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "xrcd_id = %08x\n", xrcd_id); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->xrcd_map, xrcd_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static enum qed_rdma_toggle_bit +qed_rdma_toggle_bit_create_resize_cq(struct qed_hwfn *p_hwfn, u16 icid) +{ + struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; + enum qed_rdma_toggle_bit toggle_bit; + u32 bmap_id; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", icid); + + /* the function toggle the bit that is related to a given icid + * and returns the new toggle bit's value + */ + bmap_id = icid - qed_cxt_get_proto_cid_start(p_hwfn, p_info->proto); + + spin_lock_bh(&p_info->lock); + toggle_bit = !test_and_change_bit(bmap_id, + p_info->toggle_bits.bitmap); + spin_unlock_bh(&p_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 
"QED_RDMA_TOGGLE_BIT_= %d\n", + toggle_bit); + + return toggle_bit; +} + +static int qed_rdma_create_cq(void *rdma_cxt, + struct qed_rdma_create_cq_in_params *params, + u16 *icid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_rdma_info *p_info = p_hwfn->p_rdma_info; + struct rdma_create_cq_ramrod_data *p_ramrod; + enum qed_rdma_toggle_bit toggle_bit; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u32 returned_id, start_cid; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "cq_handle = %08x%08x\n", + params->cq_handle_hi, params->cq_handle_lo); + + /* Allocate icid */ + spin_lock_bh(&p_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_info->cq_map, &returned_id); + spin_unlock_bh(&p_info->lock); + + if (rc) { + DP_NOTICE(p_hwfn, "Can't create CQ, rc = %d\n", rc); + return rc; + } + + start_cid = qed_cxt_get_proto_cid_start(p_hwfn, + p_info->proto); + *icid = returned_id + start_cid; + + /* Check if icid requires a page allocation */ + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, *icid); + if (rc) + goto err; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = *icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + /* Send create CQ ramrod */ + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_CREATE_CQ, + p_info->proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_create_cq; + + p_ramrod->cq_handle.hi = cpu_to_le32(params->cq_handle_hi); + p_ramrod->cq_handle.lo = cpu_to_le32(params->cq_handle_lo); + p_ramrod->dpi = cpu_to_le16(params->dpi); + p_ramrod->is_two_level_pbl = params->pbl_two_level; + p_ramrod->max_cqes = cpu_to_le32(params->cq_size); + DMA_REGPAIR_LE(p_ramrod->pbl_addr, params->pbl_ptr); + p_ramrod->pbl_num_pages = cpu_to_le16(params->pbl_num_pages); + p_ramrod->cnq_id = (u8)RESC_START(p_hwfn, QED_RDMA_CNQ_RAM) + + params->cnq_id; + p_ramrod->int_timeout = cpu_to_le16(params->int_timeout); + + /* toggle the bit for every resize or create cq for a given icid */ + toggle_bit = qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); + + p_ramrod->toggle_bit = toggle_bit; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) { + /* restore toggle bit */ + qed_rdma_toggle_bit_create_resize_cq(p_hwfn, *icid); + goto err; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Created CQ, rc = %d\n", rc); + return rc; + +err: + /* release allocated icid */ + spin_lock_bh(&p_info->lock); + qed_bmap_release_id(p_hwfn, &p_info->cq_map, returned_id); + spin_unlock_bh(&p_info->lock); + DP_NOTICE(p_hwfn, "Create CQ failed, rc = %d\n", rc); + + return rc; +} + +static int +qed_rdma_destroy_cq(void *rdma_cxt, + struct qed_rdma_destroy_cq_in_params *in_params, + struct qed_rdma_destroy_cq_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_destroy_cq_output_params *p_ramrod_res; + struct rdma_destroy_cq_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + dma_addr_t ramrod_res_phys; + enum protocol_type proto; + int rc = -ENOMEM; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid); + + p_ramrod_res = + dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct rdma_destroy_cq_output_params), + &ramrod_res_phys, GFP_KERNEL); + if (!p_ramrod_res) { + DP_NOTICE(p_hwfn, + "qed destroy cq failed: cannot allocate memory (ramrod)\n"); + return rc; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = 
in_params->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + proto = p_hwfn->p_rdma_info->proto; + /* Send destroy CQ ramrod */ + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DESTROY_CQ, + proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_destroy_cq; + DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + out_params->num_cq_notif = le16_to_cpu(p_ramrod_res->cnq_num); + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct rdma_destroy_cq_output_params), + p_ramrod_res, ramrod_res_phys); + + /* Free icid */ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + + qed_bmap_release_id(p_hwfn, + &p_hwfn->p_rdma_info->cq_map, + (in_params->icid - + qed_cxt_get_proto_cid_start(p_hwfn, proto))); + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroyed CQ, rc = %d\n", rc); + return rc; + +err: dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct rdma_destroy_cq_output_params), + p_ramrod_res, ramrod_res_phys); + + return rc; +} + +void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac) +{ + p_fw_mac[0] = cpu_to_le16((p_qed_mac[0] << 8) + p_qed_mac[1]); + p_fw_mac[1] = cpu_to_le16((p_qed_mac[2] << 8) + p_qed_mac[3]); + p_fw_mac[2] = cpu_to_le16((p_qed_mac[4] << 8) + p_qed_mac[5]); +} + +static int qed_rdma_query_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + /* The following fields are filled in from qp and not FW as they can't + * be modified by FW + */ + out_params->mtu = qp->mtu; + out_params->dest_qp = qp->dest_qp; + out_params->incoming_atomic_en = qp->incoming_atomic_en; + out_params->e2e_flow_control_en = qp->e2e_flow_control_en; + out_params->incoming_rdma_read_en = qp->incoming_rdma_read_en; + out_params->incoming_rdma_write_en = qp->incoming_rdma_write_en; + out_params->dgid = qp->dgid; + out_params->flow_label = qp->flow_label; + out_params->hop_limit_ttl = qp->hop_limit_ttl; + out_params->traffic_class_tos = qp->traffic_class_tos; + out_params->timeout = qp->ack_timeout; + out_params->rnr_retry = qp->rnr_retry_cnt; + out_params->retry_cnt = qp->retry_cnt; + out_params->min_rnr_nak_timer = qp->min_rnr_nak_timer; + out_params->pkey_index = 0; + out_params->max_rd_atomic = qp->max_rd_atomic_req; + out_params->max_dest_rd_atomic = qp->max_rd_atomic_resp; + out_params->sqd_async = qp->sqd_async; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + qed_iwarp_query_qp(qp, out_params); + else + rc = qed_roce_query_qp(p_hwfn, qp, out_params); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Query QP, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_destroy_qp(void *rdma_cxt, struct qed_rdma_qp *qp) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) + rc = qed_iwarp_destroy_qp(p_hwfn, qp); + else + rc = qed_roce_destroy_qp(p_hwfn, qp); + + /* free qp params struct */ + kfree(qp); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "QP destroyed\n"); + return rc; +} + +static struct qed_rdma_qp * +qed_rdma_create_qp(void *rdma_cxt, + struct qed_rdma_create_qp_in_params *in_params, + struct qed_rdma_create_qp_out_params *out_params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn 
*)rdma_cxt; + struct qed_rdma_qp *qp; + u8 max_stats_queues; + int rc; + + if (!rdma_cxt || !in_params || !out_params || + !p_hwfn->p_rdma_info->active) { + pr_err("qed roce create qp failed due to NULL entry (rdma_cxt=%p, in=%p, out=%p, roce_info=?\n", + rdma_cxt, in_params, out_params); + return NULL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "qed rdma create qp called with qp_handle = %08x%08x\n", + in_params->qp_handle_hi, in_params->qp_handle_lo); + + /* Some sanity checks... */ + max_stats_queues = p_hwfn->p_rdma_info->dev->max_stats_queues; + if (in_params->stats_queue >= max_stats_queues) { + DP_ERR(p_hwfn->cdev, + "qed rdma create qp failed due to invalid statistics queue %d. maximum is %d\n", + in_params->stats_queue, max_stats_queues); + return NULL; + } + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + if (in_params->sq_num_pages * sizeof(struct regpair) > + IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE) { + DP_NOTICE(p_hwfn->cdev, + "Sq num pages: %d exceeds maximum\n", + in_params->sq_num_pages); + return NULL; + } + if (in_params->rq_num_pages * sizeof(struct regpair) > + IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE) { + DP_NOTICE(p_hwfn->cdev, + "Rq num pages: %d exceeds maximum\n", + in_params->rq_num_pages); + return NULL; + } + } + + qp = kzalloc(sizeof(*qp), GFP_KERNEL); + if (!qp) + return NULL; + + qp->cur_state = QED_ROCE_QP_STATE_RESET; + qp->qp_handle.hi = cpu_to_le32(in_params->qp_handle_hi); + qp->qp_handle.lo = cpu_to_le32(in_params->qp_handle_lo); + qp->qp_handle_async.hi = cpu_to_le32(in_params->qp_handle_async_hi); + qp->qp_handle_async.lo = cpu_to_le32(in_params->qp_handle_async_lo); + qp->use_srq = in_params->use_srq; + qp->signal_all = in_params->signal_all; + qp->fmr_and_reserved_lkey = in_params->fmr_and_reserved_lkey; + qp->pd = in_params->pd; + qp->dpi = in_params->dpi; + qp->sq_cq_id = in_params->sq_cq_id; + qp->sq_num_pages = in_params->sq_num_pages; + qp->sq_pbl_ptr = in_params->sq_pbl_ptr; + qp->rq_cq_id = in_params->rq_cq_id; + qp->rq_num_pages = in_params->rq_num_pages; + qp->rq_pbl_ptr = in_params->rq_pbl_ptr; + qp->srq_id = in_params->srq_id; + qp->req_offloaded = false; + qp->resp_offloaded = false; + qp->e2e_flow_control_en = qp->use_srq ? 
false : true; + qp->stats_queue = in_params->stats_queue; + qp->qp_type = in_params->qp_type; + qp->xrcd_id = in_params->xrcd_id; + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + rc = qed_iwarp_create_qp(p_hwfn, qp, out_params); + qp->qpid = qp->icid; + } else { + qp->edpm_mode = GET_FIELD(in_params->flags, QED_ROCE_EDPM_MODE); + rc = qed_roce_alloc_cid(p_hwfn, &qp->icid); + qp->qpid = ((0xFF << 16) | qp->icid); + } + + if (rc) { + kfree(qp); + return NULL; + } + + out_params->icid = qp->icid; + out_params->qp_id = qp->qpid; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Create QP, rc = %d\n", rc); + return qp; +} + +static int qed_rdma_modify_qp(void *rdma_cxt, + struct qed_rdma_qp *qp, + struct qed_rdma_modify_qp_in_params *params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + enum qed_roce_qp_state prev_state; + int rc = 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x params->new_state=%d\n", + qp->icid, params->new_state); + + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + + if (GET_FIELD(params->modify_flags, + QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)) { + qp->incoming_rdma_read_en = params->incoming_rdma_read_en; + qp->incoming_rdma_write_en = params->incoming_rdma_write_en; + qp->incoming_atomic_en = params->incoming_atomic_en; + } + + /* Update QP structure with the updated values */ + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_ROCE_MODE)) + qp->roce_mode = params->roce_mode; + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)) + qp->pkey = params->pkey; + if (GET_FIELD(params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_E2E_FLOW_CONTROL_EN)) + qp->e2e_flow_control_en = params->e2e_flow_control_en; + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_DEST_QP)) + qp->dest_qp = params->dest_qp; + if (GET_FIELD(params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)) { + /* Indicates that the following parameters have changed: + * Traffic class, flow label, hop limit, source GID, + * destination GID, loopback indicator + */ + qp->traffic_class_tos = params->traffic_class_tos; + qp->flow_label = params->flow_label; + qp->hop_limit_ttl = params->hop_limit_ttl; + + qp->sgid = params->sgid; + qp->dgid = params->dgid; + qp->udp_src_port = 0; + qp->vlan_id = params->vlan_id; + qp->mtu = params->mtu; + qp->lb_indication = params->lb_indication; + memcpy((u8 *)&qp->remote_mac_addr[0], + (u8 *)&params->remote_mac_addr[0], ETH_ALEN); + if (params->use_local_mac) { + memcpy((u8 *)&qp->local_mac_addr[0], + (u8 *)&params->local_mac_addr[0], ETH_ALEN); + } else { + memcpy((u8 *)&qp->local_mac_addr[0], + (u8 *)&p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); + } + } + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RQ_PSN)) + qp->rq_psn = params->rq_psn; + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_SQ_PSN)) + qp->sq_psn = params->sq_psn; + if (GET_FIELD(params->modify_flags, + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)) + qp->max_rd_atomic_req = params->max_rd_atomic_req; + if (GET_FIELD(params->modify_flags, + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)) + qp->max_rd_atomic_resp = params->max_rd_atomic_resp; + if (GET_FIELD(params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)) + qp->ack_timeout = params->ack_timeout; + if (GET_FIELD(params->modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)) + qp->retry_cnt = params->retry_cnt; + if (GET_FIELD(params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)) + qp->rnr_retry_cnt = params->rnr_retry_cnt; + if
(GET_FIELD(params->modify_flags, + QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)) + qp->min_rnr_nak_timer = params->min_rnr_nak_timer; + + qp->sqd_async = params->sqd_async; + + prev_state = qp->cur_state; + if (GET_FIELD(params->modify_flags, + QED_RDMA_MODIFY_QP_VALID_NEW_STATE)) { + qp->cur_state = params->new_state; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "qp->cur_state=%d\n", + qp->cur_state); + } + + switch (qp->qp_type) { + case QED_RDMA_QP_TYPE_XRC_INI: + qp->has_req = true; + break; + case QED_RDMA_QP_TYPE_XRC_TGT: + qp->has_resp = true; + break; + default: + qp->has_req = true; + qp->has_resp = true; + } + + if (QED_IS_IWARP_PERSONALITY(p_hwfn)) { + enum qed_iwarp_qp_state new_state = + qed_roce2iwarp_state(qp->cur_state); + + rc = qed_iwarp_modify_qp(p_hwfn, qp, new_state, 0); + } else { + rc = qed_roce_modify_qp(p_hwfn, qp, prev_state, params); + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify QP, rc = %d\n", rc); + return rc; +} + +static int +qed_rdma_register_tid(void *rdma_cxt, + struct qed_rdma_register_tid_in_params *params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_register_tid_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + enum rdma_tid_type tid_type; + u8 fw_return_code; + u16 flags = 0; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", params->itid); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_REGISTER_MR, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + + if (p_hwfn->p_rdma_info->last_tid < params->itid) + p_hwfn->p_rdma_info->last_tid = params->itid; + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL, + params->pbl_two_level); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, + false); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); + + /* Don't initialize D/C field, as it may override other bits. 
*/ + if (!(params->tid_type == QED_RDMA_TID_FMR) && !(params->dma_mr)) + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG, + params->page_size_log - 12); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ, + params->remote_read); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE, + params->remote_write); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC, + params->remote_atomic); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE, + params->local_write); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ, + params->local_read); + + SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND, + params->mw_bind); + + p_ramrod = &p_ent->ramrod.rdma_register_tid; + p_ramrod->flags = cpu_to_le16(flags); + + SET_FIELD(p_ramrod->flags1, + RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG, + params->pbl_page_size_log - 12); + + SET_FIELD(p_ramrod->flags2, RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR, + params->dma_mr); + + switch (params->tid_type) { + case QED_RDMA_TID_REGISTERED_MR: + tid_type = RDMA_TID_REGISTERED_MR; + break; + case QED_RDMA_TID_FMR: + tid_type = RDMA_TID_FMR; + break; + case QED_RDMA_TID_MW: + tid_type = RDMA_TID_MW; + break; + default: + rc = -EINVAL; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; + } + + SET_FIELD(p_ramrod->flags1, RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE, + tid_type); + + p_ramrod->itid = cpu_to_le32(params->itid); + p_ramrod->key = params->key; + p_ramrod->pd = cpu_to_le16(params->pd); + p_ramrod->length_hi = (u8)(params->length >> 32); + p_ramrod->length_lo = DMA_LO_LE(params->length); + DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); + DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); + + /* DIF */ + if (params->dif_enabled) { + SET_FIELD(p_ramrod->flags2, + RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1); + DMA_REGPAIR_LE(p_ramrod->dif_error_addr, + params->dif_error_addr); + } + + rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); + if (rc) + return rc; + + if (fw_return_code != RDMA_RETURN_OK) { + DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Register TID, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_deregister_tid(void *rdma_cxt, u32 itid) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct rdma_deregister_tid_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + struct qed_ptt *p_ptt; + u8 fw_return_code; + int rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "itid = %08x\n", itid); + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, RDMA_RAMROD_DEREGISTER_MR, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + + p_ramrod = &p_ent->ramrod.rdma_deregister_tid; + p_ramrod->itid = cpu_to_le32(itid); + + rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "rc = %d\n", rc); + return rc; + } + + if (fw_return_code == RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR) { + DP_NOTICE(p_hwfn, "fw_return_code = %d\n", fw_return_code); + return -EINVAL; + } else if (fw_return_code == RDMA_RETURN_NIG_DRAIN_REQ) { + /* Bit indicating that the TID is in use and a nig drain is + * required before sending the ramrod again + */ + p_ptt = 
qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + rc = -EBUSY; + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to acquire PTT\n"); + return rc; + } + + rc = qed_mcp_drain(p_hwfn, p_ptt); + if (rc) { + qed_ptt_release(p_hwfn, p_ptt); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Drain failed\n"); + return rc; + } + + qed_ptt_release(p_hwfn, p_ptt); + + /* Resend the ramrod */ + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DEREGISTER_MR, + p_hwfn->p_rdma_info->proto, + &init_data); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Failed to init sp-element\n"); + return rc; + } + + rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code); + if (rc) { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Ramrod failed\n"); + return rc; + } + + if (fw_return_code != RDMA_RETURN_OK) { + DP_NOTICE(p_hwfn, "fw_return_code = %d\n", + fw_return_code); + return rc; + } + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "De-registered TID, rc = %d\n", rc); + return rc; +} + +static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev) +{ + return QED_AFFIN_HWFN(cdev); +} + +static struct qed_bmap *qed_rdma_get_srq_bmap(struct qed_hwfn *p_hwfn, + bool is_xrc) +{ + if (is_xrc) + return &p_hwfn->p_rdma_info->xrc_srq_map; + + return &p_hwfn->p_rdma_info->srq_map; +} + +static int qed_rdma_modify_srq(void *rdma_cxt, + struct qed_rdma_modify_srq_in_params *in_params) +{ + struct rdma_srq_modify_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + u16 opaque_fid; + int rc; + + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_MODIFY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_modify_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + opaque_fid = p_hwfn->hw_info.opaque_fid; + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->wqe_limit = cpu_to_le32(in_params->wqe_limit); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "modified SRQ id = %x, is_xrc=%u\n", + in_params->srq_id, in_params->is_xrc); + + return rc; +} + +static int +qed_rdma_destroy_srq(void *rdma_cxt, + struct qed_rdma_destroy_srq_in_params *in_params) +{ + struct rdma_srq_destroy_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + struct qed_spq_entry *p_ent; + struct qed_bmap *bmap; + u16 opaque_fid; + u16 offset; + int rc; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_DESTROY_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.rdma_destroy_srq; + p_ramrod->srq_id.srq_idx = cpu_to_le16(in_params->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); + offset = (in_params->is_xrc) ? 
0 : p_hwfn->p_rdma_info->srq_id_offset; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, in_params->srq_id - offset); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "XRC/SRQ destroyed Id = %x, is_xrc=%u\n", + in_params->srq_id, in_params->is_xrc); + + return rc; +} + +static int +qed_rdma_create_srq(void *rdma_cxt, + struct qed_rdma_create_srq_in_params *in_params, + struct qed_rdma_create_srq_out_params *out_params) +{ + struct rdma_srq_create_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data = {}; + struct qed_hwfn *p_hwfn = rdma_cxt; + enum qed_cxt_elem_type elem_type; + struct qed_spq_entry *p_ent; + u16 opaque_fid, srq_id; + struct qed_bmap *bmap; + u32 returned_id; + u16 offset; + int rc; + + bmap = qed_rdma_get_srq_bmap(p_hwfn, in_params->is_xrc); + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, bmap, &returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + if (rc) { + DP_NOTICE(p_hwfn, + "failed to allocate xrc/srq id (is_xrc=%u)\n", + in_params->is_xrc); + return rc; + } + + elem_type = (in_params->is_xrc) ? (QED_ELEM_XRC_SRQ) : (QED_ELEM_SRQ); + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, elem_type, returned_id); + if (rc) + goto err; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + + opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + RDMA_RAMROD_CREATE_SRQ, + p_hwfn->p_rdma_info->proto, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.rdma_create_srq; + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, in_params->pbl_base_addr); + p_ramrod->pages_in_srq_pbl = cpu_to_le16(in_params->num_pages); + p_ramrod->pd_id = cpu_to_le16(in_params->pd_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(opaque_fid); + p_ramrod->page_size = cpu_to_le16(in_params->page_size); + DMA_REGPAIR_LE(p_ramrod->producers_addr, in_params->prod_pair_addr); + offset = (in_params->is_xrc) ? 0 : p_hwfn->p_rdma_info->srq_id_offset; + srq_id = (u16)returned_id + offset; + p_ramrod->srq_id.srq_idx = cpu_to_le16(srq_id); + + if (in_params->is_xrc) { + SET_FIELD(p_ramrod->flags, + RDMA_SRQ_CREATE_RAMROD_DATA_XRC_FLAG, 1); + SET_FIELD(p_ramrod->flags, + RDMA_SRQ_CREATE_RAMROD_DATA_RESERVED_KEY_EN, + in_params->reserved_key_en); + p_ramrod->xrc_srq_cq_cid = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | + in_params->cq_cid); + p_ramrod->xrc_domain = cpu_to_le16(in_params->xrcd_id); + } + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + out_params->srq_id = srq_id; + + DP_VERBOSE(p_hwfn, + QED_MSG_RDMA, + "XRC/SRQ created Id = %x (is_xrc=%u)\n", + out_params->srq_id, in_params->is_xrc); + return rc; + +err: + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, bmap, returned_id); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + + return rc; +} + +bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn) +{ + bool result; + + /* if rdma wasn't activated yet, naturally there are no qps */ + if (!p_hwfn->p_rdma_info->active) + return false; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + if (!p_hwfn->p_rdma_info->cid_map.bitmap) + result = false; + else + result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); + return result; +} + +void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 val; + + val = (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) ? 
0 : 1; + + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_DPM_ENABLE, val); + DP_VERBOSE(p_hwfn, (QED_MSG_DCB | QED_MSG_RDMA), + "Changing DPM_EN state to %d (DCBX=%d, DB_BAR=%d)\n", + val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm); +} + +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + p_hwfn->db_bar_no_edpm = true; + + qed_rdma_dpm_conf(p_hwfn, p_ptt); +} + +static int qed_rdma_start(void *rdma_cxt, + struct qed_rdma_start_in_params *params) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + struct qed_ptt *p_ptt; + int rc = -EBUSY; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "desired_cnq = %08x\n", params->desired_cnq); + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + goto err; + + rc = qed_rdma_alloc(p_hwfn); + if (rc) + goto err1; + + rc = qed_rdma_setup(p_hwfn, p_ptt, params); + if (rc) + goto err2; + + qed_ptt_release(p_hwfn, p_ptt); + p_hwfn->p_rdma_info->active = 1; + + return rc; + +err2: + qed_rdma_free(p_hwfn); +err1: + qed_ptt_release(p_hwfn, p_ptt); +err: + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "RDMA start - error, rc = %d\n", rc); + return rc; +} + +static int qed_rdma_init(struct qed_dev *cdev, + struct qed_rdma_start_in_params *params) +{ + return qed_rdma_start(QED_AFFIN_HWFN(cdev), params); +} + +static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi) +{ + struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "dpi = %08x\n", dpi); + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->dpi_map, dpi); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev, + u8 *old_mac_address, + const u8 *new_mac_address) +{ + int rc = 0; + + if (old_mac_address) + qed_llh_remove_mac_filter(cdev, 0, old_mac_address); + if (new_mac_address) + rc = qed_llh_add_mac_filter(cdev, 0, new_mac_address); + + if (rc) + DP_ERR(cdev, + "qed roce ll2 mac filter set: failed to add MAC filter\n"); + + return rc; +} + +static int qed_iwarp_set_engine_affin(struct qed_dev *cdev, bool b_reset) +{ + enum qed_eng eng; + u8 ppfid = 0; + int rc; + + /* Make sure iwarp cmt mode is enabled before setting affinity */ + if (!cdev->iwarp_cmt) + return -EINVAL; + + if (b_reset) + eng = QED_BOTH_ENG; + else + eng = cdev->l2_affin_hint ? 
QED_ENG1 : QED_ENG0; + + rc = qed_llh_set_ppfid_affinity(cdev, ppfid, eng); + if (rc) { + DP_NOTICE(cdev, + "Failed to set the engine affinity of ppfid %d\n", + ppfid); + return rc; + } + + DP_VERBOSE(cdev, (QED_MSG_RDMA | QED_MSG_SP), + "LLH: Set the engine affinity of non-RoCE packets as %d\n", + eng); + + return 0; +} + +static const struct qed_rdma_ops qed_rdma_ops_pass = { + .common = &qed_common_ops_pass, + .fill_dev_info = &qed_fill_rdma_dev_info, + .rdma_get_rdma_ctx = &qed_rdma_get_rdma_ctx, + .rdma_init = &qed_rdma_init, + .rdma_add_user = &qed_rdma_add_user, + .rdma_remove_user = &qed_rdma_remove_user, + .rdma_stop = &qed_rdma_stop, + .rdma_query_port = &qed_rdma_query_port, + .rdma_query_device = &qed_rdma_query_device, + .rdma_get_start_sb = &qed_rdma_get_sb_start, + .rdma_get_rdma_int = &qed_rdma_get_int, + .rdma_set_rdma_int = &qed_rdma_set_int, + .rdma_get_min_cnq_msix = &qed_rdma_get_min_cnq_msix, + .rdma_cnq_prod_update = &qed_rdma_cnq_prod_update, + .rdma_alloc_pd = &qed_rdma_alloc_pd, + .rdma_dealloc_pd = &qed_rdma_free_pd, + .rdma_alloc_xrcd = &qed_rdma_alloc_xrcd, + .rdma_dealloc_xrcd = &qed_rdma_free_xrcd, + .rdma_create_cq = &qed_rdma_create_cq, + .rdma_destroy_cq = &qed_rdma_destroy_cq, + .rdma_create_qp = &qed_rdma_create_qp, + .rdma_modify_qp = &qed_rdma_modify_qp, + .rdma_query_qp = &qed_rdma_query_qp, + .rdma_destroy_qp = &qed_rdma_destroy_qp, + .rdma_alloc_tid = &qed_rdma_alloc_tid, + .rdma_free_tid = &qed_rdma_free_tid, + .rdma_register_tid = &qed_rdma_register_tid, + .rdma_deregister_tid = &qed_rdma_deregister_tid, + .rdma_create_srq = &qed_rdma_create_srq, + .rdma_modify_srq = &qed_rdma_modify_srq, + .rdma_destroy_srq = &qed_rdma_destroy_srq, + .ll2_acquire_connection = &qed_ll2_acquire_connection, + .ll2_establish_connection = &qed_ll2_establish_connection, + .ll2_terminate_connection = &qed_ll2_terminate_connection, + .ll2_release_connection = &qed_ll2_release_connection, + .ll2_post_rx_buffer = &qed_ll2_post_rx_buffer, + .ll2_prepare_tx_packet = &qed_ll2_prepare_tx_packet, + .ll2_set_fragment_of_tx_packet = &qed_ll2_set_fragment_of_tx_packet, + .ll2_set_mac_filter = &qed_roce_ll2_set_mac_filter, + .ll2_get_stats = &qed_ll2_get_stats, + .iwarp_set_engine_affin = &qed_iwarp_set_engine_affin, + .iwarp_connect = &qed_iwarp_connect, + .iwarp_create_listen = &qed_iwarp_create_listen, + .iwarp_destroy_listen = &qed_iwarp_destroy_listen, + .iwarp_accept = &qed_iwarp_accept, + .iwarp_reject = &qed_iwarp_reject, + .iwarp_send_rtr = &qed_iwarp_send_rtr, +}; + +const struct qed_rdma_ops *qed_get_rdma_ops(void) +{ + return &qed_rdma_ops_pass; +} +EXPORT_SYMBOL(qed_get_rdma_ops); diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h new file mode 100644 index 0000000000..2753723011 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef _QED_RDMA_H +#define _QED_RDMA_H +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_if.h> +#include <linux/qed/qed_rdma_if.h> +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_iwarp.h" +#include "qed_roce.h" + +#define QED_RDMA_MAX_P_KEY (1) +#define QED_RDMA_MAX_WQE (0x7FFF) +#define QED_RDMA_MAX_SRQ_WQE_ELEM (0x7FFF) +#define QED_RDMA_PAGE_SIZE_CAPS (0xFFFFF000) +#define QED_RDMA_ACK_DELAY (15) +#define QED_RDMA_MAX_MR_SIZE (0x10000000000ULL) +#define QED_RDMA_MAX_CQS (RDMA_MAX_CQS) +#define QED_RDMA_MAX_MRS (RDMA_MAX_TIDS) +/* Add 1 for header element */ +#define QED_RDMA_MAX_SRQ_ELEM_PER_WQE (RDMA_MAX_SGE_PER_RQ_WQE + 1) +#define QED_RDMA_MAX_SGE_PER_SRQ_WQE (RDMA_MAX_SGE_PER_RQ_WQE) +#define QED_RDMA_SRQ_WQE_ELEM_SIZE (16) +#define QED_RDMA_MAX_SRQS (32 * 1024) + +#define QED_RDMA_MAX_CQE_32_BIT (0x7FFFFFFF - 1) +#define QED_RDMA_MAX_CQE_16_BIT (0x7FFF - 1) + +/* Up to 2^16 XRC Domains are supported, but the actual number of supported XRC + * SRQs is much smaller so there's no need to have that many domains. + */ +#define QED_RDMA_MAX_XRCDS (roundup_pow_of_two(RDMA_MAX_XRC_SRQS)) + +enum qed_rdma_toggle_bit { + QED_RDMA_TOGGLE_BIT_CLEAR = 0, + QED_RDMA_TOGGLE_BIT_SET = 1 +}; + +#define QED_RDMA_MAX_BMAP_NAME (10) +struct qed_bmap { + unsigned long *bitmap; + u32 max_count; + char name[QED_RDMA_MAX_BMAP_NAME]; +}; + +struct qed_rdma_info { + /* spin lock to protect bitmaps */ + spinlock_t lock; + + struct qed_bmap cq_map; + struct qed_bmap pd_map; + struct qed_bmap xrcd_map; + struct qed_bmap tid_map; + struct qed_bmap qp_map; + struct qed_bmap srq_map; + struct qed_bmap xrc_srq_map; + struct qed_bmap cid_map; + struct qed_bmap tcp_cid_map; + struct qed_bmap real_cid_map; + struct qed_bmap dpi_map; + struct qed_bmap toggle_bits; + struct qed_rdma_events events; + struct qed_rdma_device *dev; + struct qed_rdma_port *port; + u32 last_tid; + u8 num_cnqs; + u32 num_qps; + u32 num_mrs; + u32 num_srqs; + u16 srq_id_offset; + u16 queue_zone_base; + u16 max_queue_zones; + enum protocol_type proto; + struct qed_iwarp_info iwarp; + u8 active:1; +}; + +struct qed_rdma_qp { + struct regpair qp_handle; + struct regpair qp_handle_async; + u32 qpid; + u16 icid; + enum qed_roce_qp_state cur_state; + enum qed_rdma_qp_type qp_type; + enum qed_iwarp_qp_state iwarp_state; + bool use_srq; + bool signal_all; + bool fmr_and_reserved_lkey; + + bool incoming_rdma_read_en; + bool incoming_rdma_write_en; + bool incoming_atomic_en; + bool e2e_flow_control_en; + + u16 pd; + u16 pkey; + u32 dest_qp; + u16 mtu; + u16 srq_id; + u8 traffic_class_tos; + u8 hop_limit_ttl; + u16 dpi; + u32 flow_label; + bool lb_indication; + u16 vlan_id; + u32 ack_timeout; + u8 retry_cnt; + u8 rnr_retry_cnt; + u8 min_rnr_nak_timer; + bool sqd_async; + union qed_gid sgid; + union qed_gid dgid; + enum roce_mode roce_mode; + u16 udp_src_port; + u8 stats_queue; + + /* requeseter */ + u8 max_rd_atomic_req; + u32 sq_psn; + u16 sq_cq_id; + u16 sq_num_pages; + dma_addr_t sq_pbl_ptr; + void *orq; + dma_addr_t orq_phys_addr; + u8 orq_num_pages; + bool req_offloaded; + bool has_req; + + /* responder */ + u8 max_rd_atomic_resp; + u32 rq_psn; + u16 rq_cq_id; + u16 rq_num_pages; + u16 xrcd_id; + dma_addr_t rq_pbl_ptr; + void *irq; + dma_addr_t irq_phys_addr; + u8 irq_num_pages; + bool resp_offloaded; + u32 cq_prod; + bool has_resp; + + u8 remote_mac_addr[6]; + u8 
local_mac_addr[6]; + + void *shared_queue; + dma_addr_t shared_queue_phys_addr; + struct qed_iwarp_ep *ep; + u8 edpm_mode; +}; + +static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp) +{ + if (qp->qp_type == QED_RDMA_QP_TYPE_XRC_TGT || + qp->qp_type == QED_RDMA_QP_TYPE_XRC_INI) + return true; + + return false; +} + +#if IS_ENABLED(CONFIG_QED_RDMA) +void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn); +void qed_rdma_info_free(struct qed_hwfn *p_hwfn); +#else +static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} +static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} +static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) + {return -EINVAL; } +static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {} +#endif + +int +qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 max_count, char *name); + +void +qed_rdma_bmap_free(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, bool check); + +int +qed_rdma_bmap_alloc_id(struct qed_hwfn *p_hwfn, + struct qed_bmap *bmap, u32 *id_num); + +void +qed_bmap_set_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); + +void +qed_bmap_release_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); + +int +qed_bmap_test_id(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap, u32 id_num); + +void qed_rdma_set_fw_mac(__le16 *p_fw_mac, const u8 *p_qed_mac); + +bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn); +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h new file mode 100644 index 0000000000..b5e35f433a --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -0,0 +1,1725 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef REG_ADDR_H +#define REG_ADDR_H + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \ + 0 + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \ + 0xfff << 0) + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \ + 12 + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \ + 0xfff << 12) + +#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \ + 24 + +#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \ + 0xff << 24) + +#define CDU_REG_SEGMENT0_PARAMS \ + 0x580904UL +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \ + (0xfff << 0) +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \ + 0 +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \ + (0xff << 16) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \ + 16 +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \ + (0xff << 24) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \ + 24 +#define CDU_REG_SEGMENT1_PARAMS \ + 0x580908UL +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \ + (0xfff << 0) +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \ + 0 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \ + (0xff << 16) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \ + 16 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \ + (0xff << 24) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \ + 24 + +#define XSDM_REG_OPERATION_GEN \ + 0xf80408UL +#define NIG_REG_RX_BRB_OUT_EN \ + 0x500e18UL +#define NIG_REG_STORM_OUT_EN \ + 0x500e08UL +#define PSWRQ2_REG_L2P_VALIDATE_VFID \ + 0x240c50UL +#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \ + 0x2aae04UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ + 0x2aa16cUL +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \ + 0x2aa118UL +#define PSWHST_REG_ZONE_PERMISSION_TABLE \ + 0x2a0800UL +#define BAR0_MAP_REG_MSDM_RAM \ + 0x1d00000UL +#define BAR0_MAP_REG_USDM_RAM \ + 0x1d80000UL +#define BAR0_MAP_REG_PSDM_RAM \ + 0x1f00000UL +#define BAR0_MAP_REG_TSDM_RAM \ + 0x1c80000UL +#define BAR0_MAP_REG_XSDM_RAM \ + 0x1e00000UL +#define BAR0_MAP_REG_YSDM_RAM \ + 0x1e80000UL +#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ + 0x5011f4UL +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE \ + 0x1f0164UL +#define PRS_REG_SEARCH_TCP \ + 0x1f0400UL +#define PRS_REG_SEARCH_UDP \ + 0x1f0404UL +#define PRS_REG_SEARCH_FCOE \ + 0x1f0408UL +#define PRS_REG_SEARCH_ROCE \ + 0x1f040cUL +#define PRS_REG_SEARCH_OPENFLOW \ + 0x1f0434UL +#define PRS_REG_SEARCH_TAG1 \ + 0x1f0444UL +#define PRS_REG_SEARCH_TENANT_ID \ + 0x1f044cUL +#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \ + 0x1f0a0cUL +#define PRS_REG_SEARCH_TCP_FIRST_FRAG \ + 0x1f0410UL +#define TM_REG_PF_ENABLE_CONN \ + 0x2c043cUL +#define TM_REG_PF_ENABLE_TASK \ + 0x2c0444UL +#define TM_REG_PF_SCAN_ACTIVE_CONN \ + 0x2c04fcUL +#define TM_REG_PF_SCAN_ACTIVE_TASK \ + 0x2c0500UL +#define IGU_REG_LEADING_EDGE_LATCH \ + 0x18082cUL +#define IGU_REG_TRAILING_EDGE_LATCH \ + 0x180830UL +#define QM_REG_USG_CNT_PF_TX \ + 0x2f2eacUL +#define QM_REG_USG_CNT_PF_OTHER \ + 0x2f2eb0UL +#define DORQ_REG_PF_DB_ENABLE \ + 0x100508UL +#define DORQ_REG_VF_USAGE_CNT \ + 0x1009c4UL +#define QM_REG_PF_EN \ + 0x2f2ea4UL +#define QM_REG_RLGLBLUPPERBOUND \ + 0x2f3c00UL +#define TCFC_REG_WEAK_ENABLE_VF \ + 0x2d0704UL +#define TCFC_REG_STRONG_ENABLE_PF \ + 0x2d0708UL +#define TCFC_REG_STRONG_ENABLE_VF \ + 0x2d070cUL +#define CCFC_REG_WEAK_ENABLE_VF \ + 0x2e0704UL +#define CCFC_REG_STRONG_ENABLE_PF \ + 0x2e0708UL +#define PGLUE_B_REG_PGL_ADDR_88_F0_BB \ + 0x2aa404UL +#define PGLUE_B_REG_PGL_ADDR_8C_F0_BB \ + 0x2aa408UL +#define PGLUE_B_REG_PGL_ADDR_90_F0_BB \ + 0x2aa40cUL +#define 
PGLUE_B_REG_PGL_ADDR_94_F0_BB \ + 0x2aa410UL +#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \ + 0x2aa138UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \ + 0x2aa174UL +#define MISC_REG_GEN_PURP_CR0 \ + 0x008c80UL +#define MCP_REG_SCRATCH \ + 0xe20000UL +#define MCP_REG_SCRATCH_SIZE \ + 57344 +#define CNIG_REG_NW_PORT_MODE_BB \ + 0x218200UL +#define MISCS_REG_CHIP_NUM \ + 0x00976cUL +#define MISCS_REG_CHIP_REV \ + 0x009770UL +#define MISCS_REG_CMT_ENABLED_FOR_PAIR \ + 0x00971cUL +#define MISCS_REG_CHIP_TEST_REG \ + 0x009778UL +#define MISCS_REG_CHIP_METAL \ + 0x009774UL +#define MISCS_REG_FUNCTION_HIDE \ + 0x0096f0UL +#define BRB_REG_HEADER_SIZE \ + 0x340804UL +#define BTB_REG_HEADER_SIZE \ + 0xdb0804UL +#define CAU_REG_LONG_TIMEOUT_THRESHOLD \ + 0x1c0708UL +#define CCFC_REG_ACTIVITY_COUNTER \ + 0x2e8800UL +#define CCFC_REG_STRONG_ENABLE_VF \ + 0x2e070cUL +#define CDU_REG_CCFC_CTX_VALID0 \ + 0x580400UL +#define CDU_REG_CCFC_CTX_VALID1 \ + 0x580404UL +#define CDU_REG_TCFC_CTX_VALID0 \ + 0x580408UL +#define CDU_REG_CID_ADDR_PARAMS \ + 0x580900UL +#define DBG_REG_CLIENT_ENABLE \ + 0x010004UL +#define DBG_REG_TIMESTAMP_VALID_EN \ + 0x010b58UL +#define DMAE_REG_INIT \ + 0x00c000UL +#define DORQ_REG_IFEN \ + 0x100040UL +#define DORQ_REG_TAG1_OVRD_MODE \ + 0x1008b4UL +#define DORQ_REG_PF_PCP_BB_K2 \ + 0x1008c4UL +#define DORQ_REG_PF_EXT_VID_BB_K2 \ + 0x1008c8UL +#define DORQ_REG_DB_DROP_REASON \ + 0x100a2cUL +#define DORQ_REG_DB_DROP_DETAILS \ + 0x100a24UL +#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \ + 0x100a1cUL +#define GRC_REG_TIMEOUT_EN \ + 0x050404UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \ + 0x050054UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \ + 0x05004cUL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \ + 0x050050UL +#define IGU_REG_BLOCK_CONFIGURATION \ + 0x180040UL +#define MCM_REG_INIT \ + 0x1200000UL +#define MCP2_REG_DBG_DWORD_ENABLE \ + 0x052404UL +#define MISC_REG_PORT_MODE \ + 0x008c00UL +#define MISCS_REG_CLK_100G_MODE \ + 0x009070UL +#define MSDM_REG_ENABLE_IN1 \ + 0xfc0004UL +#define MSEM_REG_ENABLE_IN \ + 0x1800004UL +#define NIG_REG_CM_HDR \ + 0x500840UL +#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \ + 0x50196cUL +#define NIG_REG_LLH_PPFID2PFID_TBL_0 \ + 0x501970UL +#define NIG_REG_LLH_ENG_CLS_ROCE_QP_SEL \ + 0x50 +#define NIG_REG_LLH_CLS_TYPE_DUALMODE \ + 0x501964UL +#define NIG_REG_LLH_FUNC_TAG_EN 0x5019b0UL +#define NIG_REG_LLH_FUNC_TAG_VALUE 0x5019d0UL +#define NIG_REG_LLH_FUNC_FILTER_VALUE \ + 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_VALUE_SIZE \ + 32 +#define NIG_REG_LLH_FUNC_FILTER_EN \ + 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE \ + 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE \ + 0x501ac0UL +#define NIG_REG_LLH_FUNC_FILTER_MODE_SIZE \ + 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE \ + 0x501b00UL +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_SIZE \ + 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL \ + 0x501b40UL +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_SIZE \ + 16 +#define NCSI_REG_CONFIG \ + 0x040200UL +#define PBF_REG_INIT \ + 0xd80000UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \ + 0xd806c8UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \ + 0xd806ccUL +#define PTU_REG_ATC_INIT_ARRAY \ + 0x560000UL +#define PCM_REG_INIT \ + 0x1100000UL +#define PGLUE_B_REG_ADMIN_PER_PF_REGION \ + 0x2a9000UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \ + 0x2aa150UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \ + 0x2aa144UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \ + 0x2aa148UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS \ + 0x2aa14cUL +#define 
PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \ + 0x2aa154UL +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \ + 0x2aa158UL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS \ + 0x2aa15cUL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \ + 0x2aa160UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \ + 0x2aa164UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \ + 0x2aa54cUL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \ + 0x2aa544UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \ + 0x2aa548UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \ + 0x2aae74UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \ + 0x2aae78UL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \ + 0x2aae7cUL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \ + 0x2aae80UL +#define PGLUE_B_REG_LATCHED_ERRORS_CLR \ + 0x2aa3bcUL +#define PRM_REG_DISABLE_PRM \ + 0x230000UL +#define PRS_REG_SOFT_RST \ + 0x1f0000UL +#define PRS_REG_MSG_INFO \ + 0x1f0a1cUL +#define PRS_REG_ROCE_DEST_QP_MAX_PF \ + 0x1f0430UL +#define PRS_REG_USE_LIGHT_L2 \ + 0x1f096cUL +#define PSDM_REG_ENABLE_IN1 \ + 0xfa0004UL +#define PSEM_REG_ENABLE_IN \ + 0x1600004UL +#define PSWRQ_REG_DBG_SELECT \ + 0x280020UL +#define PSWRQ2_REG_CDUT_P_SIZE \ + 0x24000cUL +#define PSWRQ2_REG_ILT_MEMORY \ + 0x260000UL +#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \ + 15200 +#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \ + 22000 +#define PSWHST_REG_DISCARD_INTERNAL_WRITES \ + 0x2a0040UL +#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ + 0x29e050UL +#define PSWHST_REG_INCORRECT_ACCESS_VALID \ + 0x2a0070UL +#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \ + 0x2a0074UL +#define PSWHST_REG_INCORRECT_ACCESS_DATA \ + 0x2a0068UL +#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \ + 0x2a006cUL +#define PSWRD_REG_DBG_SELECT \ + 0x29c040UL +#define PSWRD2_REG_CONF11 \ + 0x29d064UL +#define PSWWR_REG_USDM_FULL_TH \ + 0x29a040UL +#define PSWWR2_REG_CDU_FULL_TH2 \ + 0x29b040UL +#define QM_REG_MAXPQSIZE_0 \ + 0x2f0434UL +#define RSS_REG_RSS_INIT_EN \ + 0x238804UL +#define RDIF_REG_STOP_ON_ERROR \ + 0x300040UL +#define RDIF_REG_DEBUG_ERROR_INFO \ + 0x300400UL +#define RDIF_REG_DEBUG_ERROR_INFO_SIZE \ + 64 +#define SRC_REG_SOFT_RST \ + 0x23874cUL +#define TCFC_REG_ACTIVITY_COUNTER \ + 0x2d8800UL +#define TCM_REG_INIT \ + 0x1180000UL +#define TM_REG_PXP_READ_DATA_FIFO_INIT \ + 0x2c0014UL +#define TSDM_REG_ENABLE_IN1 \ + 0xfb0004UL +#define TSEM_REG_ENABLE_IN \ + 0x1700004UL +#define TDIF_REG_STOP_ON_ERROR \ + 0x310040UL +#define TDIF_REG_DEBUG_ERROR_INFO \ + 0x310400UL +#define TDIF_REG_DEBUG_ERROR_INFO_SIZE \ + 64 +#define UCM_REG_INIT \ + 0x1280000UL +#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \ + 0x051004UL +#define USDM_REG_ENABLE_IN1 \ + 0xfd0004UL +#define USEM_REG_ENABLE_IN \ + 0x1900004UL +#define XCM_REG_INIT \ + 0x1000000UL +#define XSDM_REG_ENABLE_IN1 \ + 0xf80004UL +#define XSEM_REG_ENABLE_IN \ + 0x1400004UL +#define YCM_REG_INIT \ + 0x1080000UL +#define YSDM_REG_ENABLE_IN1 \ + 0xf90004UL +#define YSEM_REG_ENABLE_IN \ + 0x1500004UL +#define XYLD_REG_SCBD_STRICT_PRIO \ + 0x4c0000UL +#define TMLD_REG_SCBD_STRICT_PRIO \ + 0x4d0000UL +#define MULD_REG_SCBD_STRICT_PRIO \ + 0x4e0000UL +#define YULD_REG_SCBD_STRICT_PRIO \ + 0x4c8000UL +#define MISC_REG_SHARED_MEM_ADDR \ + 0x008c20UL +#define DMAE_REG_GO_C0 \ + 0x00c048UL +#define DMAE_REG_GO_C1 \ + 0x00c04cUL +#define DMAE_REG_GO_C2 \ + 0x00c050UL +#define DMAE_REG_GO_C3 \ + 0x00c054UL +#define DMAE_REG_GO_C4 \ + 0x00c058UL +#define DMAE_REG_GO_C5 \ + 0x00c05cUL +#define DMAE_REG_GO_C6 \ + 0x00c060UL +#define DMAE_REG_GO_C7 \ + 0x00c064UL +#define DMAE_REG_GO_C8 \ + 0x00c068UL +#define DMAE_REG_GO_C9 \ + 0x00c06cUL +#define DMAE_REG_GO_C10 
\ + 0x00c070UL +#define DMAE_REG_GO_C11 \ + 0x00c074UL +#define DMAE_REG_GO_C12 \ + 0x00c078UL +#define DMAE_REG_GO_C13 \ + 0x00c07cUL +#define DMAE_REG_GO_C14 \ + 0x00c080UL +#define DMAE_REG_GO_C15 \ + 0x00c084UL +#define DMAE_REG_GO_C16 \ + 0x00c088UL +#define DMAE_REG_GO_C17 \ + 0x00c08cUL +#define DMAE_REG_GO_C18 \ + 0x00c090UL +#define DMAE_REG_GO_C19 \ + 0x00c094UL +#define DMAE_REG_GO_C20 \ + 0x00c098UL +#define DMAE_REG_GO_C21 \ + 0x00c09cUL +#define DMAE_REG_GO_C22 \ + 0x00c0a0UL +#define DMAE_REG_GO_C23 \ + 0x00c0a4UL +#define DMAE_REG_GO_C24 \ + 0x00c0a8UL +#define DMAE_REG_GO_C25 \ + 0x00c0acUL +#define DMAE_REG_GO_C26 \ + 0x00c0b0UL +#define DMAE_REG_GO_C27 \ + 0x00c0b4UL +#define DMAE_REG_GO_C28 \ + 0x00c0b8UL +#define DMAE_REG_GO_C29 \ + 0x00c0bcUL +#define DMAE_REG_GO_C30 \ + 0x00c0c0UL +#define DMAE_REG_GO_C31 \ + 0x00c0c4UL +#define DMAE_REG_CMD_MEM \ + 0x00c800UL +#define QM_REG_MAXPQSIZETXSEL_0 \ + 0x2f0440UL +#define QM_REG_SDMCMDREADY \ + 0x2f1e10UL +#define QM_REG_SDMCMDADDR \ + 0x2f1e04UL +#define QM_REG_SDMCMDDATALSB \ + 0x2f1e08UL +#define QM_REG_SDMCMDDATAMSB \ + 0x2f1e0cUL +#define QM_REG_SDMCMDGO \ + 0x2f1e14UL +#define QM_REG_RLPFCRD \ + 0x2f4d80UL +#define QM_REG_RLPFINCVAL \ + 0x2f4c80UL +#define QM_REG_RLGLBLCRD \ + 0x2f4400UL +#define QM_REG_RLGLBLINCVAL \ + 0x2f3400UL +#define IGU_REG_ATTENTION_ENABLE \ + 0x18083cUL +#define IGU_REG_ATTN_MSG_ADDR_L \ + 0x180820UL +#define IGU_REG_ATTN_MSG_ADDR_H \ + 0x180824UL +#define MISC_REG_AEU_GENERAL_ATTN_0 \ + 0x008400UL +#define MISC_REG_AEU_GENERAL_ATTN_32 \ + 0x008480UL +#define MISC_REG_AEU_GENERAL_ATTN_35 \ + 0x00848cUL +#define CAU_REG_SB_ADDR_MEMORY \ + 0x1c8000UL +#define CAU_REG_SB_VAR_MEMORY \ + 0x1c6000UL +#define CAU_REG_PI_MEMORY \ + 0x1d0000UL +#define IGU_REG_PF_CONFIGURATION \ + 0x180800UL +#define IGU_REG_VF_CONFIGURATION \ + 0x180804UL +#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ + 0x00849cUL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 \ + 0x0084a8UL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32 \ + (0x1UL << 0) +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32_SHIFT \ + 0 +#define MISC_REG_AEU_AFTER_INVERT_1_IGU \ + 0x0087b4UL +#define MISC_REG_AEU_MASK_ATTN_IGU \ + 0x008494UL +#define IGU_REG_CLEANUP_STATUS_0 \ + 0x180980UL +#define IGU_REG_CLEANUP_STATUS_1 \ + 0x180a00UL +#define IGU_REG_CLEANUP_STATUS_2 \ + 0x180a80UL +#define IGU_REG_CLEANUP_STATUS_3 \ + 0x180b00UL +#define IGU_REG_CLEANUP_STATUS_4 \ + 0x180b80UL +#define IGU_REG_COMMAND_REG_32LSB_DATA \ + 0x180840UL +#define IGU_REG_COMMAND_REG_CTRL \ + 0x180848UL +#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \ + 0x1 << 1) +#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \ + 0x1 << 0) +#define IGU_REG_PRODUCER_MEMORY 0x182000UL +#define IGU_REG_CONSUMER_MEM 0x183000UL +#define IGU_REG_MAPPING_MEMORY \ + 0x184000UL +#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \ + 0x180408UL +#define IGU_REG_WRITE_DONE_PENDING \ + 0x180900UL +#define MISCS_REG_GENERIC_POR_0 \ + 0x0096d4UL +#define MCP_REG_NVM_CFG4 \ + 0xe0642cUL +#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \ + 0x7 << 0) +#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ + 0 +#define MCP_REG_CPU_STATE \ + 0xe05004UL +#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10) +#define MCP_REG_CPU_EVENT_MASK \ + 0xe05008UL +#define MCP_REG_CPU_PROGRAM_COUNTER 0xe0501cUL +#define PGLUE_B_REG_PF_BAR0_SIZE \ + 0x2aae60UL +#define PGLUE_B_REG_PF_BAR1_SIZE \ + 0x2aae64UL +#define PGLUE_B_REG_VF_BAR1_SIZE 0x2aae68UL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_GRE_PROTOCOL 
0x1f0734UL +#define PRS_REG_VXLAN_PORT 0x1f0738UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL + +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0) +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1) +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1 +#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2) +#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 + +#define NIG_REG_VXLAN_CTRL 0x50105cUL +#define PBF_REG_VXLAN_PORT 0xd80518UL +#define PBF_REG_NGE_PORT 0xd8051cUL +#define PRS_REG_NGE_PORT 0x1f086cUL +#define NIG_REG_NGE_PORT 0x508b38UL + +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL +#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL + +#define NIG_REG_NGE_IP_ENABLE 0x508b28UL +#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL +#define NIG_REG_NGE_COMP_VER 0x508b30UL +#define PBF_REG_NGE_COMP_VER 0xd80524UL +#define PRS_REG_NGE_COMP_VER 0x1f0878UL + +#define QM_REG_WFQPFWEIGHT 0x2f4e80UL +#define QM_REG_WFQVPWEIGHT 0x2fa000UL +#define QM_REG_WFQVPUPPERBOUND \ + 0x2fb000UL +#define QM_REG_WFQVPCRD \ + 0x2fc000UL +#define PGLCS_REG_DBG_SELECT_K2_E5 \ + 0x001d14UL +#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x001d18UL +#define PGLCS_REG_DBG_SHIFT_K2_E5 \ + 0x001d1cUL +#define PGLCS_REG_DBG_FORCE_VALID_K2_E5 \ + 0x001d20UL +#define PGLCS_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x001d24UL +#define MISC_REG_RESET_PL_PDA_VMAIN_1 \ + 0x008070UL +#define MISC_REG_RESET_PL_PDA_VMAIN_2 \ + 0x008080UL +#define MISC_REG_RESET_PL_PDA_VAUX \ + 0x008090UL +#define MISCS_REG_RESET_PL_UA \ + 0x009050UL +#define MISCS_REG_RESET_PL_HV \ + 0x009060UL +#define MISCS_REG_RESET_PL_HV_2_K2_E5 \ + 0x009150UL +#define DMAE_REG_DBG_SELECT \ + 0x00c510UL +#define DMAE_REG_DBG_DWORD_ENABLE \ + 0x00c514UL +#define DMAE_REG_DBG_SHIFT \ + 0x00c518UL +#define DMAE_REG_DBG_FORCE_VALID \ + 0x00c51cUL +#define DMAE_REG_DBG_FORCE_FRAME \ + 0x00c520UL +#define NCSI_REG_DBG_SELECT \ + 0x040474UL +#define NCSI_REG_DBG_DWORD_ENABLE \ + 0x040478UL +#define NCSI_REG_DBG_SHIFT \ + 0x04047cUL +#define NCSI_REG_DBG_FORCE_VALID \ + 0x040480UL +#define NCSI_REG_DBG_FORCE_FRAME \ + 0x040484UL +#define GRC_REG_DBG_SELECT \ + 0x0500a4UL +#define GRC_REG_DBG_DWORD_ENABLE \ + 0x0500a8UL +#define GRC_REG_DBG_SHIFT \ + 0x0500acUL +#define GRC_REG_DBG_FORCE_VALID \ + 0x0500b0UL +#define GRC_REG_DBG_FORCE_FRAME \ + 0x0500b4UL +#define UMAC_REG_DBG_SELECT_K2_E5 \ + 0x051094UL +#define UMAC_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x051098UL +#define UMAC_REG_DBG_SHIFT_K2_E5 \ + 0x05109cUL +#define UMAC_REG_DBG_FORCE_VALID_K2_E5 \ + 0x0510a0UL +#define UMAC_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x0510a4UL +#define MCP2_REG_DBG_SELECT \ + 0x052400UL +#define MCP2_REG_DBG_DWORD_ENABLE \ + 0x052404UL +#define MCP2_REG_DBG_SHIFT \ + 0x052408UL +#define MCP2_REG_DBG_FORCE_VALID \ + 0x052440UL +#define MCP2_REG_DBG_FORCE_FRAME \ + 0x052444UL +#define PCIE_REG_DBG_SELECT \ + 0x0547e8UL +#define PCIE_REG_DBG_DWORD_ENABLE \ + 0x0547ecUL +#define PCIE_REG_DBG_SHIFT \ + 0x0547f0UL +#define PCIE_REG_DBG_FORCE_VALID \ + 0x0547f4UL +#define PCIE_REG_DBG_FORCE_FRAME \ + 0x0547f8UL +#define DORQ_REG_DBG_SELECT \ + 0x100ad0UL +#define DORQ_REG_DBG_DWORD_ENABLE \ + 0x100ad4UL +#define DORQ_REG_DBG_SHIFT \ + 0x100ad8UL +#define DORQ_REG_DBG_FORCE_VALID \ + 0x100adcUL +#define 
DORQ_REG_DBG_FORCE_FRAME \ + 0x100ae0UL +#define IGU_REG_DBG_SELECT \ + 0x181578UL +#define IGU_REG_DBG_DWORD_ENABLE \ + 0x18157cUL +#define IGU_REG_DBG_SHIFT \ + 0x181580UL +#define IGU_REG_DBG_FORCE_VALID \ + 0x181584UL +#define IGU_REG_DBG_FORCE_FRAME \ + 0x181588UL +#define CAU_REG_DBG_SELECT \ + 0x1c0ea8UL +#define CAU_REG_DBG_DWORD_ENABLE \ + 0x1c0eacUL +#define CAU_REG_DBG_SHIFT \ + 0x1c0eb0UL +#define CAU_REG_DBG_FORCE_VALID \ + 0x1c0eb4UL +#define CAU_REG_DBG_FORCE_FRAME \ + 0x1c0eb8UL +#define PRS_REG_DBG_SELECT \ + 0x1f0b6cUL +#define PRS_REG_DBG_DWORD_ENABLE \ + 0x1f0b70UL +#define PRS_REG_DBG_SHIFT \ + 0x1f0b74UL +#define PRS_REG_DBG_FORCE_VALID \ + 0x1f0ba0UL +#define PRS_REG_DBG_FORCE_FRAME \ + 0x1f0ba4UL +#define CNIG_REG_DBG_SELECT_K2_E5 \ + 0x218254UL +#define CNIG_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x218258UL +#define CNIG_REG_DBG_SHIFT_K2_E5 \ + 0x21825cUL +#define CNIG_REG_DBG_FORCE_VALID_K2_E5 \ + 0x218260UL +#define CNIG_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x218264UL +#define PRM_REG_DBG_SELECT \ + 0x2306a8UL +#define PRM_REG_DBG_DWORD_ENABLE \ + 0x2306acUL +#define PRM_REG_DBG_SHIFT \ + 0x2306b0UL +#define PRM_REG_DBG_FORCE_VALID \ + 0x2306b4UL +#define PRM_REG_DBG_FORCE_FRAME \ + 0x2306b8UL +#define SRC_REG_DBG_SELECT \ + 0x238700UL +#define SRC_REG_DBG_DWORD_ENABLE \ + 0x238704UL +#define SRC_REG_DBG_SHIFT \ + 0x238708UL +#define SRC_REG_DBG_FORCE_VALID \ + 0x23870cUL +#define SRC_REG_DBG_FORCE_FRAME \ + 0x238710UL +#define RSS_REG_DBG_SELECT \ + 0x238c4cUL +#define RSS_REG_DBG_DWORD_ENABLE \ + 0x238c50UL +#define RSS_REG_DBG_SHIFT \ + 0x238c54UL +#define RSS_REG_DBG_FORCE_VALID \ + 0x238c58UL +#define RSS_REG_DBG_FORCE_FRAME \ + 0x238c5cUL +#define RPB_REG_DBG_SELECT \ + 0x23c728UL +#define RPB_REG_DBG_DWORD_ENABLE \ + 0x23c72cUL +#define RPB_REG_DBG_SHIFT \ + 0x23c730UL +#define RPB_REG_DBG_FORCE_VALID \ + 0x23c734UL +#define RPB_REG_DBG_FORCE_FRAME \ + 0x23c738UL +#define PSWRQ2_REG_DBG_SELECT \ + 0x240100UL +#define PSWRQ2_REG_DBG_DWORD_ENABLE \ + 0x240104UL +#define PSWRQ2_REG_DBG_SHIFT \ + 0x240108UL +#define PSWRQ2_REG_DBG_FORCE_VALID \ + 0x24010cUL +#define PSWRQ2_REG_DBG_FORCE_FRAME \ + 0x240110UL +#define PSWRQ_REG_DBG_SELECT \ + 0x280020UL +#define PSWRQ_REG_DBG_DWORD_ENABLE \ + 0x280024UL +#define PSWRQ_REG_DBG_SHIFT \ + 0x280028UL +#define PSWRQ_REG_DBG_FORCE_VALID \ + 0x28002cUL +#define PSWRQ_REG_DBG_FORCE_FRAME \ + 0x280030UL +#define PSWWR_REG_DBG_SELECT \ + 0x29a084UL +#define PSWWR_REG_DBG_DWORD_ENABLE \ + 0x29a088UL +#define PSWWR_REG_DBG_SHIFT \ + 0x29a08cUL +#define PSWWR_REG_DBG_FORCE_VALID \ + 0x29a090UL +#define PSWWR_REG_DBG_FORCE_FRAME \ + 0x29a094UL +#define PSWRD_REG_DBG_SELECT \ + 0x29c040UL +#define PSWRD_REG_DBG_DWORD_ENABLE \ + 0x29c044UL +#define PSWRD_REG_DBG_SHIFT \ + 0x29c048UL +#define PSWRD_REG_DBG_FORCE_VALID \ + 0x29c04cUL +#define PSWRD_REG_DBG_FORCE_FRAME \ + 0x29c050UL +#define PSWRD2_REG_DBG_SELECT \ + 0x29d400UL +#define PSWRD2_REG_DBG_DWORD_ENABLE \ + 0x29d404UL +#define PSWRD2_REG_DBG_SHIFT \ + 0x29d408UL +#define PSWRD2_REG_DBG_FORCE_VALID \ + 0x29d40cUL +#define PSWRD2_REG_DBG_FORCE_FRAME \ + 0x29d410UL +#define PSWHST2_REG_DBG_SELECT \ + 0x29e058UL +#define PSWHST2_REG_DBG_DWORD_ENABLE \ + 0x29e05cUL +#define PSWHST2_REG_DBG_SHIFT \ + 0x29e060UL +#define PSWHST2_REG_DBG_FORCE_VALID \ + 0x29e064UL +#define PSWHST2_REG_DBG_FORCE_FRAME \ + 0x29e068UL +#define PSWHST_REG_DBG_SELECT \ + 0x2a0100UL +#define PSWHST_REG_DBG_DWORD_ENABLE \ + 0x2a0104UL +#define PSWHST_REG_DBG_SHIFT \ + 0x2a0108UL +#define PSWHST_REG_DBG_FORCE_VALID 
\ + 0x2a010cUL +#define PSWHST_REG_DBG_FORCE_FRAME \ + 0x2a0110UL +#define PGLUE_B_REG_DBG_SELECT \ + 0x2a8400UL +#define PGLUE_B_REG_DBG_DWORD_ENABLE \ + 0x2a8404UL +#define PGLUE_B_REG_DBG_SHIFT \ + 0x2a8408UL +#define PGLUE_B_REG_DBG_FORCE_VALID \ + 0x2a840cUL +#define PGLUE_B_REG_DBG_FORCE_FRAME \ + 0x2a8410UL +#define TM_REG_DBG_SELECT \ + 0x2c07a8UL +#define TM_REG_DBG_DWORD_ENABLE \ + 0x2c07acUL +#define TM_REG_DBG_SHIFT \ + 0x2c07b0UL +#define TM_REG_DBG_FORCE_VALID \ + 0x2c07b4UL +#define TM_REG_DBG_FORCE_FRAME \ + 0x2c07b8UL +#define TCFC_REG_DBG_SELECT \ + 0x2d0500UL +#define TCFC_REG_DBG_DWORD_ENABLE \ + 0x2d0504UL +#define TCFC_REG_DBG_SHIFT \ + 0x2d0508UL +#define TCFC_REG_DBG_FORCE_VALID \ + 0x2d050cUL +#define TCFC_REG_DBG_FORCE_FRAME \ + 0x2d0510UL +#define CCFC_REG_DBG_SELECT \ + 0x2e0500UL +#define CCFC_REG_DBG_DWORD_ENABLE \ + 0x2e0504UL +#define CCFC_REG_DBG_SHIFT \ + 0x2e0508UL +#define CCFC_REG_DBG_FORCE_VALID \ + 0x2e050cUL +#define CCFC_REG_DBG_FORCE_FRAME \ + 0x2e0510UL +#define QM_REG_DBG_SELECT \ + 0x2f2e74UL +#define QM_REG_DBG_DWORD_ENABLE \ + 0x2f2e78UL +#define QM_REG_DBG_SHIFT \ + 0x2f2e7cUL +#define QM_REG_DBG_FORCE_VALID \ + 0x2f2e80UL +#define QM_REG_DBG_FORCE_FRAME \ + 0x2f2e84UL +#define RDIF_REG_DBG_SELECT \ + 0x300500UL +#define RDIF_REG_DBG_DWORD_ENABLE \ + 0x300504UL +#define RDIF_REG_DBG_SHIFT \ + 0x300508UL +#define RDIF_REG_DBG_FORCE_VALID \ + 0x30050cUL +#define RDIF_REG_DBG_FORCE_FRAME \ + 0x300510UL +#define TDIF_REG_DBG_SELECT \ + 0x310500UL +#define TDIF_REG_DBG_DWORD_ENABLE \ + 0x310504UL +#define TDIF_REG_DBG_SHIFT \ + 0x310508UL +#define TDIF_REG_DBG_FORCE_VALID \ + 0x31050cUL +#define TDIF_REG_DBG_FORCE_FRAME \ + 0x310510UL +#define BRB_REG_DBG_SELECT \ + 0x340ed0UL +#define BRB_REG_DBG_DWORD_ENABLE \ + 0x340ed4UL +#define BRB_REG_DBG_SHIFT \ + 0x340ed8UL +#define BRB_REG_DBG_FORCE_VALID \ + 0x340edcUL +#define BRB_REG_DBG_FORCE_FRAME \ + 0x340ee0UL +#define XYLD_REG_DBG_SELECT \ + 0x4c1600UL +#define XYLD_REG_DBG_DWORD_ENABLE \ + 0x4c1604UL +#define XYLD_REG_DBG_SHIFT \ + 0x4c1608UL +#define XYLD_REG_DBG_FORCE_VALID \ + 0x4c160cUL +#define XYLD_REG_DBG_FORCE_FRAME \ + 0x4c1610UL +#define YULD_REG_DBG_SELECT_BB_K2 \ + 0x4c9600UL +#define YULD_REG_DBG_DWORD_ENABLE_BB_K2 \ + 0x4c9604UL +#define YULD_REG_DBG_SHIFT_BB_K2 \ + 0x4c9608UL +#define YULD_REG_DBG_FORCE_VALID_BB_K2 \ + 0x4c960cUL +#define YULD_REG_DBG_FORCE_FRAME_BB_K2 \ + 0x4c9610UL +#define TMLD_REG_DBG_SELECT \ + 0x4d1600UL +#define TMLD_REG_DBG_DWORD_ENABLE \ + 0x4d1604UL +#define TMLD_REG_DBG_SHIFT \ + 0x4d1608UL +#define TMLD_REG_DBG_FORCE_VALID \ + 0x4d160cUL +#define TMLD_REG_DBG_FORCE_FRAME \ + 0x4d1610UL +#define MULD_REG_DBG_SELECT \ + 0x4e1600UL +#define MULD_REG_DBG_DWORD_ENABLE \ + 0x4e1604UL +#define MULD_REG_DBG_SHIFT \ + 0x4e1608UL +#define MULD_REG_DBG_FORCE_VALID \ + 0x4e160cUL +#define MULD_REG_DBG_FORCE_FRAME \ + 0x4e1610UL +#define NIG_REG_DBG_SELECT \ + 0x502140UL +#define NIG_REG_DBG_DWORD_ENABLE \ + 0x502144UL +#define NIG_REG_DBG_SHIFT \ + 0x502148UL +#define NIG_REG_DBG_FORCE_VALID \ + 0x50214cUL +#define NIG_REG_DBG_FORCE_FRAME \ + 0x502150UL +#define BMB_REG_DBG_SELECT \ + 0x540a7cUL +#define BMB_REG_DBG_DWORD_ENABLE \ + 0x540a80UL +#define BMB_REG_DBG_SHIFT \ + 0x540a84UL +#define BMB_REG_DBG_FORCE_VALID \ + 0x540a88UL +#define BMB_REG_DBG_FORCE_FRAME \ + 0x540a8cUL +#define PTU_REG_DBG_SELECT \ + 0x560100UL +#define PTU_REG_DBG_DWORD_ENABLE \ + 0x560104UL +#define PTU_REG_DBG_SHIFT \ + 0x560108UL +#define PTU_REG_DBG_FORCE_VALID \ + 0x56010cUL 
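Editorial note, not part of the patch: the *_REG_DBG_* definitions above and below repeat a fixed per-block pattern. Each hardware block exposes a SELECT, DWORD_ENABLE, SHIFT, FORCE_VALID and FORCE_FRAME register, usually at consecutive 4-byte offsets (GRC, DMAE), but not always (PRS jumps from 0x1f0b74 to 0x1f0ba0), which is why debug tooling tends to keep an explicit per-block table rather than deriving four of the addresses from SELECT. A minimal sketch with a hypothetical struct, using the GRC addresses from this listing:

/* Illustrative sketch only; struct and table names are hypothetical,
 * addresses are the GRC values from the listing above.
 */
#include <stdint.h>
#include <stdio.h>

struct dbg_bus_block {
	const char *name;
	uint32_t select;
	uint32_t dword_enable;
	uint32_t shift;
	uint32_t force_valid;
	uint32_t force_frame;
};

static const struct dbg_bus_block grc_dbg_bus = {
	.name         = "GRC",
	.select       = 0x0500a4,	/* GRC_REG_DBG_SELECT */
	.dword_enable = 0x0500a8,	/* GRC_REG_DBG_DWORD_ENABLE */
	.shift        = 0x0500ac,	/* GRC_REG_DBG_SHIFT */
	.force_valid  = 0x0500b0,	/* GRC_REG_DBG_FORCE_VALID */
	.force_frame  = 0x0500b4,	/* GRC_REG_DBG_FORCE_FRAME */
};

int main(void)
{
	const struct dbg_bus_block *b = &grc_dbg_bus;

	/* Dump the five-register tuple for one block. */
	printf("%s: select=0x%06x dword_enable=0x%06x shift=0x%06x "
	       "force_valid=0x%06x force_frame=0x%06x\n",
	       b->name, b->select, b->dword_enable, b->shift,
	       b->force_valid, b->force_frame);
	return 0;
}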
+#define PTU_REG_DBG_FORCE_FRAME \ + 0x560110UL +#define CDU_REG_DBG_SELECT \ + 0x580704UL +#define CDU_REG_DBG_DWORD_ENABLE \ + 0x580708UL +#define CDU_REG_DBG_SHIFT \ + 0x58070cUL +#define CDU_REG_DBG_FORCE_VALID \ + 0x580710UL +#define CDU_REG_DBG_FORCE_FRAME \ + 0x580714UL +#define WOL_REG_DBG_SELECT_K2_E5 \ + 0x600140UL +#define WOL_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x600144UL +#define WOL_REG_DBG_SHIFT_K2_E5 \ + 0x600148UL +#define WOL_REG_DBG_FORCE_VALID_K2_E5 \ + 0x60014cUL +#define WOL_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x600150UL +#define BMBN_REG_DBG_SELECT_K2_E5 \ + 0x610140UL +#define BMBN_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x610144UL +#define BMBN_REG_DBG_SHIFT_K2_E5 \ + 0x610148UL +#define BMBN_REG_DBG_FORCE_VALID_K2_E5 \ + 0x61014cUL +#define BMBN_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x610150UL +#define NWM_REG_DBG_SELECT_K2_E5 \ + 0x8000ecUL +#define NWM_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x8000f0UL +#define NWM_REG_DBG_SHIFT_K2_E5 \ + 0x8000f4UL +#define NWM_REG_DBG_FORCE_VALID_K2_E5 \ + 0x8000f8UL +#define NWM_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x8000fcUL +#define PBF_REG_DBG_SELECT \ + 0xd80060UL +#define PBF_REG_DBG_DWORD_ENABLE \ + 0xd80064UL +#define PBF_REG_DBG_SHIFT \ + 0xd80068UL +#define PBF_REG_DBG_FORCE_VALID \ + 0xd8006cUL +#define PBF_REG_DBG_FORCE_FRAME \ + 0xd80070UL +#define PBF_PB1_REG_DBG_SELECT \ + 0xda0728UL +#define PBF_PB1_REG_DBG_DWORD_ENABLE \ + 0xda072cUL +#define PBF_PB1_REG_DBG_SHIFT \ + 0xda0730UL +#define PBF_PB1_REG_DBG_FORCE_VALID \ + 0xda0734UL +#define PBF_PB1_REG_DBG_FORCE_FRAME \ + 0xda0738UL +#define PBF_PB2_REG_DBG_SELECT \ + 0xda4728UL +#define PBF_PB2_REG_DBG_DWORD_ENABLE \ + 0xda472cUL +#define PBF_PB2_REG_DBG_SHIFT \ + 0xda4730UL +#define PBF_PB2_REG_DBG_FORCE_VALID \ + 0xda4734UL +#define PBF_PB2_REG_DBG_FORCE_FRAME \ + 0xda4738UL +#define BTB_REG_DBG_SELECT \ + 0xdb08c8UL +#define BTB_REG_DBG_DWORD_ENABLE \ + 0xdb08ccUL +#define BTB_REG_DBG_SHIFT \ + 0xdb08d0UL +#define BTB_REG_DBG_FORCE_VALID \ + 0xdb08d4UL +#define BTB_REG_DBG_FORCE_FRAME \ + 0xdb08d8UL +#define XSDM_REG_DBG_SELECT \ + 0xf80e28UL +#define XSDM_REG_DBG_DWORD_ENABLE \ + 0xf80e2cUL +#define XSDM_REG_DBG_SHIFT \ + 0xf80e30UL +#define XSDM_REG_DBG_FORCE_VALID \ + 0xf80e34UL +#define XSDM_REG_DBG_FORCE_FRAME \ + 0xf80e38UL +#define YSDM_REG_DBG_SELECT \ + 0xf90e28UL +#define YSDM_REG_DBG_DWORD_ENABLE \ + 0xf90e2cUL +#define YSDM_REG_DBG_SHIFT \ + 0xf90e30UL +#define YSDM_REG_DBG_FORCE_VALID \ + 0xf90e34UL +#define YSDM_REG_DBG_FORCE_FRAME \ + 0xf90e38UL +#define PSDM_REG_DBG_SELECT \ + 0xfa0e28UL +#define PSDM_REG_DBG_DWORD_ENABLE \ + 0xfa0e2cUL +#define PSDM_REG_DBG_SHIFT \ + 0xfa0e30UL +#define PSDM_REG_DBG_FORCE_VALID \ + 0xfa0e34UL +#define PSDM_REG_DBG_FORCE_FRAME \ + 0xfa0e38UL +#define TSDM_REG_DBG_SELECT \ + 0xfb0e28UL +#define TSDM_REG_DBG_DWORD_ENABLE \ + 0xfb0e2cUL +#define TSDM_REG_DBG_SHIFT \ + 0xfb0e30UL +#define TSDM_REG_DBG_FORCE_VALID \ + 0xfb0e34UL +#define TSDM_REG_DBG_FORCE_FRAME \ + 0xfb0e38UL +#define MSDM_REG_DBG_SELECT \ + 0xfc0e28UL +#define MSDM_REG_DBG_DWORD_ENABLE \ + 0xfc0e2cUL +#define MSDM_REG_DBG_SHIFT \ + 0xfc0e30UL +#define MSDM_REG_DBG_FORCE_VALID \ + 0xfc0e34UL +#define MSDM_REG_DBG_FORCE_FRAME \ + 0xfc0e38UL +#define USDM_REG_DBG_SELECT \ + 0xfd0e28UL +#define USDM_REG_DBG_DWORD_ENABLE \ + 0xfd0e2cUL +#define USDM_REG_DBG_SHIFT \ + 0xfd0e30UL +#define USDM_REG_DBG_FORCE_VALID \ + 0xfd0e34UL +#define USDM_REG_DBG_FORCE_FRAME \ + 0xfd0e38UL +#define XCM_REG_DBG_SELECT \ + 0x1000040UL +#define XCM_REG_DBG_DWORD_ENABLE \ + 0x1000044UL +#define XCM_REG_DBG_SHIFT 
\ + 0x1000048UL +#define XCM_REG_DBG_FORCE_VALID \ + 0x100004cUL +#define XCM_REG_DBG_FORCE_FRAME \ + 0x1000050UL +#define YCM_REG_DBG_SELECT \ + 0x1080040UL +#define YCM_REG_DBG_DWORD_ENABLE \ + 0x1080044UL +#define YCM_REG_DBG_SHIFT \ + 0x1080048UL +#define YCM_REG_DBG_FORCE_VALID \ + 0x108004cUL +#define YCM_REG_DBG_FORCE_FRAME \ + 0x1080050UL +#define PCM_REG_DBG_SELECT \ + 0x1100040UL +#define PCM_REG_DBG_DWORD_ENABLE \ + 0x1100044UL +#define PCM_REG_DBG_SHIFT \ + 0x1100048UL +#define PCM_REG_DBG_FORCE_VALID \ + 0x110004cUL +#define PCM_REG_DBG_FORCE_FRAME \ + 0x1100050UL +#define TCM_REG_DBG_SELECT \ + 0x1180040UL +#define TCM_REG_DBG_DWORD_ENABLE \ + 0x1180044UL +#define TCM_REG_DBG_SHIFT \ + 0x1180048UL +#define TCM_REG_DBG_FORCE_VALID \ + 0x118004cUL +#define TCM_REG_DBG_FORCE_FRAME \ + 0x1180050UL +#define MCM_REG_DBG_SELECT \ + 0x1200040UL +#define MCM_REG_DBG_DWORD_ENABLE \ + 0x1200044UL +#define MCM_REG_DBG_SHIFT \ + 0x1200048UL +#define MCM_REG_DBG_FORCE_VALID \ + 0x120004cUL +#define MCM_REG_DBG_FORCE_FRAME \ + 0x1200050UL +#define UCM_REG_DBG_SELECT \ + 0x1280050UL +#define UCM_REG_DBG_DWORD_ENABLE \ + 0x1280054UL +#define UCM_REG_DBG_SHIFT \ + 0x1280058UL +#define UCM_REG_DBG_FORCE_VALID \ + 0x128005cUL +#define UCM_REG_DBG_FORCE_FRAME \ + 0x1280060UL +#define XSEM_REG_DBG_SELECT \ + 0x1401528UL +#define XSEM_REG_DBG_DWORD_ENABLE \ + 0x140152cUL +#define XSEM_REG_DBG_SHIFT \ + 0x1401530UL +#define XSEM_REG_DBG_FORCE_VALID \ + 0x1401534UL +#define XSEM_REG_DBG_FORCE_FRAME \ + 0x1401538UL +#define YSEM_REG_DBG_SELECT \ + 0x1501528UL +#define YSEM_REG_DBG_DWORD_ENABLE \ + 0x150152cUL +#define YSEM_REG_DBG_SHIFT \ + 0x1501530UL +#define YSEM_REG_DBG_FORCE_VALID \ + 0x1501534UL +#define YSEM_REG_DBG_FORCE_FRAME \ + 0x1501538UL +#define PSEM_REG_DBG_SELECT \ + 0x1601528UL +#define PSEM_REG_DBG_DWORD_ENABLE \ + 0x160152cUL +#define PSEM_REG_DBG_SHIFT \ + 0x1601530UL +#define PSEM_REG_DBG_FORCE_VALID \ + 0x1601534UL +#define PSEM_REG_DBG_FORCE_FRAME \ + 0x1601538UL +#define TSEM_REG_DBG_SELECT \ + 0x1701528UL +#define TSEM_REG_DBG_DWORD_ENABLE \ + 0x170152cUL +#define TSEM_REG_DBG_SHIFT \ + 0x1701530UL +#define TSEM_REG_DBG_FORCE_VALID \ + 0x1701534UL +#define TSEM_REG_DBG_FORCE_FRAME \ + 0x1701538UL +#define DORQ_REG_PF_USAGE_CNT \ + 0x1009c0UL +#define DORQ_REG_PF_OVFL_STICKY \ + 0x1009d0UL +#define DORQ_REG_DPM_FORCE_ABORT \ + 0x1009d8UL +#define DORQ_REG_INT_STS \ + 0x100180UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR \ + (0x1UL << 0) +#define DORQ_REG_INT_STS_WR \ + 0x100188UL +#define DORQ_REG_DB_DROP_DETAILS_REL \ + 0x100a28UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR_SHIFT \ + 0 +#define DORQ_REG_INT_STS_DB_DROP \ + (0x1UL << 1) +#define DORQ_REG_INT_STS_DB_DROP_SHIFT \ + 1 +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR \ + (0x1UL << 2) +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR_SHIFT \ + 2 +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL\ + (0x1UL << 3) +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL_SHIFT \ + 3 +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR \ + (0x1UL << 4) +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR_SHIFT \ + 4 +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR \ + (0x1UL << 5) +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR_SHIFT \ + 5 +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR \ + (0x1UL << 6) +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR_SHIFT \ + 6 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR \ + (0x1UL << 7) +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR_SHIFT \ + 7 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR \ + (0x1UL << 8) +#define 
DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR_SHIFT \ + 8 +#define DORQ_REG_DB_DROP_DETAILS_REASON \ + 0x100a20UL +#define MSEM_REG_DBG_SELECT \ + 0x1801528UL +#define MSEM_REG_DBG_DWORD_ENABLE \ + 0x180152cUL +#define MSEM_REG_DBG_SHIFT \ + 0x1801530UL +#define MSEM_REG_DBG_FORCE_VALID \ + 0x1801534UL +#define MSEM_REG_DBG_FORCE_FRAME \ + 0x1801538UL +#define USEM_REG_DBG_SELECT \ + 0x1901528UL +#define USEM_REG_DBG_DWORD_ENABLE \ + 0x190152cUL +#define USEM_REG_DBG_SHIFT \ + 0x1901530UL +#define USEM_REG_DBG_FORCE_VALID \ + 0x1901534UL +#define USEM_REG_DBG_FORCE_FRAME \ + 0x1901538UL +#define NWS_REG_DBG_SELECT_K2_E5 \ + 0x700128UL +#define NWS_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x70012cUL +#define NWS_REG_DBG_SHIFT_K2_E5 \ + 0x700130UL +#define NWS_REG_DBG_FORCE_VALID_K2_E5 \ + 0x700134UL +#define NWS_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x700138UL +#define MS_REG_DBG_SELECT_K2_E5 \ + 0x6a0228UL +#define MS_REG_DBG_DWORD_ENABLE_K2_E5 \ + 0x6a022cUL +#define MS_REG_DBG_SHIFT_K2_E5 \ + 0x6a0230UL +#define MS_REG_DBG_FORCE_VALID_K2_E5 \ + 0x6a0234UL +#define MS_REG_DBG_FORCE_FRAME_K2_E5 \ + 0x6a0238UL +#define PCIE_REG_DBG_COMMON_SELECT_K2_E5 \ + 0x054398UL +#define PCIE_REG_DBG_COMMON_DWORD_ENABLE_K2_E5 \ + 0x05439cUL +#define PCIE_REG_DBG_COMMON_SHIFT_K2_E5 \ + 0x0543a0UL +#define PCIE_REG_DBG_COMMON_FORCE_VALID_K2_E5 \ + 0x0543a4UL +#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \ + 0x0543a8UL +#define PTLD_REG_DBG_SELECT_E5 \ + 0x5a1600UL +#define PTLD_REG_DBG_DWORD_ENABLE_E5 \ + 0x5a1604UL +#define PTLD_REG_DBG_SHIFT_E5 \ + 0x5a1608UL +#define PTLD_REG_DBG_FORCE_VALID_E5 \ + 0x5a160cUL +#define PTLD_REG_DBG_FORCE_FRAME_E5 \ + 0x5a1610UL +#define YPLD_REG_DBG_SELECT_E5 \ + 0x5c1600UL +#define YPLD_REG_DBG_DWORD_ENABLE_E5 \ + 0x5c1604UL +#define YPLD_REG_DBG_SHIFT_E5 \ + 0x5c1608UL +#define YPLD_REG_DBG_FORCE_VALID_E5 \ + 0x5c160cUL +#define YPLD_REG_DBG_FORCE_FRAME_E5 \ + 0x5c1610UL +#define RGSRC_REG_DBG_SELECT_E5 \ + 0x320040UL +#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \ + 0x320044UL +#define RGSRC_REG_DBG_SHIFT_E5 \ + 0x320048UL +#define RGSRC_REG_DBG_FORCE_VALID_E5 \ + 0x32004cUL +#define RGSRC_REG_DBG_FORCE_FRAME_E5 \ + 0x320050UL +#define TGSRC_REG_DBG_SELECT_E5 \ + 0x322040UL +#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \ + 0x322044UL +#define TGSRC_REG_DBG_SHIFT_E5 \ + 0x322048UL +#define TGSRC_REG_DBG_FORCE_VALID_E5 \ + 0x32204cUL +#define TGSRC_REG_DBG_FORCE_FRAME_E5 \ + 0x322050UL +#define MISC_REG_RESET_PL_UA \ + 0x008050UL +#define MISC_REG_RESET_PL_HV \ + 0x008060UL +#define XCM_REG_CTX_RBC_ACCS \ + 0x1001800UL +#define XCM_REG_AGG_CON_CTX \ + 0x1001804UL +#define XCM_REG_SM_CON_CTX \ + 0x1001808UL +#define YCM_REG_CTX_RBC_ACCS \ + 0x1081800UL +#define YCM_REG_AGG_CON_CTX \ + 0x1081804UL +#define YCM_REG_AGG_TASK_CTX \ + 0x1081808UL +#define YCM_REG_SM_CON_CTX \ + 0x108180cUL +#define YCM_REG_SM_TASK_CTX \ + 0x1081810UL +#define PCM_REG_CTX_RBC_ACCS \ + 0x1101440UL +#define PCM_REG_SM_CON_CTX \ + 0x1101444UL +#define TCM_REG_CTX_RBC_ACCS \ + 0x11814c0UL +#define TCM_REG_AGG_CON_CTX \ + 0x11814c4UL +#define TCM_REG_AGG_TASK_CTX \ + 0x11814c8UL +#define TCM_REG_SM_CON_CTX \ + 0x11814ccUL +#define TCM_REG_SM_TASK_CTX \ + 0x11814d0UL +#define MCM_REG_CTX_RBC_ACCS \ + 0x1201800UL +#define MCM_REG_AGG_CON_CTX \ + 0x1201804UL +#define MCM_REG_AGG_TASK_CTX \ + 0x1201808UL +#define MCM_REG_SM_CON_CTX \ + 0x120180cUL +#define MCM_REG_SM_TASK_CTX \ + 0x1201810UL +#define UCM_REG_CTX_RBC_ACCS \ + 0x1281700UL +#define UCM_REG_AGG_CON_CTX \ + 0x1281704UL +#define UCM_REG_AGG_TASK_CTX \ + 0x1281708UL +#define 
UCM_REG_SM_CON_CTX \ + 0x128170cUL +#define UCM_REG_SM_TASK_CTX \ + 0x1281710UL +#define XSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ + 0x1401140UL +#define XSEM_REG_SYNC_DBG_EMPTY \ + 0x1401160UL +#define XSEM_REG_SLOW_DBG_ACTIVE \ + 0x1401400UL +#define XSEM_REG_SLOW_DBG_MODE \ + 0x1401404UL +#define XSEM_REG_DBG_FRAME_MODE \ + 0x1401408UL +#define XSEM_REG_DBG_GPRE_VECT \ + 0x1401410UL +#define XSEM_REG_DBG_MODE1_CFG \ + 0x1401420UL +#define XSEM_REG_FAST_MEMORY \ + 0x1440000UL +#define YSEM_REG_SYNC_DBG_EMPTY \ + 0x1501160UL +#define YSEM_REG_SLOW_DBG_ACTIVE \ + 0x1501400UL +#define YSEM_REG_SLOW_DBG_MODE \ + 0x1501404UL +#define YSEM_REG_DBG_FRAME_MODE \ + 0x1501408UL +#define YSEM_REG_DBG_GPRE_VECT \ + 0x1501410UL +#define YSEM_REG_DBG_MODE1_CFG \ + 0x1501420UL +#define YSEM_REG_FAST_MEMORY \ + 0x1540000UL +#define PSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ + 0x1601140UL +#define PSEM_REG_SYNC_DBG_EMPTY \ + 0x1601160UL +#define PSEM_REG_SLOW_DBG_ACTIVE \ + 0x1601400UL +#define PSEM_REG_SLOW_DBG_MODE \ + 0x1601404UL +#define PSEM_REG_DBG_FRAME_MODE \ + 0x1601408UL +#define PSEM_REG_DBG_GPRE_VECT \ + 0x1601410UL +#define PSEM_REG_DBG_MODE1_CFG \ + 0x1601420UL +#define PSEM_REG_FAST_MEMORY \ + 0x1640000UL +#define TSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ + 0x1701140UL +#define TSEM_REG_SYNC_DBG_EMPTY \ + 0x1701160UL +#define TSEM_REG_SLOW_DBG_ACTIVE \ + 0x1701400UL +#define TSEM_REG_SLOW_DBG_MODE \ + 0x1701404UL +#define TSEM_REG_DBG_FRAME_MODE \ + 0x1701408UL +#define TSEM_REG_DBG_GPRE_VECT \ + 0x1701410UL +#define TSEM_REG_DBG_MODE1_CFG \ + 0x1701420UL +#define TSEM_REG_FAST_MEMORY \ + 0x1740000UL +#define MSEM_REG_SLOW_DBG_EMPTY_BB_K2 \ + 0x1801140UL +#define MSEM_REG_SYNC_DBG_EMPTY \ + 0x1801160UL +#define MSEM_REG_SLOW_DBG_ACTIVE \ + 0x1801400UL +#define MSEM_REG_SLOW_DBG_MODE \ + 0x1801404UL +#define MSEM_REG_DBG_FRAME_MODE \ + 0x1801408UL +#define MSEM_REG_DBG_GPRE_VECT \ + 0x1801410UL +#define MSEM_REG_DBG_MODE1_CFG \ + 0x1801420UL +#define MSEM_REG_FAST_MEMORY \ + 0x1840000UL +#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \ + 0x1901140UL +#define SEM_FAST_REG_INT_RAM_SIZE \ + 20480 +#define USEM_REG_SYNC_DBG_EMPTY \ + 0x1901160UL +#define USEM_REG_SLOW_DBG_ACTIVE \ + 0x1901400UL +#define USEM_REG_SLOW_DBG_MODE \ + 0x1901404UL +#define USEM_REG_DBG_FRAME_MODE \ + 0x1901408UL +#define USEM_REG_DBG_GPRE_VECT \ + 0x1901410UL +#define USEM_REG_DBG_MODE1_CFG \ + 0x1901420UL +#define USEM_REG_FAST_MEMORY \ + 0x1940000UL +#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \ + 0x000748UL +#define SEM_FAST_REG_DBG_MODSRC_DISABLE \ + 0x00074cUL +#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \ + 0x000750UL +#define SEM_FAST_REG_DEBUG_ACTIVE \ + 0x000740UL +#define SEM_FAST_REG_INT_RAM \ + 0x020000UL +#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \ + 20480 +#define SEM_FAST_REG_RECORD_FILTER_ENABLE \ + 0x000768UL +#define GRC_REG_TRACE_FIFO_VALID_DATA \ + 0x050064UL +#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \ + 0x05040cUL +#define GRC_REG_PROTECTION_OVERRIDE_WINDOW \ + 0x050500UL +#define IGU_REG_ERROR_HANDLING_MEMORY \ + 0x181520UL +#define MCP_REG_CPU_MODE \ + 0xe05000UL +#define MCP_REG_CPU_MODE_SOFT_HALT \ + (0x1 << 10) +#define BRB_REG_BIG_RAM_ADDRESS \ + 0x340800UL +#define BRB_REG_BIG_RAM_DATA \ + 0x341500UL +#define BRB_REG_BIG_RAM_DATA_SIZE \ + 64 +#define SEM_FAST_REG_STALL_0 \ + 0x000488UL +#define SEM_FAST_REG_STALLED \ + 0x000494UL +#define BTB_REG_BIG_RAM_ADDRESS \ + 0xdb0800UL +#define BTB_REG_BIG_RAM_DATA \ + 0xdb0c00UL +#define BMB_REG_BIG_RAM_ADDRESS \ + 0x540800UL +#define BMB_REG_BIG_RAM_DATA \ + 0x540f00UL +#define 
SEM_FAST_REG_STORM_REG_FILE \ + 0x008000UL +#define RSS_REG_RSS_RAM_ADDR \ + 0x238c30UL +#define MISCS_REG_BLOCK_256B_EN \ + 0x009074UL +#define MCP_REG_SCRATCH_SIZE_BB_K2 \ + 57344 +#define MCP_REG_CPU_REG_FILE \ + 0xe05200UL +#define MCP_REG_CPU_REG_FILE_SIZE \ + 32 +#define DBG_REG_DEBUG_TARGET \ + 0x01005cUL +#define DBG_REG_FULL_MODE \ + 0x010060UL +#define DBG_REG_CALENDAR_OUT_DATA \ + 0x010480UL +#define GRC_REG_TRACE_FIFO \ + 0x050068UL +#define IGU_REG_ERROR_HANDLING_DATA_VALID \ + 0x181530UL +#define DBG_REG_DBG_BLOCK_ON \ + 0x010454UL +#define DBG_REG_FILTER_ENABLE \ + 0x0109d0UL +#define DBG_REG_FRAMING_MODE \ + 0x010058UL +#define DBG_REG_TRIGGER_ENABLE \ + 0x01054cUL +#define SEM_FAST_REG_VFC_DATA_WR \ + 0x000b40UL +#define SEM_FAST_REG_VFC_ADDR \ + 0x000b44UL +#define SEM_FAST_REG_VFC_DATA_RD \ + 0x000b48UL +#define SEM_FAST_REG_VFC_STATUS \ + 0x000b4cUL +#define RSS_REG_RSS_RAM_DATA \ + 0x238c20UL +#define RSS_REG_RSS_RAM_DATA_SIZE \ + 4 +#define MISC_REG_BLOCK_256B_EN \ + 0x008c14UL +#define NWS_REG_NWS_CMU_K2 \ + 0x720000UL +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \ + 0x000680UL +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \ + 0x000684UL +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \ + 0x0006c0UL +#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \ + 0x0006c4UL +#define MS_REG_MS_CMU_K2 \ + 0x6a4000UL +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \ + 0x000208UL +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \ + 0x00020cUL +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \ + 0x000210UL +#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \ + 0x000214UL +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \ + 0x000208UL +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \ + 0x00020cUL +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \ + 0x000210UL +#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \ + 0x000214UL +#define PHY_PCIE_REG_PHY0_K2 \ + 0x620000UL +#define PHY_PCIE_REG_PHY1_K2 \ + 0x624000UL +#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL +#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL +#define NIG_REG_PPF_TO_ENGINE_SEL_SIZE 8 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN 0x1f0968UL +#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL +#define DORQ_REG_PF_DPM_ENABLE 0x100510UL +#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL +#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL +#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL +#define NIG_REG_RX_PTP_EN 0x501900UL +#define NIG_REG_TX_PTP_EN 0x501904UL +#define NIG_REG_LLH_PTP_TO_HOST 0x501908UL +#define NIG_REG_LLH_PTP_TO_MCP 0x50190cUL +#define NIG_REG_PTP_SW_TXTSEN 0x501910UL +#define NIG_REG_LLH_PTP_ETHERTYPE_1 0x501914UL +#define NIG_REG_LLH_PTP_MAC_DA_2_LSB 0x501918UL +#define NIG_REG_LLH_PTP_MAC_DA_2_MSB 0x50191cUL +#define NIG_REG_LLH_PTP_PARAM_MASK 0x501920UL +#define NIG_REG_LLH_PTP_RULE_MASK 0x501924UL +#define NIG_REG_TX_LLH_PTP_PARAM_MASK 0x501928UL +#define NIG_REG_TX_LLH_PTP_RULE_MASK 0x50192cUL +#define NIG_REG_LLH_PTP_HOST_BUF_SEQID 0x501930UL +#define NIG_REG_LLH_PTP_HOST_BUF_TS_LSB 0x501934UL +#define NIG_REG_LLH_PTP_HOST_BUF_TS_MSB 0x501938UL +#define NIG_REG_LLH_PTP_MCP_BUF_SEQID 0x50193cUL +#define NIG_REG_LLH_PTP_MCP_BUF_TS_LSB 0x501940UL +#define NIG_REG_LLH_PTP_MCP_BUF_TS_MSB 0x501944UL +#define NIG_REG_TX_LLH_PTP_BUF_SEQID 0x501948UL +#define NIG_REG_TX_LLH_PTP_BUF_TS_LSB 0x50194cUL +#define NIG_REG_TX_LLH_PTP_BUF_TS_MSB 0x501950UL +#define NIG_REG_RX_PTP_TS_MSB_ERR 0x501954UL +#define NIG_REG_TX_PTP_TS_MSB_ERR 0x501958UL +#define NIG_REG_TSGEN_SYNC_TIME_LSB 0x5088c0UL +#define NIG_REG_TSGEN_SYNC_TIME_MSB 
0x5088c4UL +#define NIG_REG_TSGEN_RST_DRIFT_CNTR 0x5088d8UL +#define NIG_REG_TSGEN_DRIFT_CNTR_CONF 0x5088dcUL +#define NIG_REG_TS_OUTPUT_ENABLE_PDA 0x508870UL +#define NIG_REG_TIMESYNC_GEN_REG_BB 0x500d00UL +#define NIG_REG_TSGEN_FREE_CNT_VALUE_LSB 0x5088a8UL +#define NIG_REG_TSGEN_FREE_CNT_VALUE_MSB 0x5088acUL +#define NIG_REG_PTP_LATCH_OSTS_PKT_TIME 0x509040UL +#define PSWRQ2_REG_WR_MBS0 0x240400UL + +#define PGLUE_B_REG_PGL_ADDR_E8_F0_K2 0x2aaf98UL +#define PGLUE_B_REG_PGL_ADDR_EC_F0_K2 0x2aaf9cUL +#define PGLUE_B_REG_PGL_ADDR_F0_F0_K2 0x2aafa0UL +#define PGLUE_B_REG_PGL_ADDR_F4_F0_K2 0x2aafa4UL +#define PGLUE_B_REG_MASTER_WRITE_PAD_ENABLE 0x2aae30UL +#define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL +#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL + +#define NIG_REG_TX_EDPM_CTRL 0x501f0cUL +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN (0x1 << 0) +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN_SHIFT 0 +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN (0xff << 1) +#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1 + +#define PRS_REG_SEARCH_GFT 0x1f11bcUL +#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL +#define PRS_REG_CM_HDR_GFT 0x1f11c8UL +#define PRS_REG_GFT_CAM 0x1f1100UL +#define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL +#define PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT 0 +#define PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT 8 +#define PRS_REG_LOAD_L2_FILTER 0x1f0198UL + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c new file mode 100644 index 0000000000..134ecfca96 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c @@ -0,0 +1,1143 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include <linux/if_vlan.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_ll2.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include <linux/qed/qed_rdma_if.h> +#include "qed_rdma.h" +#include "qed_roce.h" +#include "qed_sp.h" + +static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid); + +static int qed_roce_async_event(struct qed_hwfn *p_hwfn, u8 fw_event_code, + __le16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + struct qed_rdma_events events = p_hwfn->p_rdma_info->events; + union rdma_eqe_data *rdata = &data->rdma_data; + + if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) { + u16 icid = (u16)le32_to_cpu(rdata->rdma_destroy_qp_data.cid); + + /* icid release in this async event can occur only if the icid + * was offloaded to the FW. In case it wasn't offloaded this is + * handled in qed_roce_sp_destroy_qp. 
+ */ + qed_roce_free_real_icid(p_hwfn, icid); + } else if (fw_event_code == ROCE_ASYNC_EVENT_SRQ_EMPTY || + fw_event_code == ROCE_ASYNC_EVENT_SRQ_LIMIT) { + u16 srq_id = (u16)le32_to_cpu(rdata->async_handle.lo); + + events.affiliated_event(events.context, fw_event_code, + &srq_id); + } else { + events.affiliated_event(events.context, fw_event_code, + (void *)&rdata->async_handle); + } + + return 0; +} + +void qed_roce_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map; + int wait_count = 0; + + /* when destroying a_RoCE QP the control is returned to the user after + * the synchronous part. The asynchronous part may take a little longer. + * We delay for a short while if an async destroy QP is still expected. + * Beyond the added delay we clear the bitmap anyway. + */ + while (!bitmap_empty(rcid_map->bitmap, rcid_map->max_count)) { + /* If the HW device is during recovery, all resources are + * immediately reset without receiving a per-cid indication + * from HW. In this case we don't expect the cid bitmap to be + * cleared. + */ + if (p_hwfn->cdev->recov_in_prog) + return; + + msleep(100); + if (wait_count++ > 20) { + DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n"); + break; + } + } +} + +static void qed_rdma_copy_gids(struct qed_rdma_qp *qp, __le32 *src_gid, + __le32 *dst_gid) +{ + u32 i; + + if (qp->roce_mode == ROCE_V2_IPV4) { + /* The IPv4 addresses shall be aligned to the highest word. + * The lower words must be zero. + */ + memset(src_gid, 0, sizeof(union qed_gid)); + memset(dst_gid, 0, sizeof(union qed_gid)); + src_gid[3] = cpu_to_le32(qp->sgid.ipv4_addr); + dst_gid[3] = cpu_to_le32(qp->dgid.ipv4_addr); + } else { + /* GIDs and IPv6 addresses coincide in location and size */ + for (i = 0; i < ARRAY_SIZE(qp->sgid.dwords); i++) { + src_gid[i] = cpu_to_le32(qp->sgid.dwords[i]); + dst_gid[i] = cpu_to_le32(qp->dgid.dwords[i]); + } + } +} + +static enum roce_flavor qed_roce_mode_to_flavor(enum roce_mode roce_mode) +{ + switch (roce_mode) { + case ROCE_V1: + return PLAIN_ROCE; + case ROCE_V2_IPV4: + return RROCE_IPV4; + case ROCE_V2_IPV6: + return RROCE_IPV6; + default: + return MAX_ROCE_FLAVOR; + } +} + +static void qed_roce_free_cid_pair(struct qed_hwfn *p_hwfn, u16 cid) +{ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid); + qed_bmap_release_id(p_hwfn, &p_hwfn->p_rdma_info->cid_map, cid + 1); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + u32 responder_icid; + u32 requester_icid; + int rc; + + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, + &responder_icid); + if (rc) { + spin_unlock_bh(&p_rdma_info->lock); + return rc; + } + + rc = qed_rdma_bmap_alloc_id(p_hwfn, &p_rdma_info->cid_map, + &requester_icid); + + spin_unlock_bh(&p_rdma_info->lock); + if (rc) + goto err; + + /* the two icid's should be adjacent */ + if ((requester_icid - responder_icid) != 1) { + DP_NOTICE(p_hwfn, "Failed to allocate two adjacent qp's'\n"); + rc = -EINVAL; + goto err; + } + + responder_icid += qed_cxt_get_proto_cid_start(p_hwfn, + p_rdma_info->proto); + requester_icid += qed_cxt_get_proto_cid_start(p_hwfn, + p_rdma_info->proto); + + /* If these icids require a new ILT line allocate DMA-able context for + * an ILT page + */ + rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, responder_icid); + if (rc) + goto err; + + 
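Editorial aside, not part of the patch: the cid allocation continuing here always hands out connection ids in pairs. The two bitmap allocations above must come back exactly one apart; the responder keeps the lower id and the requester is implicitly responder + 1. As the comment in qed_roce_free_real_icid() further down notes, even relative cids belong to responders and odd ones to requesters, so a cid's partner can be found with a simple XOR. A minimal standalone restatement of that convention, with hypothetical helper names:

/* Illustrative sketch; helper names are hypothetical. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The requester icid is always the id adjacent to the responder's. */
static inline uint32_t roce_requester_icid(uint32_t responder_icid)
{
	return responder_icid + 1;
}

/* Partner of a relative cid: even (responder) <-> odd (requester). */
static inline uint32_t roce_partner_cid(uint32_t relative_cid)
{
	return relative_cid ^ 1;
}

int main(void)
{
	uint32_t resp = 42;	/* an even relative cid, i.e. a responder */

	assert(roce_requester_icid(resp) - resp == 1);
	printf("responder cid %u pairs with requester cid %u\n",
	       resp, roce_partner_cid(resp));
	return 0;
}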
rc = qed_cxt_dynamic_ilt_alloc(p_hwfn, QED_ELEM_CXT, requester_icid); + if (rc) + goto err; + + *cid = (u16)responder_icid; + return rc; + +err: + spin_lock_bh(&p_rdma_info->lock); + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, responder_icid); + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, requester_icid); + + spin_unlock_bh(&p_rdma_info->lock); + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, + "Allocate CID - failed, rc = %d\n", rc); + return rc; +} + +static void qed_roce_set_real_cid(struct qed_hwfn *p_hwfn, u32 cid) +{ + spin_lock_bh(&p_hwfn->p_rdma_info->lock); + qed_bmap_set_id(p_hwfn, &p_hwfn->p_rdma_info->real_cid_map, cid); + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +static u8 qed_roce_get_qp_tc(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + u8 pri, tc = 0; + + if (qp->vlan_id) { + pri = (qp->vlan_id & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + tc = qed_dcbx_get_priority_tc(p_hwfn, pri); + } + + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "qp icid %u tc: %u (vlan priority %s)\n", + qp->icid, tc, qp->vlan_id ? "enabled" : "disabled"); + + return tc; +} + +static int qed_roce_sp_create_responder(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp) +{ + struct roce_create_qp_resp_ramrod_data *p_ramrod; + u16 regular_latency_queue, low_latency_queue; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + enum protocol_type proto; + u32 flags = 0; + int rc; + u8 tc; + + if (!qp->has_resp) + return 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + /* Allocate DMA-able memory for IRQ */ + qp->irq_num_pages = 1; + qp->irq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + RDMA_RING_PAGE_SIZE, + &qp->irq_phys_addr, GFP_KERNEL); + if (!qp->irq) { + rc = -ENOMEM; + DP_NOTICE(p_hwfn, + "qed create responder failed: cannot allocate memory (irq). 
rc = %d\n", + rc); + return rc; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_CREATE_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) + goto err; + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR, + qed_roce_mode_to_flavor(qp->roce_mode)); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN, + qp->incoming_rdma_read_en); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN, + qp->incoming_rdma_write_en); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN, + qp->incoming_atomic_en); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, + qp->e2e_flow_control_en); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG, qp->use_srq); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_KEY_EN, + qp->fmr_and_reserved_lkey); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, + qp->min_rnr_nak_timer); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG, + qed_rdma_is_xrc_qp(qp)); + + p_ramrod = &p_ent->ramrod.roce_create_qp_resp; + p_ramrod->flags = cpu_to_le32(flags); + p_ramrod->max_ird = qp->max_rd_atomic_resp; + p_ramrod->traffic_class = qp->traffic_class_tos; + p_ramrod->hop_limit = qp->hop_limit_ttl; + p_ramrod->irq_num_pages = qp->irq_num_pages; + p_ramrod->p_key = cpu_to_le16(qp->pkey); + p_ramrod->flow_label = cpu_to_le32(qp->flow_label); + p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); + p_ramrod->mtu = cpu_to_le16(qp->mtu); + p_ramrod->initial_psn = cpu_to_le32(qp->rq_psn); + p_ramrod->pd = cpu_to_le16(qp->pd); + p_ramrod->rq_num_pages = cpu_to_le16(qp->rq_num_pages); + DMA_REGPAIR_LE(p_ramrod->rq_pbl_addr, qp->rq_pbl_ptr); + DMA_REGPAIR_LE(p_ramrod->irq_pbl_addr, qp->irq_phys_addr); + qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); + p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; + p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; + p_ramrod->cq_cid = cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | + qp->rq_cq_id); + p_ramrod->xrc_domain = cpu_to_le16(qp->xrcd_id); + + tc = qed_roce_get_qp_tc(p_hwfn, qp); + regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); + low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "qp icid %u pqs: regular_latency %u low_latency %u\n", + qp->icid, regular_latency_queue - CM_TX_PQ_BASE, + low_latency_queue - CM_TX_PQ_BASE); + p_ramrod->regular_latency_phy_queue = + cpu_to_le16(regular_latency_queue); + p_ramrod->low_latency_phy_queue = + cpu_to_le16(low_latency_queue); + + p_ramrod->dpi = cpu_to_le16(qp->dpi); + + qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); + qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); + + p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); + p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); + p_ramrod->srq_id.srq_idx = cpu_to_le16(qp->srq_id); + p_ramrod->srq_id.opaque_fid = cpu_to_le16(p_hwfn->hw_info.opaque_fid); + + p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + + qp->stats_queue; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + qp->resp_offloaded = true; + qp->cq_prod = 0; + + proto = p_hwfn->p_rdma_info->proto; + 
qed_roce_set_real_cid(p_hwfn, qp->icid - + qed_cxt_get_proto_cid_start(p_hwfn, proto)); + + return rc; + +err: + DP_NOTICE(p_hwfn, "create responder - failed, rc = %d\n", rc); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + qp->irq_num_pages * RDMA_RING_PAGE_SIZE, + qp->irq, qp->irq_phys_addr); + + return rc; +} + +static int qed_roce_sp_create_requester(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp) +{ + struct roce_create_qp_req_ramrod_data *p_ramrod; + u16 regular_latency_queue, low_latency_queue; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + enum protocol_type proto; + u16 flags = 0; + int rc; + u8 tc; + + if (!qp->has_req) + return 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + /* Allocate DMA-able memory for ORQ */ + qp->orq_num_pages = 1; + qp->orq = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + RDMA_RING_PAGE_SIZE, + &qp->orq_phys_addr, GFP_KERNEL); + if (!qp->orq) { + rc = -ENOMEM; + DP_NOTICE(p_hwfn, + "qed create requester failed: cannot allocate memory (orq). rc = %d\n", + rc); + return rc; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid + 1; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ROCE_RAMROD_CREATE_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) + goto err; + + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR, + qed_roce_mode_to_flavor(qp->roce_mode)); + + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN, + qp->fmr_and_reserved_lkey); + + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP, + qp->signal_all); + + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, + qp->retry_cnt); + + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, + qp->rnr_retry_cnt); + + SET_FIELD(flags, ROCE_CREATE_QP_REQ_RAMROD_DATA_XRC_FLAG, + qed_rdma_is_xrc_qp(qp)); + + p_ramrod = &p_ent->ramrod.roce_create_qp_req; + p_ramrod->flags = cpu_to_le16(flags); + + SET_FIELD(p_ramrod->flags2, ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE, + qp->edpm_mode); + + p_ramrod->max_ord = qp->max_rd_atomic_req; + p_ramrod->traffic_class = qp->traffic_class_tos; + p_ramrod->hop_limit = qp->hop_limit_ttl; + p_ramrod->orq_num_pages = qp->orq_num_pages; + p_ramrod->p_key = cpu_to_le16(qp->pkey); + p_ramrod->flow_label = cpu_to_le32(qp->flow_label); + p_ramrod->dst_qp_id = cpu_to_le32(qp->dest_qp); + p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); + p_ramrod->mtu = cpu_to_le16(qp->mtu); + p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn); + p_ramrod->pd = cpu_to_le16(qp->pd); + p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages); + DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_ptr); + DMA_REGPAIR_LE(p_ramrod->orq_pbl_addr, qp->orq_phys_addr); + qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); + p_ramrod->qp_handle_for_async.hi = qp->qp_handle_async.hi; + p_ramrod->qp_handle_for_async.lo = qp->qp_handle_async.lo; + p_ramrod->qp_handle_for_cqe.hi = qp->qp_handle.hi; + p_ramrod->qp_handle_for_cqe.lo = qp->qp_handle.lo; + p_ramrod->cq_cid = + cpu_to_le32((p_hwfn->hw_info.opaque_fid << 16) | qp->sq_cq_id); + + tc = qed_roce_get_qp_tc(p_hwfn, qp); + regular_latency_queue = qed_get_cm_pq_idx_ofld_mtc(p_hwfn, tc); + low_latency_queue = qed_get_cm_pq_idx_llt_mtc(p_hwfn, tc); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "qp icid %u pqs: regular_latency %u low_latency %u\n", + qp->icid, regular_latency_queue - CM_TX_PQ_BASE, + low_latency_queue - 
CM_TX_PQ_BASE); + p_ramrod->regular_latency_phy_queue = + cpu_to_le16(regular_latency_queue); + p_ramrod->low_latency_phy_queue = + cpu_to_le16(low_latency_queue); + + p_ramrod->dpi = cpu_to_le16(qp->dpi); + + qed_rdma_set_fw_mac(p_ramrod->remote_mac_addr, qp->remote_mac_addr); + qed_rdma_set_fw_mac(p_ramrod->local_mac_addr, qp->local_mac_addr); + + p_ramrod->udp_src_port = cpu_to_le16(qp->udp_src_port); + p_ramrod->vlan_id = cpu_to_le16(qp->vlan_id); + p_ramrod->stats_counter_id = RESC_START(p_hwfn, QED_RDMA_STATS_QUEUE) + + qp->stats_queue; + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + qp->req_offloaded = true; + proto = p_hwfn->p_rdma_info->proto; + qed_roce_set_real_cid(p_hwfn, + qp->icid + 1 - + qed_cxt_get_proto_cid_start(p_hwfn, proto)); + + return rc; + +err: + DP_NOTICE(p_hwfn, "Create requested - failed, rc = %d\n", rc); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + qp->orq_num_pages * RDMA_RING_PAGE_SIZE, + qp->orq, qp->orq_phys_addr); + return rc; +} + +static int qed_roce_sp_modify_responder(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + bool move_to_err, u32 modify_flags) +{ + struct roce_modify_qp_resp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u16 flags = 0; + int rc; + + if (!qp->has_resp) + return 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + if (move_to_err && !qp->resp_offloaded) + return 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ROCE_EVENT_MODIFY_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) { + DP_NOTICE(p_hwfn, "rc = %d\n", rc); + return rc; + } + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG, + !!move_to_err); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN, + qp->incoming_rdma_read_en); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN, + qp->incoming_rdma_write_en); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN, + qp->incoming_atomic_en); + + SET_FIELD(flags, ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN, + qp->e2e_flow_control_en); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG, + GET_FIELD(modify_flags, + QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN)); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG, + GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG, + GET_FIELD(modify_flags, + QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG, + GET_FIELD(modify_flags, + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP)); + + SET_FIELD(flags, ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG, + GET_FIELD(modify_flags, + QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER)); + + p_ramrod = &p_ent->ramrod.roce_modify_qp_resp; + p_ramrod->flags = cpu_to_le16(flags); + + p_ramrod->fields = 0; + SET_FIELD(p_ramrod->fields, + ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER, + qp->min_rnr_nak_timer); + + p_ramrod->max_ird = qp->max_rd_atomic_resp; + p_ramrod->traffic_class = qp->traffic_class_tos; + p_ramrod->hop_limit = qp->hop_limit_ttl; + p_ramrod->p_key = cpu_to_le16(qp->pkey); + p_ramrod->flow_label = cpu_to_le32(qp->flow_label); + p_ramrod->mtu = cpu_to_le16(qp->mtu); + qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); + rc = 
qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify responder, rc = %d\n", rc); + return rc; +} + +static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + bool move_to_sqd, + bool move_to_err, u32 modify_flags) +{ + struct roce_modify_qp_req_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + u16 flags = 0; + int rc; + + if (!qp->has_req) + return 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + if (move_to_err && !(qp->req_offloaded)) + return 0; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid + 1; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ROCE_EVENT_MODIFY_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) { + DP_NOTICE(p_hwfn, "rc = %d\n", rc); + return rc; + } + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG, + !!move_to_err); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG, + !!move_to_sqd); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY, + qp->sqd_async); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG, + GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_PKEY)); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG, + GET_FIELD(modify_flags, + QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR)); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG, + GET_FIELD(modify_flags, + QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ)); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG, + GET_FIELD(modify_flags, + QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT)); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG, + GET_FIELD(modify_flags, QED_ROCE_MODIFY_QP_VALID_RETRY_CNT)); + + SET_FIELD(flags, ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG, + GET_FIELD(modify_flags, + QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT)); + + p_ramrod = &p_ent->ramrod.roce_modify_qp_req; + p_ramrod->flags = cpu_to_le16(flags); + + p_ramrod->fields = 0; + SET_FIELD(p_ramrod->fields, + ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT, qp->retry_cnt); + SET_FIELD(p_ramrod->fields, ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT, + qp->rnr_retry_cnt); + + p_ramrod->max_ord = qp->max_rd_atomic_req; + p_ramrod->traffic_class = qp->traffic_class_tos; + p_ramrod->hop_limit = qp->hop_limit_ttl; + p_ramrod->p_key = cpu_to_le16(qp->pkey); + p_ramrod->flow_label = cpu_to_le32(qp->flow_label); + p_ramrod->ack_timeout_val = cpu_to_le32(qp->ack_timeout); + p_ramrod->mtu = cpu_to_le16(qp->mtu); + qed_rdma_copy_gids(qp, p_ramrod->src_gid, p_ramrod->dst_gid); + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Modify requester, rc = %d\n", rc); + return rc; +} + +static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + u32 *cq_prod) +{ + struct roce_destroy_qp_resp_output_params *p_ramrod_res; + struct roce_destroy_qp_resp_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + dma_addr_t ramrod_res_phys; + int rc; + + if (!qp->has_resp) { + *cq_prod = 0; + return 0; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + *cq_prod = qp->cq_prod; + + if (!qp->resp_offloaded) { + /* If a responder was never offload, we need to free the cids + * allocated in create_qp as a FW async event will never arrive + */ + u32 cid; + + cid = qp->icid - + 
qed_cxt_get_proto_cid_start(p_hwfn, + p_hwfn->p_rdma_info->proto); + qed_roce_free_cid_pair(p_hwfn, (u16)cid); + + return 0; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + ROCE_RAMROD_DESTROY_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.roce_destroy_qp_resp; + + p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_ramrod_res), + &ramrod_res_phys, GFP_KERNEL); + + if (!p_ramrod_res) { + rc = -ENOMEM; + DP_NOTICE(p_hwfn, + "qed destroy responder failed: cannot allocate memory (ramrod). rc = %d\n", + rc); + qed_sp_destroy_request(p_hwfn, p_ent); + return rc; + } + + DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + *cq_prod = le32_to_cpu(p_ramrod_res->cq_prod); + qp->cq_prod = *cq_prod; + + /* Free IRQ - only if ramrod succeeded, in case FW is still using it */ + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + qp->irq_num_pages * RDMA_RING_PAGE_SIZE, + qp->irq, qp->irq_phys_addr); + + qp->resp_offloaded = false; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy responder, rc = %d\n", rc); + +err: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(struct roce_destroy_qp_resp_output_params), + p_ramrod_res, ramrod_res_phys); + + return rc; +} + +static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp) +{ + struct roce_destroy_qp_req_output_params *p_ramrod_res; + struct roce_destroy_qp_req_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; + struct qed_spq_entry *p_ent; + dma_addr_t ramrod_res_phys; + int rc = -ENOMEM; + + if (!qp->has_req) + return 0; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid); + + if (!qp->req_offloaded) + return 0; + + p_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_ramrod_res), + &ramrod_res_phys, GFP_KERNEL); + if (!p_ramrod_res) { + DP_NOTICE(p_hwfn, + "qed destroy requester failed: cannot allocate memory (ramrod)\n"); + return rc; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid + 1; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_DESTROY_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) + goto err; + + p_ramrod = &p_ent->ramrod.roce_destroy_qp_req; + DMA_REGPAIR_LE(p_ramrod->output_params_addr, ramrod_res_phys); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err; + + /* Free ORQ - only if ramrod succeeded, in case FW is still using it */ + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + qp->orq_num_pages * RDMA_RING_PAGE_SIZE, + qp->orq, qp->orq_phys_addr); + + qp->req_offloaded = false; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Destroy requester, rc = %d\n", rc); + +err: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_ramrod_res), + p_ramrod_res, ramrod_res_phys); + + return rc; +} + +int qed_roce_query_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params) +{ + struct roce_query_qp_resp_output_params *p_resp_ramrod_res; + struct roce_query_qp_req_output_params *p_req_ramrod_res; + struct roce_query_qp_resp_ramrod_data *p_resp_ramrod; + struct roce_query_qp_req_ramrod_data *p_req_ramrod; + struct qed_sp_init_data init_data; + 
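Editorial aside, not part of the patch: the query path whose body continues below issues up to two query ramrods, one per direction, and each writes its results into a small DMA-coherent output buffer that the driver reads back after completion. How the responder error flag, the requester error flag and the requester draining flag fold into the reported QP state can be restated as a tiny pure function; all names here are hypothetical.

/* Illustrative sketch mirroring the flag handling in qed_roce_query_qp(). */
#include <stdbool.h>
#include <stdio.h>

enum ex_qp_state { EX_QP_STATE_RTS, EX_QP_STATE_ERR };

static enum ex_qp_state ex_fold_query_flags(enum ex_qp_state cur_state,
					    bool rq_err, bool sq_err,
					    bool sq_draining, bool *draining)
{
	*draining = false;

	if (rq_err || sq_err)
		return EX_QP_STATE_ERR;	/* an error on either side wins */

	if (sq_draining)
		*draining = true;	/* drain reported, state unchanged */

	return cur_state;
}

int main(void)
{
	bool draining;
	enum ex_qp_state s;

	s = ex_fold_query_flags(EX_QP_STATE_RTS, false, false, true,
				&draining);
	printf("state=%d draining=%d\n", s, draining);	/* RTS, draining=1 */
	return 0;
}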
dma_addr_t resp_ramrod_res_phys; + dma_addr_t req_ramrod_res_phys; + struct qed_spq_entry *p_ent; + bool rq_err_state; + bool sq_err_state; + bool sq_draining; + int rc = -ENOMEM; + + if ((!(qp->resp_offloaded)) && (!(qp->req_offloaded))) { + /* We can't send ramrod to the fw since this qp wasn't offloaded + * to the fw yet + */ + out_params->draining = false; + out_params->rq_psn = qp->rq_psn; + out_params->sq_psn = qp->sq_psn; + out_params->state = qp->cur_state; + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "No QPs as no offload\n"); + return 0; + } + + if (!(qp->resp_offloaded)) { + DP_NOTICE(p_hwfn, + "The responder's qp should be offloaded before requester's\n"); + return -EINVAL; + } + + /* Send a query responder ramrod to FW to get RQ-PSN and state */ + p_resp_ramrod_res = + dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_resp_ramrod_res), + &resp_ramrod_res_phys, GFP_KERNEL); + if (!p_resp_ramrod_res) { + DP_NOTICE(p_hwfn, + "qed query qp failed: cannot allocate memory (ramrod)\n"); + return rc; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qp->icid; + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) + goto err_resp; + + p_resp_ramrod = &p_ent->ramrod.roce_query_qp_resp; + DMA_REGPAIR_LE(p_resp_ramrod->output_params_addr, resp_ramrod_res_phys); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err_resp; + + out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn); + rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags), + ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG); + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), + p_resp_ramrod_res, resp_ramrod_res_phys); + + if (!(qp->req_offloaded)) { + /* Don't send query qp for the requester */ + out_params->sq_psn = qp->sq_psn; + out_params->draining = false; + + if (rq_err_state) + qp->cur_state = QED_ROCE_QP_STATE_ERR; + + out_params->state = qp->cur_state; + + return 0; + } + + /* Send a query requester ramrod to FW to get SQ-PSN and state */ + p_req_ramrod_res = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(*p_req_ramrod_res), + &req_ramrod_res_phys, + GFP_KERNEL); + if (!p_req_ramrod_res) { + rc = -ENOMEM; + DP_NOTICE(p_hwfn, + "qed query qp failed: cannot allocate memory (ramrod)\n"); + return rc; + } + + /* Get SPQ entry */ + init_data.cid = qp->icid + 1; + rc = qed_sp_init_request(p_hwfn, &p_ent, ROCE_RAMROD_QUERY_QP, + PROTOCOLID_ROCE, &init_data); + if (rc) + goto err_req; + + p_req_ramrod = &p_ent->ramrod.roce_query_qp_req; + DMA_REGPAIR_LE(p_req_ramrod->output_params_addr, req_ramrod_res_phys); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + goto err_req; + + out_params->sq_psn = le32_to_cpu(p_req_ramrod_res->psn); + sq_err_state = GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), + ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG); + sq_draining = + GET_FIELD(le32_to_cpu(p_req_ramrod_res->flags), + ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG); + + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), + p_req_ramrod_res, req_ramrod_res_phys); + + out_params->draining = false; + + if (rq_err_state || sq_err_state) + qp->cur_state = QED_ROCE_QP_STATE_ERR; + else if (sq_draining) + out_params->draining = true; + out_params->state = qp->cur_state; + + return 0; + +err_req: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_req_ramrod_res), + p_req_ramrod_res, 
req_ramrod_res_phys); + return rc; +err_resp: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res), + p_resp_ramrod_res, resp_ramrod_res_phys); + return rc; +} + +int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp) +{ + u32 cq_prod; + int rc; + + /* Destroys the specified QP */ + if ((qp->cur_state != QED_ROCE_QP_STATE_RESET) && + (qp->cur_state != QED_ROCE_QP_STATE_ERR) && + (qp->cur_state != QED_ROCE_QP_STATE_INIT)) { + DP_NOTICE(p_hwfn, + "QP must be in error, reset or init state before destroying it\n"); + return -EINVAL; + } + + if (qp->cur_state != QED_ROCE_QP_STATE_RESET) { + rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp, + &cq_prod); + if (rc) + return rc; + + /* Send destroy requester ramrod */ + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); + if (rc) + return rc; + } + + return 0; +} + +int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + enum qed_roce_qp_state prev_state, + struct qed_rdma_modify_qp_in_params *params) +{ + int rc = 0; + + /* Perform additional operations according to the current state and the + * next state + */ + if (((prev_state == QED_ROCE_QP_STATE_INIT) || + (prev_state == QED_ROCE_QP_STATE_RESET)) && + (qp->cur_state == QED_ROCE_QP_STATE_RTR)) { + /* Init->RTR or Reset->RTR */ + rc = qed_roce_sp_create_responder(p_hwfn, qp); + return rc; + } else if ((prev_state == QED_ROCE_QP_STATE_RTR) && + (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { + /* RTR-> RTS */ + rc = qed_roce_sp_create_requester(p_hwfn, qp); + if (rc) + return rc; + + /* Send modify responder ramrod */ + rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, + params->modify_flags); + return rc; + } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && + (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { + /* RTS->RTS */ + rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, + params->modify_flags); + if (rc) + return rc; + + rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, + params->modify_flags); + return rc; + } else if ((prev_state == QED_ROCE_QP_STATE_RTS) && + (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { + /* RTS->SQD */ + rc = qed_roce_sp_modify_requester(p_hwfn, qp, true, false, + params->modify_flags); + return rc; + } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && + (qp->cur_state == QED_ROCE_QP_STATE_SQD)) { + /* SQD->SQD */ + rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, + params->modify_flags); + if (rc) + return rc; + + rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, + params->modify_flags); + return rc; + } else if ((prev_state == QED_ROCE_QP_STATE_SQD) && + (qp->cur_state == QED_ROCE_QP_STATE_RTS)) { + /* SQD->RTS */ + rc = qed_roce_sp_modify_responder(p_hwfn, qp, false, + params->modify_flags); + if (rc) + return rc; + + rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, false, + params->modify_flags); + + return rc; + } else if (qp->cur_state == QED_ROCE_QP_STATE_ERR) { + /* ->ERR */ + rc = qed_roce_sp_modify_responder(p_hwfn, qp, true, + params->modify_flags); + if (rc) + return rc; + + rc = qed_roce_sp_modify_requester(p_hwfn, qp, false, true, + params->modify_flags); + return rc; + } else if (qp->cur_state == QED_ROCE_QP_STATE_RESET) { + /* Any state -> RESET */ + u32 cq_prod; + + /* Send destroy responder ramrod */ + rc = qed_roce_sp_destroy_qp_responder(p_hwfn, + qp, + &cq_prod); + + if (rc) + return rc; + + qp->cq_prod = cq_prod; + + rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n"); + } + + return rc; +} + +static void 
qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid) +{ + struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info; + u32 start_cid, cid, xcid; + + /* an even icid belongs to a responder while an odd icid belongs to a + * requester. The 'cid' received as an input can be either. We calculate + * the "partner" icid and call it xcid. Only if both are free then the + * "cid" map can be cleared. + */ + start_cid = qed_cxt_get_proto_cid_start(p_hwfn, p_rdma_info->proto); + cid = icid - start_cid; + xcid = cid ^ 1; + + spin_lock_bh(&p_rdma_info->lock); + + qed_bmap_release_id(p_hwfn, &p_rdma_info->real_cid_map, cid); + if (qed_bmap_test_id(p_hwfn, &p_rdma_info->real_cid_map, xcid) == 0) { + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, cid); + qed_bmap_release_id(p_hwfn, &p_rdma_info->cid_map, xcid); + } + + spin_unlock_bh(&p_hwfn->p_rdma_info->lock); +} + +void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u8 val; + + /* if any QPs are already active, we want to disable DPM, since their + * context information contains information from before the latest DCBx + * update. Otherwise enable it. + */ + val = qed_rdma_allocated_qps(p_hwfn) ? true : false; + p_hwfn->dcbx_no_edpm = (u8)val; + + qed_rdma_dpm_conf(p_hwfn, p_ptt); +} + +int qed_roce_setup(struct qed_hwfn *p_hwfn) +{ + return qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE, + qed_roce_async_event); +} + +int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 ll2_ethertype_en; + + qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF, 0); + + p_hwfn->rdma_prs_search_reg = PRS_REG_SEARCH_ROCE; + + ll2_ethertype_en = qed_rd(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN); + qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN, + (ll2_ethertype_en | 0x01)); + + if (qed_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_ROCE) % 2) { + DP_NOTICE(p_hwfn, "The first RoCE's cid should be even\n"); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Initializing HW - Done\n"); + return 0; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.h b/drivers/net/ethernet/qlogic/qed/qed_roce.h new file mode 100644 index 0000000000..3a4a2d72a8 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_roce.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef _QED_ROCE_H +#define _QED_ROCE_H +#include <linux/types.h> +#include <linux/slab.h> + +#if IS_ENABLED(CONFIG_QED_RDMA) +void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +#else +static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) {} +#endif + +int qed_roce_setup(struct qed_hwfn *p_hwfn); +void qed_roce_stop(struct qed_hwfn *p_hwfn); +int qed_roce_init_hw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); +int qed_roce_alloc_cid(struct qed_hwfn *p_hwfn, u16 *cid); +int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp); + +int qed_roce_query_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + struct qed_rdma_query_qp_out_params *out_params); + +int qed_roce_modify_qp(struct qed_hwfn *p_hwfn, + struct qed_rdma_qp *qp, + enum qed_roce_qp_state prev_state, + struct qed_rdma_modify_qp_in_params *params); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c new file mode 100644 index 0000000000..6e70781ab8 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2016 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/crc32.h> +#include "qed.h" +#include "qed_dev_api.h" +#include "qed_mcp.h" +#include "qed_sp.h" +#include "qed_selftest.h" + +int qed_selftest_memory(struct qed_dev *cdev) +{ + int rc = 0, i; + + for_each_hwfn(cdev, i) { + rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); + if (rc) + return rc; + } + + return rc; +} + +int qed_selftest_interrupt(struct qed_dev *cdev) +{ + int rc = 0, i; + + for_each_hwfn(cdev, i) { + rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]); + if (rc) + return rc; + } + + return rc; +} + +int qed_selftest_register(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0, i; + + /* although performed by MCP, this test is per engine */ + for_each_hwfn(cdev, i) { + p_hwfn = &cdev->hwfns[i]; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + rc = qed_mcp_bist_register_test(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); + if (rc) + break; + } + + return rc; +} + +int qed_selftest_clock(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn; + struct qed_ptt *p_ptt; + int rc = 0, i; + + /* although performed by MCP, this test is per engine */ + for_each_hwfn(cdev, i) { + p_hwfn = &cdev->hwfns[i]; + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); + if (rc) + break; + } + + return rc; +} + +int qed_selftest_nvram(struct qed_dev *cdev) +{ + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 num_images, i, j, nvm_crc, calc_crc; + struct bist_nvm_image_att image_att; + u8 *buf = NULL; + __be32 val; + int rc; + + if (!p_ptt) { + DP_ERR(p_hwfn, "failed to acquire ptt\n"); + return -EBUSY; + } + + /* Acquire from MFW the amount of available images */ + rc = qed_mcp_bist_nvm_get_num_images(p_hwfn, p_ptt, &num_images); + if (rc || !num_images) { + DP_ERR(p_hwfn, "Failed getting number of images\n"); + rc = -EINVAL; + goto err0; + } + + /* Iterate over images and validate CRC */ + for (i = 0; i < num_images; i++) { + /* This mailbox returns 
information about the image required for + * reading it. + */ + rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt, + &image_att, i); + if (rc) { + DP_ERR(p_hwfn, + "Failed getting image index %d attributes\n", + i); + goto err0; + } + + /* After MFW crash dump is collected - the image's CRC stops + * being valid. + */ + if (image_att.image_type == NVM_TYPE_MDUMP) + continue; + + DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", + i, image_att.len); + + /* Allocate a buffer for holding the nvram image */ + buf = kzalloc(image_att.len, GFP_KERNEL); + if (!buf) { + rc = -ENOMEM; + goto err0; + } + + /* Read image into buffer */ + rc = qed_mcp_nvm_read(p_hwfn->cdev, image_att.nvm_start_addr, + buf, image_att.len); + if (rc) { + DP_ERR(p_hwfn, + "Failed reading image index %d from nvm.\n", i); + goto err1; + } + + /* Convert the buffer into big-endian format (excluding the + * closing 4 bytes of CRC). + */ + for (j = 0; j < image_att.len - 4; j += 4) { + val = cpu_to_be32(*(u32 *)&buf[j]); + *(u32 *)&buf[j] = (__force u32)val; + } + + /* Calc CRC for the "actual" image buffer, i.e. not including + * the last 4 CRC bytes. + */ + nvm_crc = *(u32 *)(buf + image_att.len - 4); + calc_crc = crc32(0xffffffff, buf, image_att.len - 4); + calc_crc = (__force u32)~cpu_to_be32(calc_crc); + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "nvm crc 0x%x, calc_crc 0x%x\n", nvm_crc, calc_crc); + + if (calc_crc != nvm_crc) { + rc = -EINVAL; + goto err1; + } + + /* Done with this image; Free to prevent double release + * on subsequent failure. + */ + kfree(buf); + buf = NULL; + } + + qed_ptt_release(p_hwfn, p_ptt); + return 0; + +err1: + kfree(buf); +err0: + qed_ptt_release(p_hwfn, p_ptt); + return rc; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h new file mode 100644 index 0000000000..7a3bd749e1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* Copyright (c) 2019-2020 Marvell International Ltd. */ + +#ifndef _QED_SELFTEST_API_H +#define _QED_SELFTEST_API_H +#include <linux/types.h> + +/** + * qed_selftest_memory(): Perform memory test. + * + * @cdev: Qed dev pointer. + * + * Return: Int. + */ +int qed_selftest_memory(struct qed_dev *cdev); + +/** + * qed_selftest_interrupt(): Perform interrupt test. + * + * @cdev: Qed dev pointer. + * + * Return: Int. + */ +int qed_selftest_interrupt(struct qed_dev *cdev); + +/** + * qed_selftest_register(): Perform register test. + * + * @cdev: Qed dev pointer. + * + * Return: Int. + */ +int qed_selftest_register(struct qed_dev *cdev); + +/** + * qed_selftest_clock(): Perform clock test. + * + * @cdev: Qed dev pointer. + * + * Return: Int. + */ +int qed_selftest_clock(struct qed_dev *cdev); + +/** + * qed_selftest_nvram(): Perform nvram test. + * + * @cdev: Qed dev pointer. + * + * Return: Int. + */ +int qed_selftest_nvram(struct qed_dev *cdev); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h new file mode 100644 index 0000000000..4fb02a5579 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -0,0 +1,477 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#ifndef _QED_SP_H +#define _QED_SP_H + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/qed/qed_chain.h> +#include "qed.h" +#include "qed_hsi.h" + +enum spq_mode { + QED_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */ + QED_SPQ_MODE_CB, /* Client supplies a callback */ + QED_SPQ_MODE_EBLOCK, /* QED should block until completion */ +}; + +struct qed_spq_comp_cb { + void (*function)(struct qed_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, + u8 fw_return_code); + void *cookie; +}; + +/** + * qed_eth_cqe_completion(): handles the completion of a + * ramrod on the cqe ring. + * + * @p_hwfn: HW device data. + * @cqe: CQE. + * + * Return: Int. + */ +int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe); + + /* QED Slow-hwfn queue interface */ +union ramrod_data { + struct pf_start_ramrod_data pf_start; + struct pf_update_ramrod_data pf_update; + struct rx_queue_start_ramrod_data rx_queue_start; + struct rx_queue_update_ramrod_data rx_queue_update; + struct rx_queue_stop_ramrod_data rx_queue_stop; + struct tx_queue_start_ramrod_data tx_queue_start; + struct tx_queue_stop_ramrod_data tx_queue_stop; + struct vport_start_ramrod_data vport_start; + struct vport_stop_ramrod_data vport_stop; + struct rx_update_gft_filter_ramrod_data rx_update_gft; + struct vport_update_ramrod_data vport_update; + struct core_rx_start_ramrod_data core_rx_queue_start; + struct core_rx_stop_ramrod_data core_rx_queue_stop; + struct core_tx_start_ramrod_data core_tx_queue_start; + struct core_tx_stop_ramrod_data core_tx_queue_stop; + struct vport_filter_update_ramrod_data vport_filter_update; + + struct rdma_init_func_ramrod_data rdma_init_func; + struct rdma_close_func_ramrod_data rdma_close_func; + struct rdma_register_tid_ramrod_data rdma_register_tid; + struct rdma_deregister_tid_ramrod_data rdma_deregister_tid; + struct roce_create_qp_resp_ramrod_data roce_create_qp_resp; + struct roce_create_qp_req_ramrod_data roce_create_qp_req; + struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp; + struct roce_modify_qp_req_ramrod_data roce_modify_qp_req; + struct roce_query_qp_resp_ramrod_data roce_query_qp_resp; + struct roce_query_qp_req_ramrod_data roce_query_qp_req; + struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp; + struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req; + struct roce_init_func_ramrod_data roce_init_func; + struct rdma_create_cq_ramrod_data rdma_create_cq; + struct rdma_destroy_cq_ramrod_data rdma_destroy_cq; + struct rdma_srq_create_ramrod_data rdma_create_srq; + struct rdma_srq_destroy_ramrod_data rdma_destroy_srq; + struct rdma_srq_modify_ramrod_data rdma_modify_srq; + struct iwarp_create_qp_ramrod_data iwarp_create_qp; + struct iwarp_tcp_offload_ramrod_data iwarp_tcp_offload; + struct iwarp_mpa_offload_ramrod_data iwarp_mpa_offload; + struct iwarp_modify_qp_ramrod_data iwarp_modify_qp; + struct iwarp_init_func_ramrod_data iwarp_init_func; + struct fcoe_init_ramrod_params fcoe_init; + struct fcoe_conn_offload_ramrod_params fcoe_conn_ofld; + struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate; + struct fcoe_stat_ramrod_params fcoe_stat; + + struct iscsi_init_ramrod_params iscsi_init; + struct iscsi_spe_conn_offload iscsi_conn_offload; + struct iscsi_conn_update_ramrod_params iscsi_conn_update; + struct iscsi_spe_conn_mac_update iscsi_conn_mac_update; + struct iscsi_spe_conn_termination 
iscsi_conn_terminate; + + struct nvmetcp_init_ramrod_params nvmetcp_init; + struct nvmetcp_spe_conn_offload nvmetcp_conn_offload; + struct nvmetcp_conn_update_ramrod_params nvmetcp_conn_update; + struct nvmetcp_spe_conn_termination nvmetcp_conn_terminate; + + struct vf_start_ramrod_data vf_start; + struct vf_stop_ramrod_data vf_stop; +}; + +#define EQ_MAX_CREDIT 0xffffffff + +enum spq_priority { + QED_SPQ_PRIORITY_NORMAL, + QED_SPQ_PRIORITY_HIGH, +}; + +union qed_spq_req_comp { + struct qed_spq_comp_cb cb; + u64 *done_addr; +}; + +struct qed_spq_comp_done { + unsigned int done; + u8 fw_return_code; +}; + +struct qed_spq_entry { + struct list_head list; + + u8 flags; + + /* HSI slow path element */ + struct slow_path_element elem; + + union ramrod_data ramrod; + + enum spq_priority priority; + + /* pending queue for this entry */ + struct list_head *queue; + + enum spq_mode comp_mode; + struct qed_spq_comp_cb comp_cb; + struct qed_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ + + /* Posted entry for unlimited list entry in EBLOCK mode */ + struct qed_spq_entry *post_ent; +}; + +struct qed_eq { + struct qed_chain chain; + u8 eq_sb_index; /* index within the SB */ + __le16 *p_fw_cons; /* ptr to index value */ +}; + +struct qed_consq { + struct qed_chain chain; +}; + +typedef int (*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn, u8 opcode, + __le16 echo, union event_ring_data *data, + u8 fw_return_code); + +int +qed_spq_register_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id, + qed_spq_async_comp_cb cb); + +void +qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id); + +struct qed_spq { + spinlock_t lock; /* SPQ lock */ + + struct list_head unlimited_pending; + struct list_head pending; + struct list_head completion_pending; + struct list_head free_pool; + + struct qed_chain chain; + + /* allocated dma-able memory for spq entries (+ramrod data) */ + dma_addr_t p_phys; + struct qed_spq_entry *p_virt; + +#define SPQ_RING_SIZE \ + (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) + + /* Bitmap for handling out-of-order completions */ + DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE); + u8 comp_bitmap_idx; + + /* Statistics */ + u32 unlimited_pending_count; + u32 normal_count; + u32 high_count; + u32 comp_sent_count; + u32 comp_count; + + u32 cid; + u32 db_addr_offset; + struct core_db_data db_data; + qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; +}; + +/** + * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. + * + * @p_hwfn: HW device data. + * @p_ent: Ent. + * @fw_return_code: Return code from firmware. + * + * Return: Int. + */ +int qed_spq_post(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *fw_return_code); + +/** + * qed_spq_alloc(): Alloocates & initializes the SPQ and EQ. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_spq_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_spq_setup(): Reset the SPQ to its start state. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_spq_setup(struct qed_hwfn *p_hwfn); + +/** + * qed_spq_free(): Deallocates the given SPQ struct. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_spq_free(struct qed_hwfn *p_hwfn); + +/** + * qed_spq_get_entry(): Obtain an entrry from the spq + * free pool list. + * + * @p_hwfn: HW device data. + * @pp_ent: PP ENT. + * + * Return: Int. 
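+ *
+ * Most callers reach this helper through qed_sp_init_request(). An
+ * illustrative sketch of that common flow, mirroring
+ * qed_sp_heartbeat_ramrod() (error handling elided):
+ *
+ *	struct qed_sp_init_data init_data;
+ *	struct qed_spq_entry *p_ent;
+ *
+ *	memset(&init_data, 0, sizeof(init_data));
+ *	init_data.cid = qed_spq_get_cid(p_hwfn);
+ *	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ *	init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+ *	if (!qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY,
+ *				 PROTOCOLID_COMMON, &init_data))
+ *		qed_spq_post(p_hwfn, p_ent, NULL);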
+ */ +int +qed_spq_get_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry **pp_ent); + +/** + * qed_spq_return_entry(): Return an entry to spq free pool list. + * + * @p_hwfn: HW device data. + * @p_ent: P ENT. + * + * Return: Void. + */ +void qed_spq_return_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent); +/** + * qed_eq_alloc(): Allocates & initializes an EQ struct. + * + * @p_hwfn: HW device data. + * @num_elem: number of elements in the eq. + * + * Return: Int. + */ +int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem); + +/** + * qed_eq_setup(): Reset the EQ to its start state. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_eq_setup(struct qed_hwfn *p_hwfn); + +/** + * qed_eq_free(): deallocates the given EQ struct. + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_eq_free(struct qed_hwfn *p_hwfn); + +/** + * qed_eq_prod_update(): update the FW with default EQ producer. + * + * @p_hwfn: HW device data. + * @prod: Prod. + * + * Return: Void. + */ +void qed_eq_prod_update(struct qed_hwfn *p_hwfn, + u16 prod); + +/** + * qed_eq_completion(): Completes currently pending EQ elements. + * + * @p_hwfn: HW device data. + * @cookie: Cookie. + * + * Return: Int. + */ +int qed_eq_completion(struct qed_hwfn *p_hwfn, + void *cookie); + +/** + * qed_spq_completion(): Completes a single event. + * + * @p_hwfn: HW device data. + * @echo: echo value from cookie (used for determining completion). + * @fw_return_code: FW return code. + * @p_data: data from cookie (used in callback function if applicable). + * + * Return: Int. + */ +int qed_spq_completion(struct qed_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data); + +/** + * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ. + * + * @p_hwfn: HW device data. + * + * Return: u32 - SPQ CID. + */ +u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); + +/** + * qed_consq_alloc(): Allocates & initializes an ConsQ struct. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_consq_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_consq_setup(): Reset the ConsQ to its start state. + * + * @p_hwfn: HW device data. + * + * Return Void. + */ +void qed_consq_setup(struct qed_hwfn *p_hwfn); + +/** + * qed_consq_free(): deallocates the given ConsQ struct. + * + * @p_hwfn: HW device data. + * + * Return Void. + */ +void qed_consq_free(struct qed_hwfn *p_hwfn); +int qed_spq_pend_post(struct qed_hwfn *p_hwfn); + +/* Slow-hwfn low-level commands (Ramrods) function definitions. */ + +#define QED_SP_EQ_COMPLETION 0x01 +#define QED_SP_CQE_COMPLETION 0x02 + +struct qed_sp_init_data { + u32 cid; + u16 opaque_fid; + + /* Information regarding operation upon sending & completion */ + enum spq_mode comp_mode; + struct qed_spq_comp_cb *p_comp_data; +}; + +/** + * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the + * entry if allocated. Should be called on in error + * flows after initializing the SPQ entry + * and before posting it. + * + * @p_hwfn: HW device data. + * @p_ent: Ent. + * + * Return: Void. + */ +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent); + +int qed_sp_init_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry **pp_ent, + u8 cmd, + u8 protocol, + struct qed_sp_init_data *p_data); + +/** + * qed_sp_pf_start(): PF Function Start Ramrod. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_tunn: P_tunn. + * @allow_npar_tx_switch: Allow NPAR TX Switch. + * + * Return: Int. 
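+ *
+ * Note: as part of this flow the initial EQ producer is synced and,
+ * when @p_tunn is provided, the tunnel HW configuration is applied via
+ * qed_set_hw_tunn_mode_port() after the ramrod is posted.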
+ * + * This ramrod is sent to initialize a physical function (PF). It will + * configure the function related parameters and write its completion to the + * event ring specified in the parameters. + * + * Ramrods complete on the common event ring for the PF. This ring is + * allocated by the driver on host memory and its parameters are written + * to the internal RAM of the UStorm by the Function Start Ramrod. + * + */ + +int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_tunnel_info *p_tunn, + bool allow_npar_tx_switch); + +/** + * qed_sp_pf_update(): PF Function Update Ramrod. + * + * @p_hwfn: HW device data. + * + * Return: Int. + * + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. + */ + +int qed_sp_pf_update(struct qed_hwfn *p_hwfn); + +/** + * qed_sp_pf_update_stag(): Update firmware of new outer tag. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); + +/** + * qed_sp_pf_update_ufp(): PF ufp update Ramrod. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); + +int qed_sp_pf_stop(struct qed_hwfn *p_hwfn); + +int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_tunnel_info *p_tunn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data); +/** + * qed_sp_heartbeat_ramrod(): Send empty Ramrod. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ + +int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c new file mode 100644 index 0000000000..3b54da9635 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -0,0 +1,595 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/string.h> +#include "qed.h" +#include <linux/qed/qed_chain.h> +#include "qed_cxt.h" +#include "qed_dcbx.h" +#include "qed_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" + +void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + /* qed_spq_get_entry() can either get an entry from the free_pool, + * or, if no entries are left, allocate a new entry and add it to + * the unlimited_pending list. 
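+ * Entries that took the unlimited_pending path were kzalloc()ed and
+ * are simply kfree()d here; pool entries are returned to the SPQ
+ * free_pool instead.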
+ */ + if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending) + kfree(p_ent); + else + qed_spq_return_entry(p_hwfn, p_ent); +} + +int qed_sp_init_request(struct qed_hwfn *p_hwfn, + struct qed_spq_entry **pp_ent, + u8 cmd, u8 protocol, struct qed_sp_init_data *p_data) +{ + u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; + struct qed_spq_entry *p_ent = NULL; + int rc; + + if (!pp_ent) + return -ENOMEM; + + rc = qed_spq_get_entry(p_hwfn, pp_ent); + + if (rc) + return rc; + + p_ent = *pp_ent; + + p_ent->elem.hdr.cid = cpu_to_le32(opaque_cid); + p_ent->elem.hdr.cmd_id = cmd; + p_ent->elem.hdr.protocol_id = protocol; + + p_ent->priority = QED_SPQ_PRIORITY_NORMAL; + p_ent->comp_mode = p_data->comp_mode; + p_ent->comp_done.done = 0; + + switch (p_ent->comp_mode) { + case QED_SPQ_MODE_EBLOCK: + p_ent->comp_cb.cookie = &p_ent->comp_done; + break; + + case QED_SPQ_MODE_BLOCK: + if (!p_data->p_comp_data) + goto err; + + p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; + break; + + case QED_SPQ_MODE_CB: + if (!p_data->p_comp_data) + p_ent->comp_cb.function = NULL; + else + p_ent->comp_cb = *p_data->p_comp_data; + break; + + default: + DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + goto err; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Initialized: CID %08x %s:[%02x] %s:%02x data_addr %llx comp_mode [%s]\n", + opaque_cid, qed_get_ramrod_cmd_id_str(protocol, cmd), + cmd, qed_get_protocol_type_str(protocol), protocol, + (unsigned long long)(uintptr_t)&p_ent->ramrod, + D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, + QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + + memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); + + return 0; + +err: + qed_sp_destroy_request(p_hwfn, p_ent); + + return -EINVAL; +} + +static enum tunnel_clss qed_tunn_clss_to_fw_clss(u8 type) +{ + switch (type) { + case QED_TUNN_CLSS_MAC_VLAN: + return TUNNEL_CLSS_MAC_VLAN; + case QED_TUNN_CLSS_MAC_VNI: + return TUNNEL_CLSS_MAC_VNI; + case QED_TUNN_CLSS_INNER_MAC_VLAN: + return TUNNEL_CLSS_INNER_MAC_VLAN; + case QED_TUNN_CLSS_INNER_MAC_VNI: + return TUNNEL_CLSS_INNER_MAC_VNI; + case QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE: + return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE; + default: + return TUNNEL_CLSS_MAC_VLAN; + } +} + +static void +qed_set_pf_update_tunn_mode(struct qed_tunnel_info *p_tun, + struct qed_tunnel_info *p_src, bool b_pf_start) +{ + if (p_src->vxlan.b_update_mode || b_pf_start) + p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled; + + if (p_src->l2_gre.b_update_mode || b_pf_start) + p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled; + + if (p_src->ip_gre.b_update_mode || b_pf_start) + p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled; + + if (p_src->l2_geneve.b_update_mode || b_pf_start) + p_tun->l2_geneve.b_mode_enabled = + p_src->l2_geneve.b_mode_enabled; + + if (p_src->ip_geneve.b_update_mode || b_pf_start) + p_tun->ip_geneve.b_mode_enabled = + p_src->ip_geneve.b_mode_enabled; +} + +static void qed_set_tunn_cls_info(struct qed_tunnel_info *p_tun, + struct qed_tunnel_info *p_src) +{ + int type; + + p_tun->b_update_rx_cls = p_src->b_update_rx_cls; + p_tun->b_update_tx_cls = p_src->b_update_tx_cls; + + type = qed_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls); + p_tun->vxlan.tun_cls = type; + type = qed_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls); + p_tun->l2_gre.tun_cls = type; + type = qed_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls); + p_tun->ip_gre.tun_cls = type; + type = qed_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls); + p_tun->l2_geneve.tun_cls = 
type; + type = qed_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls); + p_tun->ip_geneve.tun_cls = type; +} + +static void qed_set_tunn_ports(struct qed_tunnel_info *p_tun, + struct qed_tunnel_info *p_src) +{ + p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port; + p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port; + + if (p_src->geneve_port.b_update_port) + p_tun->geneve_port.port = p_src->geneve_port.port; + + if (p_src->vxlan_port.b_update_port) + p_tun->vxlan_port.port = p_src->vxlan_port.port; +} + +static void +__qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, + struct qed_tunn_update_type *tun_type) +{ + *p_tunn_cls = tun_type->tun_cls; +} + +static void +qed_set_ramrod_tunnel_param(u8 *p_tunn_cls, + struct qed_tunn_update_type *tun_type, + u8 *p_update_port, + __le16 *p_port, + struct qed_tunn_update_udp_port *p_udp_port) +{ + __qed_set_ramrod_tunnel_param(p_tunn_cls, tun_type); + if (p_udp_port->b_update_port) { + *p_update_port = 1; + *p_port = cpu_to_le16(p_udp_port->port); + } +} + +static void +qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; + + qed_set_pf_update_tunn_mode(p_tun, p_src, false); + qed_set_tunn_cls_info(p_tun, p_src); + qed_set_tunn_ports(p_tun, p_src); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, + &p_tun->vxlan, + &p_tunn_cfg->set_vxlan_udp_port_flg, + &p_tunn_cfg->vxlan_udp_port, + &p_tun->vxlan_port); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, + &p_tun->l2_geneve, + &p_tunn_cfg->set_geneve_udp_port_flg, + &p_tunn_cfg->geneve_udp_port, + &p_tun->geneve_port); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, + &p_tun->ip_geneve); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, + &p_tun->l2_gre); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, + &p_tun->ip_gre); + + p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls; +} + +static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_tunnel_info *p_tun) +{ + qed_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled, + p_tun->ip_gre.b_mode_enabled); + qed_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled); + + qed_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled, + p_tun->ip_geneve.b_mode_enabled); +} + +static void qed_set_hw_tunn_mode_port(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_tunnel_info *p_tunn) +{ + if (p_tunn->vxlan_port.b_update_port) + qed_set_vxlan_dest_port(p_hwfn, p_ptt, + p_tunn->vxlan_port.port); + + if (p_tunn->geneve_port.b_update_port) + qed_set_geneve_dest_port(p_hwfn, p_ptt, + p_tunn->geneve_port.port); + + qed_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn); +} + +static void +qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_src, + struct pf_start_tunnel_config *p_tunn_cfg) +{ + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; + + if (!p_src) + return; + + qed_set_pf_update_tunn_mode(p_tun, p_src, true); + qed_set_tunn_cls_info(p_tun, p_src); + qed_set_tunn_ports(p_tun, p_src); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan, + &p_tun->vxlan, + &p_tunn_cfg->set_vxlan_udp_port_flg, + &p_tunn_cfg->vxlan_udp_port, + &p_tun->vxlan_port); + + qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve, + &p_tun->l2_geneve, + &p_tunn_cfg->set_geneve_udp_port_flg, + &p_tunn_cfg->geneve_udp_port, 
+ &p_tun->geneve_port); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve, + &p_tun->ip_geneve); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre, + &p_tun->l2_gre); + + __qed_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre, + &p_tun->ip_gre); +} + +int qed_sp_pf_start(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_tunnel_info *p_tunn, + bool allow_npar_tx_switch) +{ + struct outer_tag_config_struct *outer_tag_config; + struct pf_start_ramrod_data *p_ramrod = NULL; + u16 sb = qed_int_get_sp_sb_id(p_hwfn); + u8 sb_index = p_hwfn->p_eq->eq_sb_index; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + u8 page_cnt, i; + int rc; + + /* update initial eq producer */ + qed_eq_prod_update(p_hwfn, + qed_chain_get_prod_idx(&p_hwfn->p_eq->chain)); + + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_START, + PROTOCOLID_COMMON, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.pf_start; + + p_ramrod->event_ring_sb_id = cpu_to_le16(sb); + p_ramrod->event_ring_sb_index = sb_index; + p_ramrod->path_id = QED_PATH_ID(p_hwfn); + p_ramrod->dont_log_ramrods = 0; + p_ramrod->log_type_mask = cpu_to_le16(0xf); + + if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) + p_ramrod->mf_mode = MF_OVLAN; + else + p_ramrod->mf_mode = MF_NPAR; + + outer_tag_config = &p_ramrod->outer_tag_config; + outer_tag_config->outer_tag.tci = cpu_to_le16(p_hwfn->hw_info.ovlan); + + if (test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits)) { + outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021Q); + } else if (test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)) { + outer_tag_config->outer_tag.tpid = cpu_to_le16(ETH_P_8021AD); + outer_tag_config->enable_stag_pri_change = 1; + } + + outer_tag_config->pri_map_valid = 1; + for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) + outer_tag_config->inner_to_outer_pri_map[i] = i; + + /* enable_stag_pri_change should be set if port is in BD mode or, + * UFP with Host Control mode. 
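+ * In the UFP case the driver treats ufp_info.pri_type ==
+ * QED_UFP_PRI_OS as Host Control, which is what the check below tests.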
+ */ + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) { + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + outer_tag_config->enable_stag_pri_change = 1; + else + outer_tag_config->enable_stag_pri_change = 0; + + outer_tag_config->outer_tag.tci |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); + } + + /* Place EQ address in RAMROD */ + DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, + qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain)); + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); + p_ramrod->event_ring_num_pages = page_cnt; + + /* Place consolidation queue address in ramrod */ + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr, + qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain)); + page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain); + p_ramrod->consolid_q_num_pages = page_cnt; + + qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); + + if (test_bit(QED_MF_INTER_PF_SWITCH, &p_hwfn->cdev->mf_bits)) + p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; + + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + case QED_PCI_FCOE: + p_ramrod->personality = PERSONALITY_FCOE; + break; + case QED_PCI_ISCSI: + case QED_PCI_NVMETCP: + p_ramrod->personality = PERSONALITY_TCP_ULP; + break; + case QED_PCI_ETH_ROCE: + case QED_PCI_ETH_IWARP: + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; + break; + default: + DP_NOTICE(p_hwfn, "Unknown personality %d\n", + p_hwfn->hw_info.personality); + p_ramrod->personality = PERSONALITY_ETH; + } + + if (p_hwfn->cdev->p_iov_info) { + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + + p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf; + p_ramrod->num_vfs = (u8)p_iov->total_vfs; + } + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR; + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n", + sb, sb_index, outer_tag_config->outer_tag.tci); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + + if (p_tunn) + qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, + &p_hwfn->cdev->tunnel); + + return rc; +} + +int qed_sp_pf_update(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results, + &p_ent->ramrod.pf_update); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + if (p_hwfn->ufp_info.pri_type == QED_UFP_PRI_UNKNOWN) { + DP_INFO(p_hwfn, "Invalid priority type %d\n", + p_hwfn->ufp_info.pri_type); + return -EINVAL; + } + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + p_ent->ramrod.pf_update.update_enable_stag_pri_change = true; + if 
(p_hwfn->ufp_info.pri_type == QED_UFP_PRI_OS) + p_ent->ramrod.pf_update.enable_stag_pri_change = 1; + else + p_ent->ramrod.pf_update.enable_stag_pri_change = 0; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +/* Set pf update ramrod command params */ +int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_tunnel_info *p_tunn, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + if (IS_VF(p_hwfn->cdev)) + return qed_vf_pf_tunnel_param_update(p_hwfn, p_tunn); + + if (!p_tunn) + return -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + qed_tunn_set_pf_update_params(p_hwfn, p_tunn, + &p_ent->ramrod.pf_update.tunnel_config); + + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + qed_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->cdev->tunnel); + + return rc; +} + +int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) +{ + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_CB; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc) + return rc; + + p_ent->ramrod.pf_update.update_mf_vlan_flag = true; + p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); + if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) + p_ent->ramrod.pf_update.mf_vlan |= + cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c new file mode 100644 index 0000000000..d01b9245f8 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -0,0 +1,1051 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/string.h> +#include "qed.h" +#include "qed_cxt.h" +#include "qed_dev_api.h" +#include "qed_hsi.h" +#include "qed_iro_hsi.h" +#include "qed_hw.h" +#include "qed_int.h" +#include "qed_iscsi.h" +#include "qed_mcp.h" +#include "qed_ooo.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_rdma.h" + +/*************************************************************************** + * Structures & Definitions + ***************************************************************************/ + +#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) + +#define SPQ_BLOCK_DELAY_MAX_ITER (10) +#define SPQ_BLOCK_DELAY_US (10) +#define SPQ_BLOCK_SLEEP_MAX_ITER (1000) +#define SPQ_BLOCK_SLEEP_MS (5) + +/*************************************************************************** + * Blocking Imp. (BLOCK/EBLOCK mode) + ***************************************************************************/ +static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, u8 fw_return_code) +{ + struct qed_spq_comp_done *comp_done; + + comp_done = (struct qed_spq_comp_done *)cookie; + + comp_done->fw_return_code = fw_return_code; + + /* Make sure completion done is visible on waiting thread */ + smp_store_release(&comp_done->done, 0x1); +} + +static int __qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool sleep_between_iter) +{ + struct qed_spq_comp_done *comp_done; + u32 iter_cnt; + + comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; + iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER + : SPQ_BLOCK_DELAY_MAX_ITER; + + while (iter_cnt--) { + /* Validate we receive completion update */ + if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */ + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return 0; + } + + if (sleep_between_iter) + msleep(SPQ_BLOCK_SLEEP_MS); + else + udelay(SPQ_BLOCK_DELAY_US); + } + + return -EBUSY; +} + +static int qed_spq_block(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + u8 *p_fw_ret, bool skip_quick_poll) +{ + struct qed_spq_comp_done *comp_done; + struct qed_ptt *p_ptt; + int rc; + + /* A relatively short polling period w/o sleeping, to allow the FW to + * complete the ramrod and thus possibly to avoid the following sleeps. 
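+ * If both the busy-wait poll and the sleeping poll time out, an MCP
+ * drain is requested and the sleeping poll is retried once more before
+ * the ramrod is reported as stuck.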
+ */ + if (!skip_quick_poll) { + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false); + if (!rc) + return 0; + } + + /* Move to polling with a sleeping period between iterations */ + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0; + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, "ptt, failed to acquire\n"); + return -EAGAIN; + } + + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); + rc = qed_mcp_drain(p_hwfn, p_ptt); + qed_ptt_release(p_hwfn, p_ptt); + if (rc) { + DP_NOTICE(p_hwfn, "MCP drain failed\n"); + goto err; + } + + /* Retry after drain */ + rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true); + if (!rc) + return 0; + + comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie; + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return 0; + } +err: + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) + return -EBUSY; + qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_RAMROD_FAIL, + "Ramrod is stuck [CID %08x %s:%02x %s:%02x echo %04x]\n", + le32_to_cpu(p_ent->elem.hdr.cid), + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), + p_ent->elem.hdr.cmd_id, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id, + le16_to_cpu(p_ent->elem.hdr.echo)); + qed_ptt_release(p_hwfn, p_ptt); + + return -EBUSY; +} + +/*************************************************************************** + * SPQ entries inner API + ***************************************************************************/ +static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + p_ent->flags = 0; + + switch (p_ent->comp_mode) { + case QED_SPQ_MODE_EBLOCK: + case QED_SPQ_MODE_BLOCK: + p_ent->comp_cb.function = qed_spq_blocking_cb; + break; + case QED_SPQ_MODE_CB: + break; + default: + DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + return -EINVAL; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Ramrod hdr: [CID 0x%08x %s:0x%02x %s:0x%02x] Data ptr: [%08x:%08x] Cmpltion Mode: %s\n", + p_ent->elem.hdr.cid, + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), + p_ent->elem.hdr.cmd_id, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id, + p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo, + D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, + QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + + return 0; +} + +/*************************************************************************** + * HSI access + ***************************************************************************/ +static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, + struct qed_spq *p_spq) +{ + struct core_conn_context *p_cxt; + struct qed_cxt_info cxt_info; + u16 physical_q; + int rc; + + cxt_info.iid = p_spq->cid; + + rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info); + + if (rc < 0) { + DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n", + p_spq->cid); + return; + } + + p_cxt = cxt_info.p_cxt; + + SET_FIELD(p_cxt->xstorm_ag_context.flags10, + XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + SET_FIELD(p_cxt->xstorm_ag_context.flags1, + XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + SET_FIELD(p_cxt->xstorm_ag_context.flags9, + XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + + /* QM physical queue */ + physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB); + p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q); + + p_cxt->xstorm_st_context.spq_base_addr.lo = + 
DMA_LO_LE(p_spq->chain.p_phys_addr); + p_cxt->xstorm_st_context.spq_base_addr.hi = + DMA_HI_LE(p_spq->chain.p_phys_addr); +} + +static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, + struct qed_spq *p_spq, struct qed_spq_entry *p_ent) +{ + struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct core_db_data *p_db_data = &p_spq->db_data; + u16 echo = qed_chain_get_prod_idx(p_chain); + struct slow_path_element *elem; + + p_ent->elem.hdr.echo = cpu_to_le16(echo); + elem = qed_chain_produce(p_chain); + if (!elem) { + DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n"); + return -EINVAL; + } + + *elem = p_ent->elem; /* struct assignment */ + + /* send a doorbell on the slow hwfn session */ + p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); + + /* make sure the SPQE is updated before the doorbell */ + wmb(); + + DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data); + + /* make sure doorbell is rang */ + wmb(); + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", + p_spq->db_addr_offset, + p_spq->cid, + p_db_data->params, + p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain)); + + return 0; +} + +/*************************************************************************** + * Asynchronous events + ***************************************************************************/ +static int +qed_async_event_completion(struct qed_hwfn *p_hwfn, + struct event_ring_entry *p_eqe) +{ + qed_spq_async_comp_cb cb; + + if (!p_hwfn->p_spq) + return -EINVAL; + + if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) { + DP_ERR(p_hwfn, "Wrong protocol: %s:%d\n", + qed_get_protocol_type_str(p_eqe->protocol_id), + p_eqe->protocol_id); + + return -EINVAL; + } + + cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id]; + if (cb) { + return cb(p_hwfn, p_eqe->opcode, p_eqe->echo, + &p_eqe->data, p_eqe->fw_return_code); + } else { + DP_NOTICE(p_hwfn, + "Unknown Async completion for %s:%d\n", + qed_get_protocol_type_str(p_eqe->protocol_id), + p_eqe->protocol_id); + + return -EINVAL; + } +} + +int +qed_spq_register_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id, + qed_spq_async_comp_cb cb) +{ + if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) + return -EINVAL; + + p_hwfn->p_spq->async_comp_cb[protocol_id] = cb; + return 0; +} + +void +qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn, + enum protocol_type protocol_id) +{ + if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE)) + return; + + p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL; +} + +/*************************************************************************** + * EQ API + ***************************************************************************/ +void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod) +{ + u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_EQE_CONS, p_hwfn->rel_pf_id); + + REG_WR16(p_hwfn, addr, prod); +} + +int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) +{ + struct qed_eq *p_eq = cookie; + struct qed_chain *p_chain = &p_eq->chain; + int rc = 0; + + /* take a snapshot of the FW consumer */ + u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons); + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx); + + /* Need to guarantee the fw_cons index we use points to a usuable + * element (to comply with our chain), so our macros would comply + */ + if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) == + qed_chain_get_usable_per_page(p_chain)) + fw_cons_idx += 
qed_chain_get_unusable_per_page(p_chain); + + /* Complete current segment of eq entries */ + while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) { + struct event_ring_entry *p_eqe = qed_chain_consume(p_chain); + + if (!p_eqe) { + rc = -EINVAL; + break; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "op %x prot %x res0 %x echo %x fwret %x flags %x\n", + p_eqe->opcode, + p_eqe->protocol_id, + p_eqe->reserved0, + le16_to_cpu(p_eqe->echo), + p_eqe->fw_return_code, + p_eqe->flags); + + if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) { + if (qed_async_event_completion(p_hwfn, p_eqe)) + rc = -EINVAL; + } else if (qed_spq_completion(p_hwfn, + p_eqe->echo, + p_eqe->fw_return_code, + &p_eqe->data)) { + rc = -EINVAL; + } + + qed_chain_recycle_consumed(p_chain); + } + + qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); + + /* Attempt to post pending requests */ + spin_lock_bh(&p_hwfn->p_spq->lock); + rc = qed_spq_pend_post(p_hwfn); + spin_unlock_bh(&p_hwfn->p_spq->lock); + + return rc; +} + +int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem) +{ + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = num_elem, + .elem_size = sizeof(union event_ring_element), + }; + struct qed_eq *p_eq; + int ret; + + /* Allocate EQ struct */ + p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL); + if (!p_eq) + return -ENOMEM; + + ret = qed_chain_alloc(p_hwfn->cdev, &p_eq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate EQ chain\n"); + goto eq_allocate_fail; + } + + /* register EQ completion on the SP SB */ + qed_int_register_cb(p_hwfn, qed_eq_completion, + p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); + + p_hwfn->p_eq = p_eq; + return 0; + +eq_allocate_fail: + kfree(p_eq); + + return ret; +} + +void qed_eq_setup(struct qed_hwfn *p_hwfn) +{ + qed_chain_reset(&p_hwfn->p_eq->chain); +} + +void qed_eq_free(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->p_eq) + return; + + qed_chain_free(p_hwfn->cdev, &p_hwfn->p_eq->chain); + + kfree(p_hwfn->p_eq); + p_hwfn->p_eq = NULL; +} + +/*************************************************************************** + * CQE API - manipulate EQ functionality + ***************************************************************************/ +static int qed_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe, + enum protocol_type protocol) +{ + if (IS_VF(p_hwfn->cdev)) + return 0; + + /* @@@tmp - it's possible we'll eventually want to handle some + * actual commands that can arrive here, but for now this is only + * used to complete the ramrod using the echo value on the cqe + */ + return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL); +} + +int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe) +{ + int rc; + + rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); + if (rc) + DP_NOTICE(p_hwfn, + "Failed to handle RXQ CQE [cmd 0x%02x]\n", + cqe->ramrod_cmd_id); + + return rc; +} + +/*************************************************************************** + * Slow hwfn Queue (spq) + ***************************************************************************/ +void qed_spq_setup(struct qed_hwfn *p_hwfn) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_virt = NULL; + struct core_db_data *p_db_data; + void __iomem *db_addr; + dma_addr_t p_phys = 0; + u32 i, capacity; + int rc; + + INIT_LIST_HEAD(&p_spq->pending); + INIT_LIST_HEAD(&p_spq->completion_pending); + 
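/* Descriptive note (added for clarity): free_pool is pre-populated
+ * below from the coherent p_virt array; once it runs empty,
+ * qed_spq_get_entry() falls back to allocating entries on the
+ * unlimited_pending path.
+ */
+ 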
INIT_LIST_HEAD(&p_spq->free_pool); + INIT_LIST_HEAD(&p_spq->unlimited_pending); + spin_lock_init(&p_spq->lock); + + /* SPQ empty pool */ + p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod); + p_virt = p_spq->p_virt; + + capacity = qed_chain_get_capacity(&p_spq->chain); + for (i = 0; i < capacity; i++) { + DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); + + list_add_tail(&p_virt->list, &p_spq->free_pool); + + p_virt++; + p_phys += sizeof(struct qed_spq_entry); + } + + /* Statistics */ + p_spq->normal_count = 0; + p_spq->comp_count = 0; + p_spq->comp_sent_count = 0; + p_spq->unlimited_pending_count = 0; + + bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE); + p_spq->comp_bitmap_idx = 0; + + /* SPQ cid, cannot fail */ + qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); + qed_spq_hw_initialize(p_hwfn, p_spq); + + /* reset the chain itself */ + qed_chain_reset(&p_spq->chain); + + /* Initialize the address/data of the SPQ doorbell */ + p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY); + p_db_data = &p_spq->db_data; + memset(p_db_data, 0, sizeof(*p_db_data)); + SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_SPQ_PROD_CMD); + p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + /* Register the SPQ doorbell with the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + p_spq->db_addr_offset); + rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data, + DB_REC_WIDTH_32B, DB_REC_KERNEL); + if (rc) + DP_INFO(p_hwfn, + "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n"); +} + +int qed_spq_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_SINGLE, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .elem_size = sizeof(struct slow_path_element), + }; + struct qed_dev *cdev = p_hwfn->cdev; + struct qed_spq_entry *p_virt = NULL; + struct qed_spq *p_spq = NULL; + dma_addr_t p_phys = 0; + u32 capacity; + int ret; + + /* SPQ struct */ + p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL); + if (!p_spq) + return -ENOMEM; + + /* SPQ ring */ + ret = qed_chain_alloc(cdev, &p_spq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate SPQ chain\n"); + goto spq_chain_alloc_fail; + } + + /* allocate and fill the SPQ elements (incl. 
ramrod data list) */ + capacity = qed_chain_get_capacity(&p_spq->chain); + ret = -ENOMEM; + + p_virt = dma_alloc_coherent(&cdev->pdev->dev, + capacity * sizeof(struct qed_spq_entry), + &p_phys, GFP_KERNEL); + if (!p_virt) + goto spq_alloc_fail; + + p_spq->p_virt = p_virt; + p_spq->p_phys = p_phys; + p_hwfn->p_spq = p_spq; + + return 0; + +spq_alloc_fail: + qed_chain_free(cdev, &p_spq->chain); +spq_chain_alloc_fail: + kfree(p_spq); + + return ret; +} + +void qed_spq_free(struct qed_hwfn *p_hwfn) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + void __iomem *db_addr; + u32 capacity; + + if (!p_spq) + return; + + /* Delete the SPQ doorbell from the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + p_spq->db_addr_offset); + qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data); + + if (p_spq->p_virt) { + capacity = qed_chain_get_capacity(&p_spq->chain); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + capacity * + sizeof(struct qed_spq_entry), + p_spq->p_virt, p_spq->p_phys); + } + + qed_chain_free(p_hwfn->cdev, &p_spq->chain); + kfree(p_spq); + p_hwfn->p_spq = NULL; +} + +int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_ent = NULL; + int rc = 0; + + spin_lock_bh(&p_spq->lock); + + if (list_empty(&p_spq->free_pool)) { + p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC); + if (!p_ent) { + DP_NOTICE(p_hwfn, + "Failed to allocate an SPQ entry for a pending ramrod\n"); + rc = -ENOMEM; + goto out_unlock; + } + p_ent->queue = &p_spq->unlimited_pending; + } else { + p_ent = list_first_entry(&p_spq->free_pool, + struct qed_spq_entry, list); + list_del(&p_ent->list); + p_ent->queue = &p_spq->pending; + } + + *pp_ent = p_ent; + +out_unlock: + spin_unlock_bh(&p_spq->lock); + return rc; +} + +/* Locked variant; Should be called while the SPQ lock is taken */ +static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent) +{ + list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool); +} + +void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent) +{ + spin_lock_bh(&p_hwfn->p_spq->lock); + __qed_spq_return_entry(p_hwfn, p_ent); + spin_unlock_bh(&p_hwfn->p_spq->lock); +} + +/** + * qed_spq_add_entry() - Add a new entry to the pending list. + * Should be used while lock is being held. + * + * @p_hwfn: HW device data. + * @p_ent: An entry to add. + * @priority: Desired priority. + * + * Adds an entry to the pending list is there is room (an empty + * element is available in the free_pool), or else places the + * entry in the unlimited_pending pool. + * + * Return: zero on success, -EINVAL on invalid @priority. + */ +static int qed_spq_add_entry(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, + enum spq_priority priority) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + + if (p_ent->queue == &p_spq->unlimited_pending) { + if (list_empty(&p_spq->free_pool)) { + list_add_tail(&p_ent->list, &p_spq->unlimited_pending); + p_spq->unlimited_pending_count++; + + return 0; + } else { + struct qed_spq_entry *p_en2; + + p_en2 = list_first_entry(&p_spq->free_pool, + struct qed_spq_entry, list); + list_del(&p_en2->list); + + /* Copy the ring element physical pointer to the new + * entry, since we are about to override the entire ring + * entry and don't want to lose the pointer. 
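+ * (elem.data_ptr was set in qed_spq_setup() to the DMA address of each + * ring entry's ramrod buffer, so it must survive the struct copy below.)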
+ */ + p_ent->elem.data_ptr = p_en2->elem.data_ptr; + + *p_en2 = *p_ent; + + /* EBLOCK responsible to free the allocated p_ent */ + if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK) + kfree(p_ent); + else + p_ent->post_ent = p_en2; + + p_ent = p_en2; + } + } + + /* entry is to be placed in 'pending' queue */ + switch (priority) { + case QED_SPQ_PRIORITY_NORMAL: + list_add_tail(&p_ent->list, &p_spq->pending); + p_spq->normal_count++; + break; + case QED_SPQ_PRIORITY_HIGH: + list_add(&p_ent->list, &p_spq->pending); + p_spq->high_count++; + break; + default: + return -EINVAL; + } + + return 0; +} + +/*************************************************************************** + * Accessor + ***************************************************************************/ +u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->p_spq) + return 0xffffffff; /* illegal */ + return p_hwfn->p_spq->cid; +} + +/*************************************************************************** + * Posting new Ramrods + ***************************************************************************/ +static int qed_spq_post_list(struct qed_hwfn *p_hwfn, + struct list_head *head, u32 keep_reserve) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + int rc; + + while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve && + !list_empty(head)) { + struct qed_spq_entry *p_ent = + list_first_entry(head, struct qed_spq_entry, list); + list_move_tail(&p_ent->list, &p_spq->completion_pending); + p_spq->comp_sent_count++; + + rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent); + if (rc) { + list_del(&p_ent->list); + __qed_spq_return_entry(p_hwfn, p_ent); + return rc; + } + } + + return 0; +} + +int qed_spq_pend_post(struct qed_hwfn *p_hwfn) +{ + struct qed_spq *p_spq = p_hwfn->p_spq; + struct qed_spq_entry *p_ent = NULL; + + while (!list_empty(&p_spq->free_pool)) { + if (list_empty(&p_spq->unlimited_pending)) + break; + + p_ent = list_first_entry(&p_spq->unlimited_pending, + struct qed_spq_entry, list); + if (!p_ent) + return -EINVAL; + + list_del(&p_ent->list); + + qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + } + + return qed_spq_post_list(p_hwfn, &p_spq->pending, + SPQ_HIGH_PRI_RESERVE_DEFAULT); +} + +static void qed_spq_recov_set_ret_code(struct qed_spq_entry *p_ent, + u8 *fw_return_code) +{ + if (!fw_return_code) + return; + + if (p_ent->elem.hdr.protocol_id == PROTOCOLID_ROCE || + p_ent->elem.hdr.protocol_id == PROTOCOLID_IWARP) + *fw_return_code = RDMA_RETURN_OK; +} + +/* Avoid overriding of SPQ entries when getting out-of-order completions, by + * marking the completions in a bitmap and increasing the chain consumer only + * for the first successive completed entries. + */ +static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo) +{ + u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE; + struct qed_spq *p_spq = p_hwfn->p_spq; + + __set_bit(pos, p_spq->p_comp_bitmap); + while (test_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap)) { + __clear_bit(p_spq->comp_bitmap_idx, + p_spq->p_comp_bitmap); + p_spq->comp_bitmap_idx++; + qed_chain_return_produced(&p_spq->chain); + } +} + +int qed_spq_post(struct qed_hwfn *p_hwfn, + struct qed_spq_entry *p_ent, u8 *fw_return_code) +{ + int rc = 0; + struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL; + bool b_ret_ent = true; + bool eblock; + + if (!p_hwfn) + return -EINVAL; + + if (!p_ent) { + DP_NOTICE(p_hwfn, "Got a NULL pointer\n"); + return -EINVAL; + } + + if (p_hwfn->cdev->recov_in_prog) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Recovery is in progress. 
Skip spq post [%s:%02x %s:%02x]\n", + qed_get_ramrod_cmd_id_str(p_ent->elem.hdr.protocol_id, + p_ent->elem.hdr.cmd_id), + p_ent->elem.hdr.cmd_id, + qed_get_protocol_type_str(p_ent->elem.hdr.protocol_id), + p_ent->elem.hdr.protocol_id); + + /* Let the flow complete w/o any error handling */ + qed_spq_recov_set_ret_code(p_ent, fw_return_code); + return 0; + } + + /* Complete the entry */ + rc = qed_spq_fill_entry(p_hwfn, p_ent); + + spin_lock_bh(&p_spq->lock); + + /* Check return value after LOCK is taken for cleaner error flow */ + if (rc) + goto spq_post_fail; + + /* Check if entry is in block mode before qed_spq_add_entry, + * which might kfree p_ent. + */ + eblock = (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK); + + /* Add the request to the pending queue */ + rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + if (rc) + goto spq_post_fail; + + rc = qed_spq_pend_post(p_hwfn); + if (rc) { + /* Since it's possible that pending failed for a different + * entry [although unlikely], the failed entry was already + * dealt with; No need to return it here. + */ + b_ret_ent = false; + goto spq_post_fail; + } + + spin_unlock_bh(&p_spq->lock); + + if (eblock) { + /* For entries in QED BLOCK mode, the completion code cannot + * perform the necessary cleanup - if it did, we couldn't + * access p_ent here to see whether it's successful or not. + * Thus, after gaining the answer perform the cleanup here. + */ + rc = qed_spq_block(p_hwfn, p_ent, fw_return_code, + p_ent->queue == &p_spq->unlimited_pending); + + if (p_ent->queue == &p_spq->unlimited_pending) { + struct qed_spq_entry *p_post_ent = p_ent->post_ent; + + kfree(p_ent); + + /* Return the entry which was actually posted */ + p_ent = p_post_ent; + } + + if (rc) + goto spq_post_fail2; + + /* return to pool */ + qed_spq_return_entry(p_hwfn, p_ent); + } + return rc; + +spq_post_fail2: + spin_lock_bh(&p_spq->lock); + list_del(&p_ent->list); + qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo); + +spq_post_fail: + /* return to the free pool */ + if (b_ret_ent) + __qed_spq_return_entry(p_hwfn, p_ent); + spin_unlock_bh(&p_spq->lock); + + return rc; +} + +int qed_spq_completion(struct qed_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data) +{ + struct qed_spq *p_spq; + struct qed_spq_entry *p_ent = NULL; + struct qed_spq_entry *tmp; + struct qed_spq_entry *found = NULL; + + if (!p_hwfn) + return -EINVAL; + + p_spq = p_hwfn->p_spq; + if (!p_spq) + return -EINVAL; + + spin_lock_bh(&p_spq->lock); + list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) { + if (p_ent->elem.hdr.echo == echo) { + list_del(&p_ent->list); + qed_spq_comp_bmap_update(p_hwfn, echo); + p_spq->comp_count++; + found = p_ent; + break; + } + + /* This is relatively uncommon - depends on scenarios + * which have mutliple per-PF sent ramrods. + */ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n", + le16_to_cpu(echo), + le16_to_cpu(p_ent->elem.hdr.echo)); + } + + /* Release lock before callback, as callback may post + * an additional ramrod. 
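+ * The entry itself was already unlinked from completion_pending while + * the lock was held, so 'found' remains safe to use after the unlock.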
+ */ + spin_unlock_bh(&p_spq->lock); + + if (!found) { + DP_NOTICE(p_hwfn, + "Failed to find an entry this EQE [echo %04x] completes\n", + le16_to_cpu(echo)); + return -EEXIST; + } + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "Complete EQE [echo %04x]: func %p cookie %p)\n", + le16_to_cpu(echo), + p_ent->comp_cb.function, p_ent->comp_cb.cookie); + if (found->comp_cb.function) + found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, + fw_return_code); + else + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Got a completion without a callback function\n"); + + if (found->comp_mode != QED_SPQ_MODE_EBLOCK) + /* EBLOCK is responsible for returning its own entry into the + * free list. + */ + qed_spq_return_entry(p_hwfn, found); + + return 0; +} + +#define QED_SPQ_CONSQ_ELEM_SIZE 0x80 + +int qed_consq_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_chain_init_params params = { + .mode = QED_CHAIN_MODE_PBL, + .intended_use = QED_CHAIN_USE_TO_PRODUCE, + .cnt_type = QED_CHAIN_CNT_TYPE_U16, + .num_elems = QED_CHAIN_PAGE_SIZE / QED_SPQ_CONSQ_ELEM_SIZE, + .elem_size = QED_SPQ_CONSQ_ELEM_SIZE, + }; + struct qed_consq *p_consq; + int ret; + + /* Allocate ConsQ struct */ + p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL); + if (!p_consq) + return -ENOMEM; + + /* Allocate and initialize ConsQ chain */ + ret = qed_chain_alloc(p_hwfn->cdev, &p_consq->chain, ¶ms); + if (ret) { + DP_NOTICE(p_hwfn, "Failed to allocate ConsQ chain"); + goto consq_alloc_fail; + } + + p_hwfn->p_consq = p_consq; + + return 0; + +consq_alloc_fail: + kfree(p_consq); + + return ret; +} + +void qed_consq_setup(struct qed_hwfn *p_hwfn) +{ + qed_chain_reset(&p_hwfn->p_consq->chain); +} + +void qed_consq_free(struct qed_hwfn *p_hwfn) +{ + if (!p_hwfn->p_consq) + return; + + qed_chain_free(p_hwfn->cdev, &p_hwfn->p_consq->chain); + + kfree(p_hwfn->p_consq); + p_hwfn->p_consq = NULL; +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c new file mode 100644 index 0000000000..fa167b1aa0 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -0,0 +1,5305 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/etherdevice.h> +#include <linux/crc32.h> +#include <linux/vmalloc.h> +#include <linux/crash_dump.h> +#include <linux/qed/qed_iov_if.h> +#include "qed_cxt.h" +#include "qed_hsi.h" +#include "qed_iro_hsi.h" +#include "qed_hw.h" +#include "qed_init_ops.h" +#include "qed_int.h" +#include "qed_mcp.h" +#include "qed_reg_addr.h" +#include "qed_sp.h" +#include "qed_sriov.h" +#include "qed_vf.h" +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid); + +static u16 qed_vf_from_entity_id(__le16 entity_id) +{ + return le16_to_cpu(entity_id) - MAX_NUM_PFS; +} + +static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf) +{ + u8 legacy = 0; + + if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) + legacy |= QED_QCID_LEGACY_VF_RX_PROD; + + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + legacy |= QED_QCID_LEGACY_VF_CID; + + return legacy; +} + +/* IOV ramrods */ +static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) +{ + struct vf_start_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + u8 fp_minor; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_vf->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_START, + PROTOCOLID_COMMON, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vf_start; + + p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID); + p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid); + + switch (p_hwfn->hw_info.personality) { + case QED_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + case QED_PCI_ETH_ROCE: + case QED_PCI_ETH_IWARP: + p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; + break; + default: + DP_NOTICE(p_hwfn, "Unknown VF personality %d\n", + p_hwfn->hw_info.personality); + qed_sp_destroy_request(p_hwfn, p_ent); + return -EINVAL; + } + + fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor; + if (fp_minor > ETH_HSI_VER_MINOR && + fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n", + p_vf->abs_vf_id, + ETH_HSI_VER_MAJOR, + fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + fp_minor = ETH_HSI_VER_MINOR; + } + + p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR; + p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - Starting using HSI %02x.%02x\n", + p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid) +{ + struct vf_stop_ramrod_data *p_ramrod = NULL; + struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; + int rc = -EINVAL; + + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; + + rc = qed_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_STOP, + PROTOCOLID_COMMON, &init_data); + if (rc) + return rc; + + p_ramrod = &p_ent->ramrod.vf_stop; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + + return qed_spq_post(p_hwfn, p_ent, NULL); +} + +bool 
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious) +{ + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->cdev, "No iov info\n"); + return false; + } + + if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) || + (rel_vf_id < 0)) + return false; + + if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) && + b_enabled_only) + return false; + + if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) && + b_non_malicious) + return false; + + return true; +} + +static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct qed_vf_info *vf = NULL; + + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->cdev, "No iov info\n"); + return NULL; + } + + if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, + b_enabled_only, false)) + vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; + else + DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n", + __func__, relative_vf_id); + + return vf; +} + +static struct qed_queue_cid * +qed_iov_get_vf_rx_queue_cid(struct qed_vf_queue *p_queue) +{ + int i; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx) + return p_queue->cids[i].p_cid; + } + + return NULL; +} + +enum qed_iov_validate_q_mode { + QED_IOV_VALIDATE_Q_NA, + QED_IOV_VALIDATE_Q_ENABLE, + QED_IOV_VALIDATE_Q_DISABLE, +}; + +static bool qed_iov_validate_queue_mode(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + u16 qid, + enum qed_iov_validate_q_mode mode, + bool b_is_tx) +{ + int i; + + if (mode == QED_IOV_VALIDATE_Q_NA) + return true; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + struct qed_vf_queue_cid *p_qcid; + + p_qcid = &p_vf->vf_queues[qid].cids[i]; + + if (!p_qcid->p_cid) + continue; + + if (p_qcid->b_is_tx != b_is_tx) + continue; + + return mode == QED_IOV_VALIDATE_Q_ENABLE; + } + + /* In case we haven't found any valid cid, then its disabled */ + return mode == QED_IOV_VALIDATE_Q_DISABLE; +} + +static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + u16 rx_qid, + enum qed_iov_validate_q_mode mode) +{ + if (rx_qid >= p_vf->num_rxqs) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n", + p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs); + return false; + } + + return qed_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid, mode, false); +} + +static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + u16 tx_qid, + enum qed_iov_validate_q_mode mode) +{ + if (tx_qid >= p_vf->num_txqs) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n", + p_vf->abs_vf_id, tx_qid, p_vf->num_txqs); + return false; + } + + return qed_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid, mode, true); +} + +static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u16 sb_idx) +{ + int i; + + for (i = 0; i < p_vf->num_sbs; i++) + if (p_vf->igu_sbs[i] == sb_idx) + return true; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n", + p_vf->abs_vf_id, sb_idx, p_vf->num_sbs); + + return false; +} + +static bool qed_iov_validate_active_rxq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + u8 i; + + for (i = 0; i < p_vf->num_rxqs; i++) + if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, + QED_IOV_VALIDATE_Q_ENABLE, + false)) + return true; + + return false; +} + +static bool 
qed_iov_validate_active_txq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + u8 i; + + for (i = 0; i < p_vf->num_txqs; i++) + if (qed_iov_validate_queue_mode(p_hwfn, p_vf, i, + QED_IOV_VALIDATE_Q_ENABLE, + true)) + return true; + + return false; +} + +static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn, + int vfid, struct qed_ptt *p_ptt) +{ + struct qed_bulletin_content *p_bulletin; + int crc_size = sizeof(p_bulletin->crc); + struct qed_dmae_params params; + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf) + return -EINVAL; + + if (!p_vf->vf_bulletin) + return -EINVAL; + + p_bulletin = p_vf->bulletin.p_virt; + + /* Increment bulletin board version and compute crc */ + p_bulletin->version++; + p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size, + p_vf->bulletin.size - crc_size); + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", + p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); + + /* propagate bulletin board via dmae to vm memory */ + memset(¶ms, 0, sizeof(params)); + SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); + params.dst_vfid = p_vf->abs_vf_id; + return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, + p_vf->vf_bulletin, p_vf->bulletin.size / 4, + ¶ms); +} + +static int qed_iov_pci_cfg_info(struct qed_dev *cdev) +{ + struct qed_hw_sriov_info *iov = cdev->p_iov_info; + int pos = iov->pos; + + DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos); + pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs); + + pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + if (iov->num_vfs) { + DP_VERBOSE(cdev, + QED_MSG_IOV, + "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n"); + iov->num_vfs = 0; + } + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + + pci_read_config_word(cdev->pdev, + pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); + + pci_read_config_dword(cdev->pdev, + pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + + pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap); + + pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + DP_VERBOSE(cdev, + QED_MSG_IOV, + "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n", + iov->nres, + iov->cap, + iov->ctrl, + iov->total_vfs, + iov->initial_vfs, + iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz); + + /* Some sanity checks */ + if (iov->num_vfs > NUM_OF_VFS(cdev) || + iov->total_vfs > NUM_OF_VFS(cdev)) { + /* This can happen only due to a bug. 
In this case we set + * num_vfs to zero to avoid memory corruption in the code that + * assumes max number of vfs + */ + DP_NOTICE(cdev, + "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n", + iov->num_vfs); + + iov->num_vfs = 0; + iov->total_vfs = 0; + } + + return 0; +} + +static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + struct qed_bulletin_content *p_bulletin_virt; + dma_addr_t req_p, rply_p, bulletin_p; + union pfvf_tlvs *p_reply_virt_addr; + union vfpf_tlvs *p_req_virt_addr; + u8 idx = 0; + + memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); + + p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; + req_p = p_iov_info->mbx_msg_phys_addr; + p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; + rply_p = p_iov_info->mbx_reply_phys_addr; + p_bulletin_virt = p_iov_info->p_bulletins; + bulletin_p = p_iov_info->bulletins_phys; + if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { + DP_ERR(p_hwfn, + "%s called without allocating mem first\n", __func__); + return; + } + + for (idx = 0; idx < p_iov->total_vfs; idx++) { + struct qed_vf_info *vf = &p_iov_info->vfs_array[idx]; + u32 concrete; + + vf->vf_mbx.req_virt = p_req_virt_addr + idx; + vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); + vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; + vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); + + vf->state = VF_STOPPED; + vf->b_init = false; + + vf->bulletin.phys = idx * + sizeof(struct qed_bulletin_content) + + bulletin_p; + vf->bulletin.p_virt = p_bulletin_virt + idx; + vf->bulletin.size = sizeof(struct qed_bulletin_content); + + vf->relative_vf_id = idx; + vf->abs_vf_id = idx + p_iov->first_vf_in_pf; + concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id); + vf->concrete_fid = concrete; + vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | + (vf->abs_vf_id << 8); + vf->vport_id = idx + 1; + + vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; + vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + } +} + +static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + void **p_v_addr; + u16 num_vfs = 0; + + num_vfs = p_hwfn->cdev->p_iov_info->total_vfs; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "%s for %d VFs\n", __func__, num_vfs); + + /* Allocate PF Mailbox buffer (per-VF) */ + p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_msg_virt_addr; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_msg_size, + &p_iov_info->mbx_msg_phys_addr, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + /* Allocate PF Mailbox Reply buffer (per-VF) */ + p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_reply_virt_addr; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_reply_size, + &p_iov_info->mbx_reply_phys_addr, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) * + num_vfs; + p_v_addr = &p_iov_info->p_bulletins; + *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->bulletins_size, + &p_iov_info->bulletins_phys, + GFP_KERNEL); + if (!*p_v_addr) + return -ENOMEM; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n", + 
p_iov_info->mbx_msg_virt_addr, + (u64)p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_reply_virt_addr, + (u64)p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); + + return 0; +} + +static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + + if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_msg_size, + p_iov_info->mbx_msg_virt_addr, + p_iov_info->mbx_msg_phys_addr); + + if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->mbx_reply_size, + p_iov_info->mbx_reply_virt_addr, + p_iov_info->mbx_reply_phys_addr); + + if (p_iov_info->p_bulletins) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + p_iov_info->bulletins_size, + p_iov_info->p_bulletins, + p_iov_info->bulletins_phys); +} + +int qed_iov_alloc(struct qed_hwfn *p_hwfn) +{ + struct qed_pf_iov *p_sriov; + + if (!IS_PF_SRIOV(p_hwfn)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No SR-IOV - no need for IOV db\n"); + return 0; + } + + p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL); + if (!p_sriov) + return -ENOMEM; + + p_hwfn->pf_iov_info = p_sriov; + + qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, + qed_sriov_eqe_event); + + return qed_iov_allocate_vfdb(p_hwfn); +} + +void qed_iov_setup(struct qed_hwfn *p_hwfn) +{ + if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn)) + return; + + qed_iov_setup_vfdb(p_hwfn); +} + +void qed_iov_free(struct qed_hwfn *p_hwfn) +{ + qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON); + + if (IS_PF_SRIOV_ALLOC(p_hwfn)) { + qed_iov_free_vfdb(p_hwfn); + kfree(p_hwfn->pf_iov_info); + } +} + +void qed_iov_free_hw_info(struct qed_dev *cdev) +{ + kfree(cdev->p_iov_info); + cdev->p_iov_info = NULL; +} + +int qed_iov_hw_info(struct qed_hwfn *p_hwfn) +{ + struct qed_dev *cdev = p_hwfn->cdev; + int pos; + int rc; + + if (is_kdump_kernel()) + return 0; + + if (IS_VF(p_hwfn->cdev)) + return 0; + + /* Learn the PCI configuration */ + pos = pci_find_ext_capability(p_hwfn->cdev->pdev, + PCI_EXT_CAP_ID_SRIOV); + if (!pos) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n"); + return 0; + } + + /* Allocate a new struct for IOV information */ + cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL); + if (!cdev->p_iov_info) + return -ENOMEM; + + cdev->p_iov_info->pos = pos; + + rc = qed_iov_pci_cfg_info(cdev); + if (rc) + return rc; + + /* We want PF IOV to be synonemous with the existence of p_iov_info; + * In case the capability is published but there are no VFs, simply + * de-allocate the struct. + */ + if (!cdev->p_iov_info->total_vfs) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "IOV capabilities, but no VFs are published\n"); + kfree(cdev->p_iov_info); + cdev->p_iov_info = NULL; + return 0; + } + + /* First VF index based on offset is tricky: + * - If ARI is supported [likely], offset - (16 - pf_id) would + * provide the number for eng0. 2nd engine Vfs would begin + * after the first engine's VFs. + * - If !ARI, VFs would start on next device. + * so offset - (256 - pf_id) would provide the number. + * Utilize the fact that (256 - pf_id) is achieved only by later + * to differentiate between the two. 
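+ * Illustrative example (assumed values): with ARI, offset 16 on + * abs_pf_id 2 gives first_vf_in_pf = 16 - (16 - 2) = 2; without ARI, + * offset 256 on abs_pf_id 2 gives 256 - (256 - 2) = 2.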
+ */ + + if (p_hwfn->cdev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) { + u32 first = p_hwfn->cdev->p_iov_info->offset + + p_hwfn->abs_pf_id - 16; + + cdev->p_iov_info->first_vf_in_pf = first; + + if (QED_PATH_ID(p_hwfn)) + cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB; + } else { + u32 first = p_hwfn->cdev->p_iov_info->offset + + p_hwfn->abs_pf_id - 256; + + cdev->p_iov_info->first_vf_in_pf = first; + } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "First VF in hwfn 0x%08x\n", + cdev->p_iov_info->first_vf_in_pf); + + return 0; +} + +static bool _qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, + int vfid, bool b_fail_malicious) +{ + /* Check PF supports sriov */ + if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) || + !IS_PF_SRIOV_ALLOC(p_hwfn)) + return false; + + /* Check VF validity */ + if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious)) + return false; + + return true; +} + +static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid) +{ + return _qed_iov_pf_sanity_check(p_hwfn, vfid, true); +} + +static void qed_iov_set_vf_to_disable(struct qed_dev *cdev, + u16 rel_vf_id, u8 to_disable) +{ + struct qed_vf_info *vf; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) + continue; + + vf->to_disable = to_disable; + } +} + +static void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable) +{ + u16 i; + + if (!IS_QED_SRIOV(cdev)) + return; + + for (i = 0; i < cdev->p_iov_info->total_vfs; i++) + qed_iov_set_vf_to_disable(cdev, i, to_disable); +} + +static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 abs_vfid) +{ + qed_wr(p_hwfn, p_ptt, + PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, + 1 << (abs_vfid & 0x1f)); +} + +static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_vf_info *vf) +{ + int i; + + /* Set VF masks and configuration - pretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + /* iterate over all queues, clear sb consumer */ + for (i = 0; i < vf->num_sbs; i++) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + vf->igu_sbs[i], + vf->opaque_fid, true); +} + +static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, bool enable) +{ + u32 igu_vf_conf; + + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); + + if (enable) + igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; + else + igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; + + qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); +} + +static int +qed_iov_enable_vf_access_msix(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, u8 abs_vf_id, u8 num_sbs) +{ + u8 current_max = 0; + int i; + + /* For AH onward, configuration is per-PF. Find maximum of all + * the currently enabled child VFs, and set the number to be that. 
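+ * On such devices, qed_mcp_config_vf_msix() below is invoked only when + * this VF needs more SBs than any currently enabled VF.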
+ */ + if (!QED_IS_BB(p_hwfn->cdev)) { + qed_for_each_vf(p_hwfn, i) { + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)i, true); + if (!p_vf) + continue; + + current_max = max_t(u8, current_max, p_vf->num_sbs); + } + } + + if (num_sbs > current_max) + return qed_mcp_config_vf_msix(p_hwfn, p_ptt, + abs_vf_id, num_sbs); + + return 0; +} + +static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; + int rc; + + /* It's possible VF was previously considered malicious - + * clear the indication even if we're only going to disable VF. + */ + vf->b_malicious = false; + + if (vf->to_disable) + return 0; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Enable internal access for vf %x [abs %x]\n", + vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf)); + + qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf)); + + qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + rc = qed_iov_enable_vf_access_msix(p_hwfn, p_ptt, + vf->abs_vf_id, vf->num_sbs); + if (rc) + return rc; + + qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); + STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); + + qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, + p_hwfn->hw_info.hw_mode); + + /* unpretend */ + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + vf->state = VF_FREE; + + return rc; +} + +/** + * qed_iov_config_perm_table() - Configure the permission zone table. + * + * @p_hwfn: HW device data. + * @p_ptt: PTT window for writing the registers. + * @vf: VF info data. + * @enable: The actual permission for this VF. + * + * In E4, queue zone permission table size is 320x9. There + * are 320 VF queues for single engine device (256 for dual + * engine device), and each entry has the following format: + * {Valid, VF[7:0]} + */ +static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, u8 enable) +{ + u32 reg_addr, val; + u16 qzone_id = 0; + int qid; + + for (qid = 0; qid < vf->num_rxqs; qid++) { + qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, + &qzone_id); + + reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; + val = enable ? 
(vf->abs_vf_id | BIT(8)) : 0; + qed_wr(p_hwfn, p_ptt, reg_addr, val); + } +} + +static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + /* Reset vf in IGU - interrupts are still disabled */ + qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1); + + /* Permission Table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true); +} + +static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, u16 num_rx_queues) +{ + struct qed_igu_block *p_block; + struct cau_sb_entry sb_entry; + int qid = 0; + u32 val = 0; + + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov) + num_rx_queues = p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues; + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); + + for (qid = 0; qid < num_rx_queues; qid++) { + p_block = qed_get_igu_free_sb(p_hwfn, false); + vf->igu_sbs[qid] = p_block->igu_sb_id; + p_block->status &= ~QED_IGU_STATUS_FREE; + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); + + qed_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * p_block->igu_sb_id, val); + + /* Configure igu sb in CAU which were marked valid */ + qed_init_cau_sb_entry(p_hwfn, &sb_entry, + p_hwfn->rel_pf_id, vf->abs_vf_id, 1); + + qed_dmae_host2grc(p_hwfn, p_ptt, + (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + p_block->igu_sb_id * sizeof(u64), 2, NULL); + } + + vf->num_sbs = (u8)num_rx_queues; + + return vf->num_sbs; +} + +static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + int idx, igu_id; + u32 addr, val; + + /* Invalidate igu CAM lines and mark them as free */ + for (idx = 0; idx < vf->num_sbs; idx++) { + igu_id = vf->igu_sbs[idx]; + addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; + + val = qed_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + qed_wr(p_hwfn, p_ptt, addr, val); + + p_info->entry[igu_id].status |= QED_IGU_STATUS_FREE; + p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++; + } + + vf->num_sbs = 0; +} + +static void qed_iov_set_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *params, + struct qed_mcp_link_state *link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = 
link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + +static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_iov_vf_init_params *p_params) +{ + struct qed_mcp_link_capabilities link_caps; + struct qed_mcp_link_params link_params; + struct qed_mcp_link_state link_state; + u8 num_of_vf_avaiable_chains = 0; + struct qed_vf_info *vf = NULL; + u16 qid, num_irqs; + int rc = 0; + u32 cids; + u8 i; + + vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false); + if (!vf) { + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); + return -EINVAL; + } + + if (vf->b_init) { + DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", + p_params->rel_vf_id); + return -EINVAL; + } + + /* Perform sanity checking on the requested queue_id */ + for (i = 0; i < p_params->num_queues; i++) { + u16 min_vf_qzone = FEAT_NUM(p_hwfn, QED_PF_L2_QUE); + u16 max_vf_qzone = min_vf_qzone + + FEAT_NUM(p_hwfn, QED_VF_L2_QUE) - 1; + + qid = p_params->req_rx_queue[i]; + if (qid < min_vf_qzone || qid > max_vf_qzone) { + DP_NOTICE(p_hwfn, + "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n", + qid, + p_params->rel_vf_id, + min_vf_qzone, max_vf_qzone); + return -EINVAL; + } + + qid = p_params->req_tx_queue[i]; + if (qid > max_vf_qzone) { + DP_NOTICE(p_hwfn, + "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n", + qid, p_params->rel_vf_id, max_vf_qzone); + return -EINVAL; + } + + /* If client *really* wants, Tx qid can be shared with PF */ + if (qid < min_vf_qzone) + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n", + p_params->rel_vf_id, qid, i); + } + + /* Limit number of queues according to number of CIDs */ + qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n", + vf->relative_vf_id, p_params->num_queues, (u16)cids); + num_irqs = min_t(u16, p_params->num_queues, ((u16)cids)); + + num_of_vf_avaiable_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn, + p_ptt, + vf, num_irqs); + if (!num_of_vf_avaiable_chains) { + DP_ERR(p_hwfn, "no available igu sbs\n"); + return -ENOMEM; + } + + /* Choose queue number and index ranges */ + vf->num_rxqs = num_of_vf_avaiable_chains; + vf->num_txqs = num_of_vf_avaiable_chains; + + for (i = 0; i < vf->num_rxqs; i++) { + struct qed_vf_queue *p_queue = &vf->vf_queues[i]; + + p_queue->fw_rx_qid = p_params->req_rx_queue[i]; + p_queue->fw_tx_qid = p_params->req_tx_queue[i]; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n", + vf->relative_vf_id, i, vf->igu_sbs[i], + p_queue->fw_rx_qid, p_queue->fw_tx_qid); + } + + /* Update the link configuration in bulletin */ + memcpy(&link_params, qed_mcp_get_link_params(p_hwfn), + sizeof(link_params)); + memcpy(&link_state, qed_mcp_get_link_state(p_hwfn), sizeof(link_state)); + memcpy(&link_caps, qed_mcp_get_link_capabilities(p_hwfn), + sizeof(link_caps)); + qed_iov_set_link(p_hwfn, p_params->rel_vf_id, + &link_params, &link_state, &link_caps); + + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf); + if (!rc) { + vf->b_init = true; + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->p_iov_info->num_vfs++; + } + + return rc; +} + +static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn, + 
struct qed_ptt *p_ptt, u16 rel_vf_id) +{ + struct qed_mcp_link_capabilities caps; + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; + struct qed_vf_info *vf = NULL; + + vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!vf) { + DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__); + return -EINVAL; + } + + if (vf->bulletin.p_virt) + memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt)); + + memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info)); + + /* Get the link configuration back in bulletin so + * that when VFs are re-enabled they get the actual + * link configuration. + */ + memcpy(¶ms, qed_mcp_get_link_params(p_hwfn), sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link)); + memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); + qed_iov_set_link(p_hwfn, rel_vf_id, ¶ms, &link, &caps); + + /* Forget the VF's acquisition message */ + memset(&vf->acquire, 0, sizeof(vf->acquire)); + + /* disablng interrupts and resetting permission table was done during + * vf-close, however, we could get here without going through vf_close + */ + /* Disable Interrupts for VF */ + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); + + /* Reset Permission table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); + + vf->num_rxqs = 0; + vf->num_txqs = 0; + qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); + + if (vf->b_init) { + vf->b_init = false; + + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->p_iov_info->num_vfs--; + } + + return 0; +} + +static bool qed_iov_tlv_supported(u16 tlvtype) +{ + return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX; +} + +/* place a given tlv on the tlv buffer, continuing current tlv list */ +void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length) +{ + struct channel_tlv *tl = (struct channel_tlv *)*offset; + + tl->type = type; + tl->length = length; + + /* Offset should keep pointing to next TLV (the end of the last) */ + *offset += length; + + /* Return a pointer to the start of the added tlv */ + return *offset - length; +} + +/* list the types and lengths of the tlvs on the buffer */ +void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list) +{ + u16 i = 1, total_length = 0; + struct channel_tlv *tlv; + + do { + tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length); + + /* output tlv */ + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "TLV number %d: type %d, length %d\n", + i, tlv->type, tlv->length); + + if (tlv->type == CHANNEL_TLV_LIST_END) + return; + + /* Validate entry - protect against malicious VFs */ + if (!tlv->length) { + DP_NOTICE(p_hwfn, "TLV of length 0 found\n"); + return; + } + + total_length += tlv->length; + + if (total_length >= sizeof(struct tlv_buffer_size)) { + DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n"); + return; + } + + i++; + } while (1); +} + +static void qed_iov_send_response(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + u16 length, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct qed_dmae_params params; + u8 eng_vf_id; + + mbx->reply_virt->default_resp.hdr.status = status; + + qed_dp_tlv_list(p_hwfn, mbx->reply_virt); + + eng_vf_id = p_vf->abs_vf_id; + + memset(¶ms, 0, sizeof(params)); + SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1); + params.dst_vfid = eng_vf_id; + + qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64), + mbx->req_virt->first_tlv.reply_address + + sizeof(u64), + (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4, + ¶ms); + + /* Once PF copies the rc to the VF, the latter 
can continue + * and send an additional message. So we have to make sure the + * channel would be re-set to ready prior to that. + */ + REG_WR(p_hwfn, + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1); + + qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys, + mbx->req_virt->first_tlv.reply_address, + sizeof(u64) / 4, ¶ms); +} + +static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn, + enum qed_iov_vport_update_flag flag) +{ + switch (flag) { + case QED_IOV_VP_UPDATE_ACTIVATE: + return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + case QED_IOV_VP_UPDATE_VLAN_STRIP: + return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + case QED_IOV_VP_UPDATE_TX_SWITCH: + return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + case QED_IOV_VP_UPDATE_MCAST: + return CHANNEL_TLV_VPORT_UPDATE_MCAST; + case QED_IOV_VP_UPDATE_ACCEPT_PARAM: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + case QED_IOV_VP_UPDATE_RSS: + return CHANNEL_TLV_VPORT_UPDATE_RSS; + case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + case QED_IOV_VP_UPDATE_SGE_TPA: + return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + default: + return 0; + } +} + +static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_iov_vf_mbx *p_mbx, + u8 status, + u16 tlvs_mask, u16 tlvs_accepted) +{ + struct pfvf_def_resp_tlv *resp; + u16 size, total_len, i; + + memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); + p_mbx->offset = (u8 *)p_mbx->reply_virt; + size = sizeof(struct pfvf_def_resp_tlv); + total_len = size; + + qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); + + /* Prepare response for all extended tlvs if they are found by PF */ + for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) { + if (!(tlvs_mask & BIT(i))) + continue; + + resp = qed_add_tlv(p_hwfn, &p_mbx->offset, + qed_iov_vport_to_tlv(p_hwfn, i), size); + + if (tlvs_accepted & BIT(i)) + resp->hdr.status = status; + else + resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - vport_update response: TLV %d, status %02x\n", + p_vf->relative_vf_id, + qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); + + total_len += size; + } + + qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + return total_len; +} + +static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf_info, + u16 type, u16 length, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx; + + mbx->offset = (u8 *)mbx->reply_virt; + + qed_add_tlv(p_hwfn, &mbx->offset, type, length); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); +} + +static struct +qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct qed_vf_info *vf = NULL; + + vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); + if (!vf) + return NULL; + + return &vf->p_vf_info; +} + +static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid) +{ + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false); + + if (!vf_info) + return; + + /* Clear the VF mac */ + eth_zero_addr(vf_info->mac); + + vf_info->rx_accept_mode = 0; + vf_info->tx_accept_mode = 0; +} + +static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + u32 i, j; + + p_vf->vf_bulletin = 0; + 
p_vf->vport_instance = 0; + p_vf->configured_features = 0; + + /* If VF previously requested less resources, go back to default */ + p_vf->num_rxqs = p_vf->num_sbs; + p_vf->num_txqs = p_vf->num_sbs; + + p_vf->num_active_rxqs = 0; + + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { + struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; + + for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) { + if (!p_queue->cids[j].p_cid) + continue; + + qed_eth_queue_cid_release(p_hwfn, + p_queue->cids[j].p_cid); + p_queue->cids[j].p_cid = NULL; + } + } + + memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); + memset(&p_vf->acquire, 0, sizeof(p_vf->acquire)); + qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id); +} + +/* Returns either 0, or log(size) */ +static u32 qed_iov_vf_db_bar_size(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 val = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE); + + if (val) + return val + 11; + return 0; +} + +static void +qed_iov_vf_mbx_acquire_resc_cids(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons; + u8 db_size = qed_db_addr_vf(1, DQ_DEMS_LEGACY) - + qed_db_addr_vf(0, DQ_DEMS_LEGACY); + u32 bar_size; + + p_resp->num_cids = min_t(u8, p_req->num_cids, num_vf_cons); + + /* If VF didn't bother asking for QIDs than don't bother limiting + * number of CIDs. The VF doesn't care about the number, and this + * has the likely result of causing an additional acquisition. + */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + /* If doorbell bar was mapped by VF, limit the VF CIDs to an amount + * that would make sure doorbells for all CIDs fall within the bar. + * If it doesn't, make sure regview window is sufficient. + */ + if (p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PHYSICAL_BAR) { + bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); + if (bar_size) + bar_size = 1 << bar_size; + + if (p_hwfn->cdev->num_hwfns > 1) + bar_size /= 2; + } else { + bar_size = PXP_VF_BAR0_DQ_LENGTH; + } + + if (bar_size / db_size < 256) + p_resp->num_cids = min_t(u8, p_resp->num_cids, + (u8)(bar_size / db_size)); +} + +static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + u8 i; + + /* Queue related information */ + p_resp->num_rxqs = p_vf->num_rxqs; + p_resp->num_txqs = p_vf->num_txqs; + p_resp->num_sbs = p_vf->num_sbs; + + for (i = 0; i < p_resp->num_sbs; i++) { + p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i]; + p_resp->hw_sbs[i].sb_qid = 0; + } + + /* These fields are filled for backward compatibility. + * Unused by modern vfs. + */ + for (i = 0; i < p_resp->num_rxqs; i++) { + qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid, + (u16 *)&p_resp->hw_qid[i]); + p_resp->cid[i] = i; + } + + /* Filter related information */ + p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters, + p_req->num_mac_filters); + p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters, + p_req->num_vlan_filters); + + qed_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp); + + /* This isn't really needed/enforced, but some legacy VFs might depend + * on the correct filling of this field. 
+ */ + p_resp->num_mc_filters = QED_MAX_MC_ADDRS; + + /* Validate sufficient resources for VF */ + if (p_resp->num_rxqs < p_req->num_rxqs || + p_resp->num_txqs < p_req->num_txqs || + p_resp->num_sbs < p_req->num_sbs || + p_resp->num_mac_filters < p_req->num_mac_filters || + p_resp->num_vlan_filters < p_req->num_vlan_filters || + p_resp->num_mc_filters < p_req->num_mc_filters || + p_resp->num_cids < p_req->num_cids) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n", + p_vf->abs_vf_id, + p_req->num_rxqs, + p_resp->num_rxqs, + p_req->num_rxqs, + p_resp->num_txqs, + p_req->num_sbs, + p_resp->num_sbs, + p_req->num_mac_filters, + p_resp->num_mac_filters, + p_req->num_vlan_filters, + p_resp->num_vlan_filters, + p_req->num_mc_filters, + p_resp->num_mc_filters, + p_req->num_cids, p_resp->num_cids); + + /* Some legacy OSes are incapable of correctly handling this + * failure. + */ + if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) && + (p_vf->acquire.vfdev_info.os_type == + VFPF_ACQUIRE_OS_WINDOWS)) + return PFVF_STATUS_SUCCESS; + + return PFVF_STATUS_NO_RESOURCE; + } + + return PFVF_STATUS_SUCCESS; +} + +static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn, + struct pfvf_stats_info *p_stats) +{ + p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); + p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B + + offsetof(struct ustorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); + p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B + + offsetof(struct pstorm_vf_zone, + non_trigger.eth_queue_stat); + p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); + p_stats->tstats.address = 0; + p_stats->tstats.len = 0; +} + +static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; + u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + struct pf_vf_resc *resc = &resp->resc; + int rc; + + memset(resp, 0, sizeof(*resp)); + + /* Write the PF version so that VF would know which version + * is supported - might be later overridden. This guarantees that + * VF could recognize legacy PF based on lack of versions in reply. 
+ */ + pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR; + pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR; + + if (vf->state != VF_FREE && vf->state != VF_STOPPED) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] sent ACQUIRE but is already in state %d - fail request\n", + vf->abs_vf_id, vf->state); + goto out; + } + + /* Validate FW compatibility */ + if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) { + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] is pre-fastpath HSI\n", + vf->abs_vf_id); + p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN; + } else { + DP_INFO(p_hwfn, + "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n", + vf->abs_vf_id, + req->vfdev_info.eth_fp_hsi_major, + req->vfdev_info.eth_fp_hsi_minor, + ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR); + + goto out; + } + } + + /* On 100g PFs, prevent old VFs from loading */ + if ((p_hwfn->cdev->num_hwfns > 1) && + !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) { + DP_INFO(p_hwfn, + "VF[%d] is running an old driver that doesn't support 100g\n", + vf->abs_vf_id); + goto out; + } + + /* Store the acquire message */ + memcpy(&vf->acquire, req, sizeof(vf->acquire)); + + vf->opaque_fid = req->vfdev_info.opaque_fid; + + vf->vf_bulletin = req->bulletin_addr; + vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? + vf->bulletin.size : req->bulletin_size; + + /* fill in pfdev info */ + pfdev_info->chip_num = p_hwfn->cdev->chip_num; + pfdev_info->db_size = 0; + pfdev_info->indices_per_sb = PIS_PER_SB; + + pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED | + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE; + if (p_hwfn->cdev->num_hwfns > 1) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G; + + /* Share our ability to use multiple queue-ids only with VFs + * that request it. + */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS) + pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS; + + /* Share the sizes of the bars with VF */ + resp->pfdev_info.bar_size = qed_iov_vf_db_bar_size(p_hwfn, p_ptt); + + qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info); + + memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN); + + pfdev_info->fw_major = FW_MAJOR_VERSION; + pfdev_info->fw_minor = FW_MINOR_VERSION; + pfdev_info->fw_rev = FW_REVISION_VERSION; + pfdev_info->fw_eng = FW_ENGINEERING_VERSION; + + /* Incorrect when legacy, but doesn't matter as legacy isn't reading + * this field. + */ + pfdev_info->minor_fp_hsi = min_t(u8, ETH_HSI_VER_MINOR, + req->vfdev_info.eth_fp_hsi_minor); + pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX; + qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL); + + pfdev_info->dev_type = p_hwfn->cdev->type; + pfdev_info->chip_rev = p_hwfn->cdev->chip_rev; + + /* Fill resources available to VF; Make sure there are enough to + * satisfy the VF's request. 
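+ * On a shortage, qed_iov_vf_mbx_acquire_resc() replies with + * PFVF_STATUS_NO_RESOURCE (legacy Windows VFs excepted, see above), and + * the VF is then expected to retry ACQUIRE with a reduced request.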
+ */ + vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf, + &req->resc_request, resc); + if (vfpf_status != PFVF_STATUS_SUCCESS) + goto out; + + /* Start the VF in FW */ + rc = qed_sp_vf_start(p_hwfn, vf); + if (rc) { + DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id); + vfpf_status = PFVF_STATUS_FAILURE; + goto out; + } + + /* Fill agreed size of bulletin board in response */ + resp->bulletin_size = vf->bulletin.size; + qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt); + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n" + "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n", + vf->abs_vf_id, + resp->pfdev_info.chip_num, + resp->pfdev_info.db_size, + resp->pfdev_info.indices_per_sb, + resp->pfdev_info.capabilities, + resc->num_rxqs, + resc->num_txqs, + resc->num_sbs, + resc->num_mac_filters, + resc->num_vlan_filters); + vf->state = VF_ACQUIRED; + + /* Prepare Response */ +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE, + sizeof(struct pfvf_acquire_resp_tlv), vfpf_status); +} + +static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, bool val) +{ + struct qed_sp_vport_update_params params; + int rc; + + if (val == p_vf->spoof_chk) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Spoofchk value[%d] is already configured\n", val); + return 0; + } + + memset(¶ms, 0, sizeof(struct qed_sp_vport_update_params)); + params.opaque_fid = p_vf->opaque_fid; + params.vport_id = p_vf->vport_id; + params.update_anti_spoofing_en_flg = 1; + params.anti_spoofing_en = val; + + rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); + if (!rc) { + p_vf->spoof_chk = val; + p_vf->req_spoofchk_val = p_vf->spoof_chk; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Spoofchk val[%d] configured\n", val); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Spoofchk configuration[val:%d] failed for VF[%d]\n", + val, p_vf->relative_vf_id); + } + + return rc; +} + +static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf) +{ + struct qed_filter_ucast filter; + int rc = 0; + int i; + + memset(&filter, 0, sizeof(filter)); + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + filter.opcode = QED_FILTER_ADD; + + /* Reconfigure vlans */ + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { + if (!p_vf->shadow_config.vlans[i].used) + continue; + + filter.type = QED_FILTER_VLAN; + filter.vlan = p_vf->shadow_config.vlans[i].vid; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Reconfiguring VLAN [0x%04x] for VF [%04x]\n", + filter.vlan, p_vf->relative_vf_id); + rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, QED_SPQ_MODE_CB, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to configure VLAN [%04x] to VF [%04x]\n", + filter.vlan, p_vf->relative_vf_id); + break; + } + } + + return rc; +} + +static int +qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u64 events) +{ + int rc = 0; + + if ((events & BIT(VLAN_ADDR_FORCED)) && + !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) + rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); + + return rc; +} + +static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, u64 events) +{ + int rc = 0; + struct qed_filter_ucast filter; + + if (!p_vf->vport_instance) + return -EINVAL; + + if ((events & BIT(MAC_ADDR_FORCED)) || + 
p_vf->p_vf_info.is_trusted_configured) { + /* Since there's no way [currently] of removing the MAC, + * we can always assume this means we need to force it. + */ + memset(&filter, 0, sizeof(filter)); + filter.type = QED_FILTER_MAC; + filter.opcode = QED_FILTER_REPLACE; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac); + + rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, QED_SPQ_MODE_CB, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "PF failed to configure MAC for VF\n"); + return rc; + } + if (p_vf->p_vf_info.is_trusted_configured) + p_vf->configured_features |= + BIT(VFPF_BULLETIN_MAC_ADDR); + else + p_vf->configured_features |= + BIT(MAC_ADDR_FORCED); + } + + if (events & BIT(VLAN_ADDR_FORCED)) { + struct qed_sp_vport_update_params vport_update; + u8 removal; + int i; + + memset(&filter, 0, sizeof(filter)); + filter.type = QED_FILTER_VLAN; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + filter.vlan = p_vf->bulletin.p_virt->pvid; + filter.opcode = filter.vlan ? QED_FILTER_REPLACE : + QED_FILTER_FLUSH; + + /* Send the ramrod */ + rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, QED_SPQ_MODE_CB, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "PF failed to configure VLAN for VF\n"); + return rc; + } + + /* Update the default-vlan & silent vlan stripping */ + memset(&vport_update, 0, sizeof(vport_update)); + vport_update.opaque_fid = p_vf->opaque_fid; + vport_update.vport_id = p_vf->vport_id; + vport_update.update_default_vlan_enable_flg = 1; + vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; + vport_update.update_default_vlan_flg = 1; + vport_update.default_vlan = filter.vlan; + + vport_update.update_inner_vlan_removal_flg = 1; + removal = filter.vlan ? 1 + : p_vf->shadow_config.inner_vlan_removal; + vport_update.inner_vlan_removal_flg = removal; + vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0; + rc = qed_sp_vport_update(p_hwfn, + &vport_update, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "PF failed to configure VF vport for vlan\n"); + return rc; + } + + /* Update all the Rx queues */ + for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) { + struct qed_vf_queue *p_queue = &p_vf->vf_queues[i]; + struct qed_queue_cid *p_cid = NULL; + + /* There can be at most 1 Rx queue on qzone. Find it */ + p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); + if (!p_cid) + continue; + + rc = qed_sp_eth_rx_queues_update(p_hwfn, + (void **)&p_cid, + 1, 0, 1, + QED_SPQ_MODE_EBLOCK, + NULL); + if (rc) { + DP_NOTICE(p_hwfn, + "Failed to send Rx update fo queue[0x%04x]\n", + p_cid->rel.queue_id); + return rc; + } + } + + if (filter.vlan) + p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; + else + p_vf->configured_features &= ~BIT(VLAN_ADDR_FORCED); + } + + /* If forced features are terminated, we need to configure the shadow + * configuration back again. 
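+ * (i.e. re-apply the VLAN filters the VF configured on its own, which
+ * are tracked in p_vf->shadow_config while a PF-forced VLAN is active.)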
+ */ + if (events) + qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); + + return rc; +} + +static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_sp_vport_start_params params = { 0 }; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_vport_start_tlv *start; + u8 status = PFVF_STATUS_SUCCESS; + struct qed_vf_info *vf_info; + u64 *p_bitmap; + int sb_id; + int rc; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, + "Failed to get VF info, invalid vfid [%d]\n", + vf->relative_vf_id); + return; + } + + vf->state = VF_ENABLED; + start = &mbx->req_virt->start_vport; + + qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); + + /* Initialize Status block in CAU */ + for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { + if (!start->sb_addr[sb_id]) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] did not fill the address of SB %d\n", + vf->relative_vf_id, sb_id); + break; + } + + qed_int_cau_conf_sb(p_hwfn, p_ptt, + start->sb_addr[sb_id], + vf->igu_sbs[sb_id], vf->abs_vf_id, 1); + } + + vf->mtu = start->mtu; + vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; + + /* Take into consideration configuration forced by hypervisor; + * If none is configured, use the supplied VF values [for old + * vfs that would still be fine, since they passed '0' as padding]. + */ + p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap; + if (!(*p_bitmap & BIT(VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) { + u8 vf_req = start->only_untagged; + + vf_info->bulletin.p_virt->default_only_untagged = vf_req; + *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT; + } + + params.tpa_mode = start->tpa_mode; + params.remove_inner_vlan = start->inner_vlan_removal; + params.tx_switching = true; + + params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged; + params.drop_ttl0 = false; + params.concrete_fid = vf->concrete_fid; + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + params.max_buffers_per_cqe = start->max_buffers_per_cqe; + params.mtu = vf->mtu; + + /* Non trusted VFs should enable control frame filtering */ + params.check_mac = !vf->p_vf_info.is_trusted_configured; + + rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); + if (rc) { + DP_ERR(p_hwfn, + "%s returned error %d\n", __func__, rc); + status = PFVF_STATUS_FAILURE; + } else { + vf->vport_instance++; + + /* Force configuration if needed on the newly opened vport */ + qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap); + + __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val); + } + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u8 status = PFVF_STATUS_SUCCESS; + int rc; + + vf->vport_instance--; + vf->spoof_chk = false; + + if ((qed_iov_validate_active_rxq(p_hwfn, vf)) || + (qed_iov_validate_active_txq(p_hwfn, vf))) { + vf->b_malicious = true; + DP_NOTICE(p_hwfn, + "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n", + vf->abs_vf_id); + status = PFVF_STATUS_MALICIOUS; + goto out; + } + + rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id); + if (rc) { + DP_ERR(p_hwfn, "%s returned error %d\n", + __func__, rc); + status = PFVF_STATUS_FAILURE; + } + + /* Forget the configuration on the vport */ + vf->configured_features = 0; + memset(&vf->shadow_config, 0, 
sizeof(vf->shadow_config)); + +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf, + u8 status, bool b_legacy) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct pfvf_start_queue_resp_tlv *p_tlv; + struct vfpf_start_rxq_tlv *req; + u16 length; + + mbx->offset = (u8 *)mbx->reply_virt; + + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. + */ + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ, + length); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* Update the TLV with the response */ + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) { + req = &mbx->req_virt->start_rxq; + p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B + + offsetof(struct mstorm_vf_zone, + non_trigger.eth_rx_queue_producers) + + sizeof(struct eth_rx_prod_data) * req->rx_qid; + } + + qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); +} + +static u8 qed_iov_vf_mbx_qid(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, bool b_is_tx) +{ + struct qed_iov_vf_mbx *p_mbx = &p_vf->vf_mbx; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Search for the qid if the VF published its going to provide it */ + if (!(p_vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS)) { + if (b_is_tx) + return QED_IOV_LEGACY_QID_TX; + else + return QED_IOV_LEGACY_QID_RX; + } + + p_qid_tlv = (struct vfpf_qid_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + CHANNEL_TLV_QID); + if (!p_qid_tlv) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%2x]: Failed to provide qid\n", + p_vf->relative_vf_id); + + return QED_IOV_QID_INVALID; + } + + if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Provided qid out-of-bounds %02x\n", + p_vf->relative_vf_id, p_qid_tlv->qid); + return QED_IOV_QID_INVALID; + } + + return p_qid_tlv->qid; +} + +static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_queue_start_common_params params; + struct qed_queue_cid_vf_params vf_params; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_NO_RESOURCE; + u8 qid_usage_idx, vf_legacy = 0; + struct vfpf_start_rxq_tlv *req; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + struct qed_sb_info sb_dummy; + int rc; + + req = &mbx->req_virt->start_rxq; + + if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid, + QED_IOV_VALIDATE_Q_DISABLE) || + !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + + p_queue = &vf->vf_queues[req->rx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; + + vf_legacy = qed_vf_calculate_legacy(vf); + + /* Acquire a new queue-cid */ + memset(¶ms, 0, sizeof(params)); + params.queue_id = p_queue->fw_rx_qid; + params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; + params.sb_idx = req->sb_index; + + 
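+ /* The queue-cid created below is keyed by the VF id, the VF-relative
+ * queue id and qid_usage_idx, so multiple cids sharing the same
+ * queue-zone can be told apart.
+ */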
memset(&vf_params, 0, sizeof(vf_params)); + vf_params.vfid = vf->relative_vf_id; + vf_params.vf_qid = (u8)req->rx_qid; + vf_params.vf_legacy = vf_legacy; + vf_params.qid_usage_idx = qid_usage_idx; + p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, + ¶ms, true, &vf_params); + if (!p_cid) + goto out; + + /* Legacy VFs have their Producers in a different location, which they + * calculate on their own and clean the producer prior to this. + */ + if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD)) + qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY + + SEM_FAST_REG_INT_RAM + + MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, + req->rx_qid), 0); + + rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid, + req->bd_max_bytes, + req->rxq_addr, + req->cqe_pbl_addr, req->cqe_pbl_size); + if (rc) { + status = PFVF_STATUS_FAILURE; + qed_eth_queue_cid_release(p_hwfn, p_cid); + } else { + p_queue->cids[qid_usage_idx].p_cid = p_cid; + p_queue->cids[qid_usage_idx].b_is_tx = false; + status = PFVF_STATUS_SUCCESS; + vf->num_active_rxqs++; + } + +out: + qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status, + !!(vf_legacy & + QED_QCID_LEGACY_VF_RX_PROD)); +} + +static void +qed_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp, + struct qed_tunnel_info *p_tun, + u16 tunn_feature_mask) +{ + p_resp->tunn_feature_mask = tunn_feature_mask; + p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled; + p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled; + p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled; + p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled; + p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled; + p_resp->vxlan_clss = p_tun->vxlan.tun_cls; + p_resp->l2gre_clss = p_tun->l2_gre.tun_cls; + p_resp->ipgre_clss = p_tun->ip_gre.tun_cls; + p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls; + p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls; + p_resp->geneve_udp_port = p_tun->geneve_port.port; + p_resp->vxlan_udp_port = p_tun->vxlan_port.port; +} + +static void +__qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_tun, + enum qed_tunn_mode mask, u8 tun_cls) +{ + if (p_req->tun_mode_update_mask & BIT(mask)) { + p_tun->b_update_mode = true; + + if (p_req->tunn_mode & BIT(mask)) + p_tun->b_mode_enabled = true; + } + + p_tun->tun_cls = tun_cls; +} + +static void +qed_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_tun, + struct qed_tunn_update_udp_port *p_port, + enum qed_tunn_mode mask, + u8 tun_cls, u8 update_port, u16 port) +{ + if (update_port) { + p_port->b_update_port = true; + p_port->port = port; + } + + __qed_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls); +} + +static bool +qed_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req) +{ + bool b_update_requested = false; + + if (p_req->tun_mode_update_mask || p_req->update_tun_cls || + p_req->update_geneve_port || p_req->update_vxlan_port) + b_update_requested = true; + + return b_update_requested; +} + +static void qed_pf_validate_tunn_mode(struct qed_tunn_update_type *tun, int *rc) +{ + if (tun->b_update_mode && !tun->b_mode_enabled) { + tun->b_update_mode = false; + *rc = -EINVAL; + } +} + +static int +qed_pf_validate_modify_tunn_config(struct qed_hwfn *p_hwfn, + u16 *tun_features, bool *update, + struct qed_tunnel_info *tun_src) +{ + struct qed_eth_cb_ops *ops = p_hwfn->cdev->protocol_ops.eth; + struct qed_tunnel_info *tun = &p_hwfn->cdev->tunnel; + u16 bultn_vxlan_port, bultn_geneve_port; + void *cookie = p_hwfn->cdev->ops_cookie; 
+ int i, rc = 0; + + *tun_features = p_hwfn->cdev->tunn_feature_mask; + bultn_vxlan_port = tun->vxlan_port.port; + bultn_geneve_port = tun->geneve_port.port; + qed_pf_validate_tunn_mode(&tun_src->vxlan, &rc); + qed_pf_validate_tunn_mode(&tun_src->l2_geneve, &rc); + qed_pf_validate_tunn_mode(&tun_src->ip_geneve, &rc); + qed_pf_validate_tunn_mode(&tun_src->l2_gre, &rc); + qed_pf_validate_tunn_mode(&tun_src->ip_gre, &rc); + + if ((tun_src->b_update_rx_cls || tun_src->b_update_tx_cls) && + (tun_src->vxlan.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->l2_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->ip_geneve.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->l2_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN || + tun_src->ip_gre.tun_cls != QED_TUNN_CLSS_MAC_VLAN)) { + tun_src->b_update_rx_cls = false; + tun_src->b_update_tx_cls = false; + rc = -EINVAL; + } + + if (tun_src->vxlan_port.b_update_port) { + if (tun_src->vxlan_port.port == tun->vxlan_port.port) { + tun_src->vxlan_port.b_update_port = false; + } else { + *update = true; + bultn_vxlan_port = tun_src->vxlan_port.port; + } + } + + if (tun_src->geneve_port.b_update_port) { + if (tun_src->geneve_port.port == tun->geneve_port.port) { + tun_src->geneve_port.b_update_port = false; + } else { + *update = true; + bultn_geneve_port = tun_src->geneve_port.port; + } + } + + qed_for_each_vf(p_hwfn, i) { + qed_iov_bulletin_set_udp_ports(p_hwfn, i, bultn_vxlan_port, + bultn_geneve_port); + } + + qed_schedule_iov(p_hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + ops->ports_update(cookie, bultn_vxlan_port, bultn_geneve_port); + + return rc; +} + +static void qed_iov_vf_mbx_update_tunn_param(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_update_tunn_param_tlv *p_resp; + struct vfpf_update_tunn_param_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + bool b_update_required = false; + struct qed_tunnel_info tunn; + u16 tunn_feature_mask = 0; + int i, rc = 0; + + mbx->offset = (u8 *)mbx->reply_virt; + + memset(&tunn, 0, sizeof(tunn)); + p_req = &mbx->req_virt->tunn_param_update; + + if (!qed_iov_pf_validate_tunn_param(p_req)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No tunnel update requested by VF\n"); + status = PFVF_STATUS_FAILURE; + goto send_resp; + } + + tunn.b_update_rx_cls = p_req->update_tun_cls; + tunn.b_update_tx_cls = p_req->update_tun_cls; + + qed_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port, + QED_MODE_VXLAN_TUNN, p_req->vxlan_clss, + p_req->update_vxlan_port, + p_req->vxlan_port); + qed_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port, + QED_MODE_L2GENEVE_TUNN, + p_req->l2geneve_clss, + p_req->update_geneve_port, + p_req->geneve_port); + __qed_iov_pf_update_tun_param(p_req, &tunn.ip_geneve, + QED_MODE_IPGENEVE_TUNN, + p_req->ipgeneve_clss); + __qed_iov_pf_update_tun_param(p_req, &tunn.l2_gre, + QED_MODE_L2GRE_TUNN, p_req->l2gre_clss); + __qed_iov_pf_update_tun_param(p_req, &tunn.ip_gre, + QED_MODE_IPGRE_TUNN, p_req->ipgre_clss); + + /* If PF modifies VF's req then it should + * still return an error in case of partial configuration + * or modified configuration as opposed to requested one. + */ + rc = qed_pf_validate_modify_tunn_config(p_hwfn, &tunn_feature_mask, + &b_update_required, &tunn); + + if (rc) + status = PFVF_STATUS_FAILURE; + + /* If QED client is willing to update anything ? 
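+ * (b_update_required is only set when a UDP port actually changed, so
+ * an unchanged configuration skips the tunnel-update ramrod below.)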
*/ + if (b_update_required) { + u16 geneve_port; + + rc = qed_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) + status = PFVF_STATUS_FAILURE; + + geneve_port = p_tun->geneve_port.port; + qed_for_each_vf(p_hwfn, i) { + qed_iov_bulletin_set_udp_ports(p_hwfn, i, + p_tun->vxlan_port.port, + geneve_port); + } + } + +send_resp: + p_resp = qed_add_tlv(p_hwfn, &mbx->offset, + CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp)); + + qed_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + +static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf, + u32 cid, u8 status) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_start_queue_resp_tlv *p_tlv; + bool b_legacy = false; + u16 length; + + mbx->offset = (u8 *)mbx->reply_virt; + + /* Taking a bigger struct instead of adding a TLV to list was a + * mistake, but one which we're now stuck with, as some older + * clients assume the size of the previous response. + */ + if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor == + ETH_HSI_VER_NO_PKT_LEN_TUNN) + b_legacy = true; + + if (!b_legacy) + length = sizeof(*p_tlv); + else + length = sizeof(struct pfvf_def_resp_tlv); + + p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ, + length); + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + /* Update the TLV with the response */ + if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) + p_tlv->offset = qed_db_addr_vf(cid, DQ_DEMS_LEGACY); + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, length, status); +} + +static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_queue_start_common_params params; + struct qed_queue_cid_vf_params vf_params; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_NO_RESOURCE; + struct vfpf_start_txq_tlv *req; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + struct qed_sb_info sb_dummy; + u8 qid_usage_idx, vf_legacy; + u32 cid = 0; + int rc; + u16 pq; + + memset(¶ms, 0, sizeof(params)); + req = &mbx->req_virt->start_txq; + + if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid, + QED_IOV_VALIDATE_Q_NA) || + !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb)) + goto out; + + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + + p_queue = &vf->vf_queues[req->tx_qid]; + if (p_queue->cids[qid_usage_idx].p_cid) + goto out; + + vf_legacy = qed_vf_calculate_legacy(vf); + + /* Acquire a new queue-cid */ + params.queue_id = p_queue->fw_tx_qid; + params.vport_id = vf->vport_id; + params.stats_id = vf->abs_vf_id + 0x10; + + /* Since IGU index is passed via sb_info, construct a dummy one */ + memset(&sb_dummy, 0, sizeof(sb_dummy)); + sb_dummy.igu_sb_id = req->hw_sb; + params.p_sb = &sb_dummy; + params.sb_idx = req->sb_index; + + memset(&vf_params, 0, sizeof(vf_params)); + vf_params.vfid = vf->relative_vf_id; + vf_params.vf_qid = (u8)req->tx_qid; + vf_params.vf_legacy = vf_legacy; + vf_params.qid_usage_idx = qid_usage_idx; + + p_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid, + ¶ms, false, &vf_params); + if (!p_cid) + goto out; + + pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id); + rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid, + req->pbl_addr, 
req->pbl_size, pq); + if (rc) { + status = PFVF_STATUS_FAILURE; + qed_eth_queue_cid_release(p_hwfn, p_cid); + } else { + status = PFVF_STATUS_SUCCESS; + p_queue->cids[qid_usage_idx].p_cid = p_cid; + p_queue->cids[qid_usage_idx].b_is_tx = true; + cid = p_cid->cid; + } + +out: + qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, cid, status); +} + +static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + u16 rxq_id, + u8 qid_usage_idx, bool cqe_completion) +{ + struct qed_vf_queue *p_queue; + int rc = 0; + + if (!qed_iov_validate_rxq(p_hwfn, vf, rxq_id, QED_IOV_VALIDATE_Q_NA)) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n", + vf->relative_vf_id, rxq_id, qid_usage_idx); + return -EINVAL; + } + + p_queue = &vf->vf_queues[rxq_id]; + + /* We've validated the index and the existence of the active RXQ - + * now we need to make sure that it's using the correct qid. + */ + if (!p_queue->cids[qid_usage_idx].p_cid || + p_queue->cids[qid_usage_idx].b_is_tx) { + struct qed_queue_cid *p_cid; + + p_cid = qed_iov_get_vf_rx_queue_cid(p_queue); + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n", + vf->relative_vf_id, + rxq_id, qid_usage_idx, rxq_id, p_cid->qid_usage_idx); + return -EINVAL; + } + + /* Now that we know we have a valid Rx-queue - close it */ + rc = qed_eth_rx_queue_stop(p_hwfn, + p_queue->cids[qid_usage_idx].p_cid, + false, cqe_completion); + if (rc) + return rc; + + p_queue->cids[qid_usage_idx].p_cid = NULL; + vf->num_active_rxqs--; + + return 0; +} + +static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + u16 txq_id, u8 qid_usage_idx) +{ + struct qed_vf_queue *p_queue; + int rc = 0; + + if (!qed_iov_validate_txq(p_hwfn, vf, txq_id, QED_IOV_VALIDATE_Q_NA)) + return -EINVAL; + + p_queue = &vf->vf_queues[txq_id]; + if (!p_queue->cids[qid_usage_idx].p_cid || + !p_queue->cids[qid_usage_idx].b_is_tx) + return -EINVAL; + + rc = qed_eth_tx_queue_stop(p_hwfn, p_queue->cids[qid_usage_idx].p_cid); + if (rc) + return rc; + + p_queue->cids[qid_usage_idx].p_cid = NULL; + return 0; +} + +static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_FAILURE; + struct vfpf_stop_rxqs_tlv *req; + u8 qid_usage_idx; + int rc; + + /* There has never been an official driver that used this interface + * for stopping multiple queues, and it is now considered deprecated. + * Validate this isn't used here. 
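+ * (a conforming VF always sets num_rxqs to 1 here; anything else is
+ * answered with PFVF_STATUS_NOT_SUPPORTED.)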
+ */ + req = &mbx->req_virt->stop_rxqs; + if (req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Odd; VF[%d] tried stopping multiple Rx queues\n", + vf->relative_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + /* Find which qid-index is associated with the queue */ + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + + rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, + qid_usage_idx, req->cqe_completion); + if (!rc) + status = PFVF_STATUS_SUCCESS; +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, + length, status); +} + +static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + u8 status = PFVF_STATUS_FAILURE; + struct vfpf_stop_txqs_tlv *req; + u8 qid_usage_idx; + int rc; + + /* There has never been an official driver that used this interface + * for stopping multiple queues, and it is now considered deprecated. + * Validate this isn't used here. + */ + req = &mbx->req_virt->stop_txqs; + if (req->num_txqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Odd; VF[%d] tried stopping multiple Tx queues\n", + vf->relative_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + /* Find which qid-index is associated with the queue */ + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, true); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + + rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, qid_usage_idx); + if (!rc) + status = PFVF_STATUS_SUCCESS; + +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, + length, status); +} + +static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_queue_cid *handlers[QED_MAX_VF_CHAINS_PER_PF]; + u16 length = sizeof(struct pfvf_def_resp_tlv); + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_rxq_tlv *req; + u8 status = PFVF_STATUS_FAILURE; + u8 complete_event_flg; + u8 complete_cqe_flg; + u8 qid_usage_idx; + int rc; + u8 i; + + req = &mbx->req_virt->update_rxq; + complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); + complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); + + qid_usage_idx = qed_iov_vf_mbx_qid(p_hwfn, vf, false); + if (qid_usage_idx == QED_IOV_QID_INVALID) + goto out; + + /* There shouldn't exist a VF that uses queue-qids yet uses this + * API with multiple Rx queues. Validate this. + */ + if ((vf->acquire.vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_QUEUE_QIDS) && req->num_rxqs != 1) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] supports QIDs but sends multiple queues\n", + vf->relative_vf_id); + goto out; + } + + /* Validate inputs - for the legacy case this is still true since + * qid_usage_idx for each Rx queue would be LEGACY_QID_RX. 
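+ * (all of the VF's Rx queues then share a single cid slot index, so the
+ * loop below can use the same qid_usage_idx for every queue it checks.)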
+ */ + for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) { + if (!qed_iov_validate_rxq(p_hwfn, vf, i, + QED_IOV_VALIDATE_Q_NA) || + !vf->vf_queues[i].cids[qid_usage_idx].p_cid || + vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Incorrect Rxqs [%04x, %02x]\n", + vf->relative_vf_id, req->rx_qid, + req->num_rxqs); + goto out; + } + } + + /* Prepare the handlers */ + for (i = 0; i < req->num_rxqs; i++) { + u16 qid = req->rx_qid + i; + + handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid; + } + + rc = qed_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers, + req->num_rxqs, + complete_cqe_flg, + complete_event_flg, + QED_SPQ_MODE_EBLOCK, NULL); + if (rc) + goto out; + + status = PFVF_STATUS_SUCCESS; +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, + length, status); +} + +void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type) +{ + struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; + int len = 0; + + do { + if (!p_tlv->length) { + DP_NOTICE(p_hwfn, "Zero length TLV found\n"); + return NULL; + } + + if (p_tlv->type == req_type) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Extended tlv type %d, length %d found\n", + p_tlv->type, p_tlv->length); + return p_tlv; + } + + len += p_tlv->length; + p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); + + if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { + DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n"); + return NULL; + } + } while (p_tlv->type != CHANNEL_TLV_LIST_END); + + return NULL; +} + +static void +qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_activate_tlv *p_act_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + + p_act_tlv = (struct vfpf_vport_update_activate_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_act_tlv) + return; + + p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; + p_data->vport_active_rx_flg = p_act_tlv->active_rx; + p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; + p_data->vport_active_tx_flg = p_act_tlv->active_tx; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE; +} + +static void +qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_vf_info *p_vf, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + + p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_vlan_tlv) + return; + + p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; + + /* Ignore the VF request if we're forcing a vlan */ + if (!(p_vf->configured_features & BIT(VLAN_ADDR_FORCED))) { + p_data->update_inner_vlan_removal_flg = 1; + p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan; + } + + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP; +} + +static void +qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + + p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + tlv); + if (!p_tx_switch_tlv) + return; + + p_data->update_tx_switching_flg = 1; + 
p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH; +} + +static void +qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; + + p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_mcast_tlv) + return; + + p_data->update_approx_mcast_flg = 1; + memcpy(p_data->bins, p_mcast_tlv->bins, + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; +} + +static void +qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct qed_filter_accept_flags *p_flags = &p_data->accept_flags; + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + + p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_accept_tlv) + return; + + p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode; + p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter; + p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode; + p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM; +} + +static void +qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + + p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, + tlv); + if (!p_accept_any_vlan) + return; + + p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; + p_data->update_accept_any_vlan_flg = + p_accept_any_vlan->update_accept_any_vlan_flg; + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; +} + +static void +qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + struct qed_sp_vport_update_params *p_data, + struct qed_rss_params *p_rss, + struct qed_iov_vf_mbx *p_mbx, + u16 *tlvs_mask, u16 *tlvs_accepted) +{ + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; + bool b_reject = false; + u16 table_size; + u16 i, q_idx; + + p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_rss_tlv) { + p_data->rss_params = NULL; + return; + } + + memset(p_rss, 0, sizeof(struct qed_rss_params)); + + p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CONFIG_FLAG); + p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CAPS_FLAG); + p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_IND_TABLE_FLAG); + p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_KEY_FLAG); + + p_rss->rss_enable = p_rss_tlv->rss_enable; + p_rss->rss_eng_id = vf->relative_vf_id + 1; + p_rss->rss_caps = p_rss_tlv->rss_caps; + p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; + memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key)); + + table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table), + (1 << 
p_rss_tlv->rss_table_size_log)); + + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_cid; + + q_idx = p_rss_tlv->rss_ind_table[i]; + if (!qed_iov_validate_rxq(p_hwfn, vf, q_idx, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Omitting RSS due to wrong queue %04x\n", + vf->relative_vf_id, q_idx); + b_reject = true; + goto out; + } + + p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]); + p_rss->rss_ind_table[i] = p_cid; + } + + p_data->rss_params = p_rss; +out: + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS; + if (!b_reject) + *tlvs_accepted |= 1 << QED_IOV_VP_UPDATE_RSS; +} + +static void +qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn, + struct qed_vf_info *vf, + struct qed_sp_vport_update_params *p_data, + struct qed_sge_tpa_params *p_sge_tpa, + struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + + p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + + if (!p_sge_tpa_tlv) { + p_data->sge_tpa_params = NULL; + return; + } + + memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params)); + + p_sge_tpa->update_tpa_en_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); + p_sge_tpa->update_tpa_param_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & + VFPF_UPDATE_TPA_PARAM_FLAG); + + p_sge_tpa->tpa_ipv4_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); + p_sge_tpa->tpa_ipv6_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); + p_sge_tpa->tpa_pkt_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); + p_sge_tpa->tpa_hdr_data_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); + p_sge_tpa->tpa_gro_consistent_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); + + p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; + p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; + p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; + p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; + p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; + + p_data->sge_tpa_params = p_sge_tpa; + + *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA; +} + +static int qed_iov_pre_update_vport(struct qed_hwfn *hwfn, + u8 vfid, + struct qed_sp_vport_update_params *params, + u16 *tlvs) +{ + u8 mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; + struct qed_filter_accept_flags *flags = ¶ms->accept_flags; + struct qed_public_vf_info *vf_info; + u16 tlv_mask; + + tlv_mask = BIT(QED_IOV_VP_UPDATE_ACCEPT_PARAM) | + BIT(QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN); + + /* Untrusted VFs can't even be trusted to know that fact. + * Simply indicate everything is configured fine, and trace + * configuration 'behind their back'. 
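+ * (e.g. an untrusted VF requesting promiscuous Rx still gets a success
+ * reply, but the unmatched-unicast/multicast accept bits are masked off
+ * below before the vport is actually updated.)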
+ */ + if (!(*tlvs & tlv_mask)) + return 0; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (flags->update_rx_mode_config) { + vf_info->rx_accept_mode = flags->rx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->rx_accept_filter &= ~mask; + } + + if (flags->update_tx_mode_config) { + vf_info->tx_accept_mode = flags->tx_accept_filter; + if (!vf_info->is_trusted_configured) + flags->tx_accept_filter &= ~mask; + } + + if (params->update_accept_any_vlan_flg) { + vf_info->accept_any_vlan = params->accept_any_vlan; + + if (vf_info->forced_vlan && !vf_info->is_trusted_configured) + params->accept_any_vlan = false; + } + + return 0; +} + +static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_rss_params *p_rss_params = NULL; + struct qed_sp_vport_update_params params; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct qed_sge_tpa_params sge_tpa_params; + u16 tlvs_mask = 0, tlvs_accepted = 0; + u8 status = PFVF_STATUS_SUCCESS; + u16 length; + int rc; + + /* Valiate PF can send such a request */ + if (!vf->vport_instance) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "No VPORT instance available for VF[%d], failing vport update\n", + vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto out; + } + p_rss_params = vzalloc(sizeof(*p_rss_params)); + if (!p_rss_params) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + memset(¶ms, 0, sizeof(params)); + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + params.rss_params = NULL; + + /* Search for extended tlvs list and update values + * from VF in struct qed_sp_vport_update_params. + */ + qed_iov_vp_update_act_param(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_vlan_param(p_hwfn, ¶ms, vf, mbx, &tlvs_mask); + qed_iov_vp_update_tx_switch(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_mcast_bin_param(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_accept_flag(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_accept_any_vlan(p_hwfn, ¶ms, mbx, &tlvs_mask); + qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, ¶ms, + &sge_tpa_params, mbx, &tlvs_mask); + + tlvs_accepted = tlvs_mask; + + /* Some of the extended TLVs need to be validated first; In that case, + * they can update the mask without updating the accepted [so that + * PF could communicate to VF it has rejected request]. 
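+ * (currently the RSS TLV is the only one validated like that in this
+ * handler - a bad indirection-table entry sets its bit in tlvs_mask
+ * but not in tlvs_accepted.)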
+ */ + qed_iov_vp_update_rss_param(p_hwfn, vf, ¶ms, p_rss_params, + mbx, &tlvs_mask, &tlvs_accepted); + + if (qed_iov_pre_update_vport(p_hwfn, vf->relative_vf_id, + ¶ms, &tlvs_accepted)) { + tlvs_accepted = 0; + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + if (!tlvs_accepted) { + if (tlvs_mask) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Upper-layer prevents VF vport configuration\n"); + else + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "No feature tlvs found for vport update\n"); + status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } + + rc = qed_sp_vport_update(p_hwfn, ¶ms, QED_SPQ_MODE_EBLOCK, NULL); + + if (rc) + status = PFVF_STATUS_FAILURE; + +out: + vfree(p_rss_params); + length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status, + tlvs_mask, tlvs_accepted); + qed_iov_send_response(p_hwfn, p_ptt, vf, length, status); +} + +static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int i; + + /* First remove entries and then add new ones */ + if (p_params->opcode == QED_FILTER_REMOVE) { + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) + if (p_vf->shadow_config.vlans[i].used && + p_vf->shadow_config.vlans[i].vid == + p_params->vlan) { + p_vf->shadow_config.vlans[i].used = false; + break; + } + if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF [%d] - Tries to remove a non-existing vlan\n", + p_vf->relative_vf_id); + return -EINVAL; + } + } else if (p_params->opcode == QED_FILTER_REPLACE || + p_params->opcode == QED_FILTER_FLUSH) { + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) + p_vf->shadow_config.vlans[i].used = false; + } + + /* In forced mode, we're willing to remove entries - but we don't add + * new ones. + */ + if (p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED)) + return 0; + + if (p_params->opcode == QED_FILTER_ADD || + p_params->opcode == QED_FILTER_REPLACE) { + for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) { + if (p_vf->shadow_config.vlans[i].used) + continue; + + p_vf->shadow_config.vlans[i].used = true; + p_vf->shadow_config.vlans[i].vid = p_params->vlan; + break; + } + + if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF [%d] - Tries to configure more than %d vlan filters\n", + p_vf->relative_vf_id, + QED_ETH_VF_NUM_VLAN_FILTERS + 1); + return -EINVAL; + } + } + + return 0; +} + +static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int i; + + /* If we're in forced-mode, we don't allow any change */ + if (p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) + return 0; + + /* Don't keep track of shadow copy since we don't intend to restore. 
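+ * (for trusted VFs the MAC is instead published through the bulletin
+ * board - see qed_iov_chk_ucast() below.)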
*/ + if (p_vf->p_vf_info.is_trusted_configured) + return 0; + + /* First remove entries and then add new ones */ + if (p_params->opcode == QED_FILTER_REMOVE) { + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(p_vf->shadow_config.macs[i], + p_params->mac)) { + eth_zero_addr(p_vf->shadow_config.macs[i]); + break; + } + } + + if (i == QED_ETH_VF_NUM_MAC_FILTERS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "MAC isn't configured\n"); + return -EINVAL; + } + } else if (p_params->opcode == QED_FILTER_REPLACE || + p_params->opcode == QED_FILTER_FLUSH) { + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) + eth_zero_addr(p_vf->shadow_config.macs[i]); + } + + /* List the new MAC address */ + if (p_params->opcode != QED_FILTER_ADD && + p_params->opcode != QED_FILTER_REPLACE) + return 0; + + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) { + ether_addr_copy(p_vf->shadow_config.macs[i], + p_params->mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Added MAC at %d entry in shadow\n", i); + break; + } + } + + if (i == QED_ETH_VF_NUM_MAC_FILTERS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n"); + return -EINVAL; + } + + return 0; +} + +static int +qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, + struct qed_filter_ucast *p_params) +{ + int rc = 0; + + if (p_params->type == QED_FILTER_MAC) { + rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params); + if (rc) + return rc; + } + + if (p_params->type == QED_FILTER_VLAN) + rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params); + + return rc; +} + +static int qed_iov_chk_ucast(struct qed_hwfn *hwfn, + int vfid, struct qed_filter_ucast *params) +{ + struct qed_public_vf_info *vf; + + vf = qed_iov_get_public_vf_info(hwfn, vfid, true); + if (!vf) + return -EINVAL; + + /* No real decision to make; Store the configured MAC */ + if (params->type == QED_FILTER_MAC || + params->type == QED_FILTER_MAC_VLAN) { + ether_addr_copy(vf->mac, params->mac); + + if (vf->is_trusted_configured) { + qed_iov_bulletin_set_mac(hwfn, vf->mac, vfid); + + /* Update and post bulleitin again */ + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + + return 0; +} + +static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt; + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_ucast_filter_tlv *req; + u8 status = PFVF_STATUS_SUCCESS; + struct qed_filter_ucast params; + int rc; + + /* Prepare the unicast filter params */ + memset(¶ms, 0, sizeof(struct qed_filter_ucast)); + req = &mbx->req_virt->ucast_filter; + params.opcode = (enum qed_filter_opcode)req->opcode; + params.type = (enum qed_filter_ucast_type)req->type; + + params.is_rx_filter = 1; + params.is_tx_filter = 1; + params.vport_to_remove_from = vf->vport_id; + params.vport_to_add_to = vf->vport_id; + memcpy(params.mac, req->mac, ETH_ALEN); + params.vlan = req->vlan; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %pM, vlan 0x%04x\n", + vf->abs_vf_id, params.opcode, params.type, + params.is_rx_filter ? "RX" : "", + params.is_tx_filter ? 
"TX" : "", + params.vport_to_add_to, + params.mac, params.vlan); + + if (!vf->vport_instance) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", + vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto out; + } + + /* Update shadow copy of the VF configuration */ + if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, ¶ms)) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + /* Determine if the unicast filtering is acceptible by PF */ + if ((p_bulletin->valid_bitmap & BIT(VLAN_ADDR_FORCED)) && + (params.type == QED_FILTER_VLAN || + params.type == QED_FILTER_MAC_VLAN)) { + /* Once VLAN is forced or PVID is set, do not allow + * to add/replace any further VLANs. + */ + if (params.opcode == QED_FILTER_ADD || + params.opcode == QED_FILTER_REPLACE) + status = PFVF_STATUS_FORCED; + goto out; + } + + if ((p_bulletin->valid_bitmap & BIT(MAC_ADDR_FORCED)) && + (params.type == QED_FILTER_MAC || + params.type == QED_FILTER_MAC_VLAN)) { + if (!ether_addr_equal(p_bulletin->mac, params.mac) || + (params.opcode != QED_FILTER_ADD && + params.opcode != QED_FILTER_REPLACE)) + status = PFVF_STATUS_FORCED; + goto out; + } + + rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, ¶ms); + if (rc) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, ¶ms, + QED_SPQ_MODE_CB, NULL); + if (rc) + status = PFVF_STATUS_FAILURE; + +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + int i; + + /* Reset the SBs */ + for (i = 0; i < vf->num_sbs; i++) + qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + vf->igu_sbs[i], + vf->opaque_fid, false); + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_SUCCESS); +} + +static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, struct qed_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + + /* Disable Interrupts for VF */ + qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0); + + /* Reset Permission table */ + qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0); + + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, + length, status); +} + +static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + int rc = 0; + + qed_iov_vf_cleanup(p_hwfn, p_vf); + + if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) { + /* Stopping the VF */ + rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid, + p_vf->opaque_fid); + + if (rc) { + DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n", + rc); + status = PFVF_STATUS_FAILURE; + } + + p_vf->state = VF_STOPPED; + } + + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, + length, status); +} + +static void qed_iov_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct pfvf_read_coal_resp_tlv *p_resp; + struct vfpf_read_coal_req_tlv *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_vf_queue *p_queue; + struct qed_queue_cid *p_cid; + u16 coal = 0, qid, i; + bool b_is_rx; + int rc = 0; + + mbx->offset = (u8 *)mbx->reply_virt; + req = &mbx->req_virt->read_coal_req; + 
+ qid = req->qid; + b_is_rx = req->is_rx ? true : false; + + if (b_is_rx) { + if (!qed_iov_validate_rxq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + + p_cid = qed_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]); + rc = qed_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + } else { + if (!qed_iov_validate_txq(p_hwfn, p_vf, qid, + QED_IOV_VALIDATE_Q_ENABLE)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + p_vf->abs_vf_id, qid); + goto send_resp; + } + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + p_queue = &p_vf->vf_queues[qid]; + if ((!p_queue->cids[i].p_cid) || + (!p_queue->cids[i].b_is_tx)) + continue; + + p_cid = p_queue->cids[i].p_cid; + + rc = qed_get_txq_coalesce(p_hwfn, p_ptt, p_cid, &coal); + if (rc) + goto send_resp; + break; + } + } + + status = PFVF_STATUS_SUCCESS; + +send_resp: + p_resp = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_COALESCE_READ, + sizeof(*p_resp)); + p_resp->coal = coal; + + qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status); +} + +static void qed_iov_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *vf) +{ + struct qed_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_coalesce *req; + u8 status = PFVF_STATUS_FAILURE; + struct qed_queue_cid *p_cid; + u16 rx_coal, tx_coal; + int rc = 0, i; + u16 qid; + + req = &mbx->req_virt->update_coalesce; + + rx_coal = req->rx_coal; + tx_coal = req->tx_coal; + qid = req->qid; + + if (!qed_iov_validate_rxq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && rx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Rx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + if (!qed_iov_validate_txq(p_hwfn, vf, qid, + QED_IOV_VALIDATE_Q_ENABLE) && tx_coal) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d]: Invalid Tx queue_id = %d\n", + vf->abs_vf_id, qid); + goto out; + } + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n", + vf->abs_vf_id, rx_coal, tx_coal, qid); + + if (rx_coal) { + p_cid = qed_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]); + + rc = qed_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid); + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set rx queue = %d coalesce\n", + vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid); + goto out; + } + vf->rx_coal = rx_coal; + } + + if (tx_coal) { + struct qed_vf_queue *p_queue = &vf->vf_queues[qid]; + + for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) { + if (!p_queue->cids[i].p_cid) + continue; + + if (!p_queue->cids[i].b_is_tx) + continue; + + rc = qed_set_txq_coalesce(p_hwfn, p_ptt, tx_coal, + p_queue->cids[i].p_cid); + + if (rc) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%d]: Unable to set tx queue coalesce\n", + vf->abs_vf_id); + goto out; + } + } + vf->tx_coal = tx_coal; + } + + status = PFVF_STATUS_SUCCESS; +out: + qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static int +qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) +{ + int cnt; + u32 val; + + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); + + for (cnt = 0; cnt < 50; cnt++) { + val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); + if (!val) + break; + msleep(20); 
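+ /* Doorbells are still outstanding - wait and retry; 50 iterations
+ * of 20ms give the VF roughly a second to drain.
+ */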
+ } + qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + if (cnt == 50) { + DP_ERR(p_hwfn, + "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", + p_vf->abs_vf_id, val); + return -EBUSY; + } + + return 0; +} + +#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS) + +static int +qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) +{ + u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp; + u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port; + u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine; + u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0; + u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0; + u8 port_id, tc, tc_id = 0, voq = 0; + int cnt; + + memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); + memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32)); + + /* Read initial consumers & producers */ + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC; + voq = VOQ(port_id, tc_id, max_phys_tcs_per_port); + cons[voq] = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + prod = qed_rd(p_hwfn, p_ptt, + prod_voq0_addr + voq * 0x40); + distance[voq] = prod - cons[voq]; + } + } + + /* Wait for consumers to pass the producers */ + port_id = 0; + tc = 0; + for (cnt = 0; cnt < 50; cnt++) { + for (; port_id < max_ports_per_engine; port_id++) { + /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */ + for (; tc < max_phys_tcs_per_port + 1; tc++) { + tc_id = (tc < max_phys_tcs_per_port) ? + tc : PURE_LB_TC; + voq = VOQ(port_id, + tc_id, max_phys_tcs_per_port); + tmp = qed_rd(p_hwfn, p_ptt, + cons_voq0_addr + voq * 0x40); + if (distance[voq] > tmp - cons[voq]) + break; + } + + if (tc == max_phys_tcs_per_port + 1) + tc = 0; + else + break; + } + + if (port_id == max_ports_per_engine) + break; + + msleep(20); + } + + if (cnt == 50) { + DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n", + p_vf->abs_vf_id, (int)voq); + + DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n", + (int)voq, (int)port_id, (int)tc_id); + + return -EBUSY; + } + + return 0; +} + +static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn, + struct qed_vf_info *p_vf, struct qed_ptt *p_ptt) +{ + int rc; + + rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + return 0; +} + +static int +qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 rel_vf_id, u32 *ack_vfs) +{ + struct qed_vf_info *p_vf; + int rc = 0; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!p_vf) + return 0; + + if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & + (1ULL << (rel_vf_id % 64))) { + u16 vfid = p_vf->abs_vf_id; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] - Handling FLR\n", vfid); + + qed_iov_vf_cleanup(p_hwfn, p_vf); + + /* If VF isn't active, no need for anything but SW */ + if (!p_vf->b_init) + goto cleanup; + + rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); + if (rc) + goto cleanup; + + rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true); + if (rc) { + DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); + return rc; + } + + /* Workaround to make VF-PF channel ready, as FW + * doesn't do that as a part of FLR. 
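+ * (the register write below sets the per-VF USTORM 'channel ready'
+ * flag back to 1.)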
+ */ + REG_WR(p_hwfn, + GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM, + USTORM_VF_PF_CHANNEL_READY, vfid), 1); + + /* VF_STOPPED has to be set only after final cleanup + * but prior to re-enabling the VF. + */ + p_vf->state = VF_STOPPED; + + rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); + if (rc) { + DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", + vfid); + return rc; + } +cleanup: + /* Mark VF for ack and clean pending state */ + if (p_vf->state == VF_RESET) + p_vf->state = VF_STOPPED; + ack_vfs[vfid / 32] |= BIT((vfid % 32)); + p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= + ~(1ULL << (rel_vf_id % 64)); + p_vf->vf_mbx.b_pending_msg = false; + } + + return rc; +} + +static int +qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 ack_vfs[VF_MAX_STATIC / 32]; + int rc = 0; + u16 i; + + memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); + + /* Since BRB <-> PRS interface can't be tested as part of the flr + * polling due to HW limitations, simply sleep a bit. And since + * there's no need to wait per-vf, do it before looping. + */ + msleep(100); + + for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) + qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); + + rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); + return rc; +} + +bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs) +{ + bool found = false; + u16 i; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n"); + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "[%08x,...,%08x]: %08x\n", + i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); + + if (!p_hwfn->cdev->p_iov_info) { + DP_NOTICE(p_hwfn, "VF flr but no IOV\n"); + return false; + } + + /* Mark VFs */ + for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) { + struct qed_vf_info *p_vf; + u8 vfid; + + p_vf = qed_iov_get_vf_info(p_hwfn, i, false); + if (!p_vf) + continue; + + vfid = p_vf->abs_vf_id; + if (BIT((vfid % 32)) & p_disabled_vfs[vfid / 32]) { + u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; + u16 rel_vf_id = p_vf->relative_vf_id; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%d] [rel %d] got FLR-ed\n", + vfid, rel_vf_id); + + p_vf->state = VF_RESET; + + /* No need to lock here, since pending_flr should + * only change here and before ACKing MFw. Since + * MFW will not trigger an additional attention for + * VF flr until ACKs, we're safe. 
+ */ + p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); + found = true; + } + } + + return found; +} + +static int qed_iov_get_link(struct qed_hwfn *p_hwfn, + u16 vfid, + struct qed_mcp_link_params *p_params, + struct qed_mcp_link_state *p_link, + struct qed_mcp_link_capabilities *p_caps) +{ + struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn, + vfid, + false); + struct qed_bulletin_content *p_bulletin; + + if (!p_vf) + return -EINVAL; + + p_bulletin = p_vf->bulletin.p_virt; + + if (p_params) + __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin); + if (p_link) + __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin); + if (p_caps) + __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); + return 0; +} + +static int +qed_iov_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_vf_info *p_vf) +{ + struct qed_bulletin_content *p_bulletin = p_vf->bulletin.p_virt; + struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx; + struct vfpf_bulletin_update_mac_tlv *p_req; + u8 status = PFVF_STATUS_SUCCESS; + int rc = 0; + + if (!p_vf->p_vf_info.is_trusted_configured) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Blocking bulletin update request from untrusted VF[%d]\n", + p_vf->abs_vf_id); + status = PFVF_STATUS_NOT_SUPPORTED; + rc = -EINVAL; + goto send_status; + } + + p_req = &mbx->req_virt->bulletin_update_mac; + ether_addr_copy(p_bulletin->mac, p_req->mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Updated bulletin of VF[%d] with requested MAC[%pM]\n", + p_vf->abs_vf_id, p_req->mac); + +send_status: + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(struct pfvf_def_resp_tlv), status); + return rc; +} + +static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int vfid) +{ + struct qed_iov_vf_mbx *mbx; + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf) + return; + + mbx = &p_vf->vf_mbx; + + /* qed_iov_process_mbx_request */ + if (!mbx->b_pending_msg) { + DP_NOTICE(p_hwfn, + "VF[%02x]: Trying to process mailbox message when none is pending\n", + p_vf->abs_vf_id); + return; + } + mbx->b_pending_msg = false; + + mbx->first_tlv = mbx->req_virt->first_tlv; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF[%02x]: Processing mailbox message [type %04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + + /* check if tlv type is known */ + if (qed_iov_tlv_supported(mbx->first_tlv.tl.type) && + !p_vf->b_malicious) { + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_START: + qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_TEARDOWN: + qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_RXQ: + qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_TXQ: + qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_RXQS: + qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_TXQS: + qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UPDATE_RXQ: + qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_UPDATE: + qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UCAST_FILTER: + qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_CLOSE: + qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_INT_CLEANUP: + qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, 
p_vf); + break; + case CHANNEL_TLV_RELEASE: + qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UPDATE_TUNN_PARAM: + qed_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_COALESCE_UPDATE: + qed_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_COALESCE_READ: + qed_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_BULLETIN_UPDATE_MAC: + qed_iov_vf_pf_bulletin_update_mac(p_hwfn, p_ptt, p_vf); + break; + } + } else if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n", + p_vf->abs_vf_id, mbx->first_tlv.tl.type); + + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_MALICIOUS); + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + DP_NOTICE(p_hwfn, + "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n", + p_vf->abs_vf_id, + mbx->first_tlv.tl.type, + mbx->first_tlv.tl.length, + mbx->first_tlv.padding, mbx->first_tlv.reply_address); + + /* Try replying in case reply address matches the acquisition's + * posted address. + */ + if (p_vf->acquire.first_tlv.reply_address && + (mbx->first_tlv.reply_address == + p_vf->acquire.first_tlv.reply_address)) { + qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, + mbx->first_tlv.tl.type, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_NOT_SUPPORTED); + } else { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF[%02x]: Can't respond to TLV - no valid reply address\n", + p_vf->abs_vf_id); + } + } +} + +static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events) +{ + int i; + + memset(events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH); + + qed_for_each_vf(p_hwfn, i) { + struct qed_vf_info *p_vf; + + p_vf = &p_hwfn->pf_iov_info->vfs_array[i]; + if (p_vf->vf_mbx.b_pending_msg) + events[i / 64] |= 1ULL << (i % 64); + } +} + +static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn, + u16 abs_vfid) +{ + u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf; + + if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Got indication for VF [abs 0x%08x] that cannot be handled by PF\n", + abs_vfid); + return NULL; + } + + return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min]; +} + +static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn, + u16 abs_vfid, struct regpair *vf_msg) +{ + struct qed_vf_info *p_vf = qed_sriov_get_vf_from_absid(p_hwfn, + abs_vfid); + + if (!p_vf) + return 0; + + /* List the physical address of the request so that handler + * could later on copy the message from it. 
+ */ + p_vf->vf_mbx.pending_req = HILO_64(vf_msg->hi, vf_msg->lo); + + /* Mark the event and schedule the workqueue */ + p_vf->vf_mbx.b_pending_msg = true; + qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG); + + return 0; +} + +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_sriov_get_vf_from_absid(p_hwfn, qed_vf_from_entity_id + (p_data->entity_id)); + if (!p_vf) + return; + + if (!p_vf->b_malicious) { + DP_NOTICE(p_hwfn, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); + + p_vf->b_malicious = true; + } else { + DP_INFO(p_hwfn, + "VF [%d] - Malicious behavior [%02x]\n", + p_vf->abs_vf_id, p_data->err_id); + } +} + +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo), + &data->vf_pf_channel.msg_addr); + default: + DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n", + opcode); + return -EINVAL; + } +} + +u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info; + u16 i; + + if (!p_iov) + goto out; + + for (i = rel_vf_id; i < p_iov->total_vfs; i++) + if (qed_iov_is_valid_vfid(p_hwfn, rel_vf_id, true, false)) + return i; + +out: + return MAX_NUM_VFS; +} + +static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt, + int vfid) +{ + struct qed_dmae_params params; + struct qed_vf_info *vf_info; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return -EINVAL; + + memset(¶ms, 0, sizeof(params)); + SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1); + SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1); + params.src_vfid = vf_info->abs_vf_id; + + if (qed_dmae_host2host(p_hwfn, ptt, + vf_info->vf_mbx.pending_req, + vf_info->vf_mbx.req_phys, + sizeof(union vfpf_tlvs) / 4, ¶ms)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Failed to copy message from VF 0x%02x\n", vfid); + + return -EIO; + } + + return 0; +} + +static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn, + u8 *mac, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, + "Can not set forced MAC, invalid vfid [%d]\n", vfid); + return; + } + + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, + "Can't set forced MAC to malicious VF [%d]\n", vfid); + return; + } + + if (vf_info->p_vf_info.is_trusted_configured) { + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + /* Trust mode will disable Forced MAC */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + } else { + feature = BIT(MAC_ADDR_FORCED); + /* Forced MAC will disable MAC_ADDR */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~BIT(VFPF_BULLETIN_MAC_ADDR); + } + + memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); +} + +static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, "Can not set MAC, invalid vfid [%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, "Can't set MAC to malicious VF 
[%d]\n", + vfid); + return -EINVAL; + } + + if (vf_info->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED)) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Can not set MAC, Forced MAC is configured\n"); + return -EINVAL; + } + + feature = BIT(VFPF_BULLETIN_MAC_ADDR); + ether_addr_copy(vf_info->bulletin.p_virt->mac, mac); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + if (vf_info->p_vf_info.is_trusted_configured) + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); + + return 0; +} + +static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn, + u16 pvid, int vfid) +{ + struct qed_vf_info *vf_info; + u64 feature; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, + "Can not set forced MAC, invalid vfid [%d]\n", vfid); + return; + } + + if (vf_info->b_malicious) { + DP_NOTICE(p_hwfn->cdev, + "Can't set forced vlan to malicious VF [%d]\n", vfid); + return; + } + + feature = 1 << VLAN_ADDR_FORCED; + vf_info->bulletin.p_virt->pvid = pvid; + if (pvid) + vf_info->bulletin.p_virt->valid_bitmap |= feature; + else + vf_info->bulletin.p_virt->valid_bitmap &= ~feature; + + qed_iov_configure_vport_forced(p_hwfn, vf_info, feature); +} + +void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, + int vfid, u16 vxlan_port, u16 geneve_port) +{ + struct qed_vf_info *vf_info; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->cdev, + "Can not set udp ports, invalid vfid [%d]\n", vfid); + return; + } + + if (vf_info->b_malicious) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Can not set udp ports to malicious VF [%d]\n", + vfid); + return; + } + + vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port; + vf_info->bulletin.p_virt->geneve_udp_port = geneve_port; +} + +static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_vf_info *p_vf_info; + + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf_info) + return false; + + return !!p_vf_info->vport_instance; +} + +static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_vf_info *p_vf_info; + + p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf_info) + return true; + + return p_vf_info->state == VF_STOPPED; +} + +static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_vf_info *vf_info; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return false; + + return vf_info->spoof_chk; +} + +static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val) +{ + struct qed_vf_info *vf; + int rc = -EINVAL; + + if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, + "SR-IOV sanity check failed, can't set spoofchk\n"); + goto out; + } + + vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf) + goto out; + + if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) { + /* After VF VPORT start PF will configure spoof check */ + vf->req_spoofchk_val = val; + rc = 0; + goto out; + } + + rc = __qed_iov_spoofchk_set(p_hwfn, vf, val); + +out: + return rc; +} + +static u8 *qed_iov_bulletin_get_mac(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & + BIT(VFPF_BULLETIN_MAC_ADDR))) + return NULL; + + return p_vf->bulletin.p_virt->mac; +} + +static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn, + u16 
rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) + return NULL; + + return p_vf->bulletin.p_virt->mac; +} + +static u16 +qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct qed_vf_info *p_vf; + + p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return 0; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & BIT(VLAN_ADDR_FORCED))) + return 0; + + return p_vf->bulletin.p_virt->pvid; +} + +static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, int vfid, int val) +{ + struct qed_vf_info *vf; + u8 abs_vp_id = 0; + u16 rl_id; + int rc; + + vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf) + return -EINVAL; + + rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); + if (rc) + return rc; + + rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */ + return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val, + QM_RL_TYPE_NORMAL); +} + +static int +qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) +{ + struct qed_vf_info *vf; + u8 vport_id; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, + "SR-IOV sanity check failed, can't set min rate\n"); + return -EINVAL; + } + } + + vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); + if (!vf) + return -EINVAL; + + vport_id = vf->vport_id; + + return qed_configure_vport_wfq(cdev, vport_id, rate); +} + +static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid) +{ + struct qed_wfq_data *vf_vp_wfq; + struct qed_vf_info *vf_info; + + vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return 0; + + vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; + + if (vf_vp_wfq->configured) + return vf_vp_wfq->min_speed; + else + return 0; +} + +/** + * qed_schedule_iov - schedules IOV task for VF and PF + * @hwfn: hardware function pointer + * @flag: IOV flag for VF/PF + */ +void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag) +{ + /* Memory barrier for setting atomic bit */ + smp_mb__before_atomic(); + set_bit(flag, &hwfn->iov_task_flags); + /* Memory barrier after setting atomic bit */ + smp_mb__after_atomic(); + DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag); + queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0); +} + +void qed_vf_start_iov_wq(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) + queue_delayed_work(cdev->hwfns[i].iov_wq, + &cdev->hwfns[i].iov_task, 0); +} + +int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) +{ + int i, j; + + for_each_hwfn(cdev, i) + if (cdev->hwfns[i].iov_wq) + flush_workqueue(cdev->hwfns[i].iov_wq); + + /* Mark VFs for disablement */ + qed_iov_set_vfs_to_disable(cdev, true); + + if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled) + pci_disable_sriov(cdev->pdev); + + if (cdev->recov_in_prog) { + DP_VERBOSE(cdev, + QED_MSG_IOV, + "Skip SRIOV disable operations in the device since a recovery is in progress\n"); + goto out; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + /* Failure to acquire the ptt in 100g creates an odd error + * where the first engine has already relased IOV. 
+ */ + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + return -EBUSY; + } + + /* Clean WFQ db and configure equal weight for all vports */ + qed_clean_wfq_db(hwfn, ptt); + + qed_for_each_vf(hwfn, j) { + int k; + + if (!qed_iov_is_valid_vfid(hwfn, j, true, false)) + continue; + + /* Wait until VF is disabled before releasing */ + for (k = 0; k < 100; k++) { + if (!qed_iov_is_vf_stopped(hwfn, j)) + msleep(20); + else + break; + } + + if (k < 100) + qed_iov_release_hw_for_vf(&cdev->hwfns[i], + ptt, j); + else + DP_ERR(hwfn, + "Timeout waiting for VF's FLR to end\n"); + } + + qed_ptt_release(hwfn, ptt); + } +out: + qed_iov_set_vfs_to_disable(cdev, false); + + return 0; +} + +static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn, + u16 vfid, + struct qed_iov_vf_init_params *params) +{ + u16 base, i; + + /* Since we have an equal resource distribution per-VF, and we assume + * PF has acquired the QED_PF_L2_QUE first queues, we start setting + * sequentially from there. + */ + base = FEAT_NUM(hwfn, QED_PF_L2_QUE) + vfid * params->num_queues; + + params->rel_vf_id = vfid; + for (i = 0; i < params->num_queues; i++) { + params->req_rx_queue[i] = base + i; + params->req_tx_queue[i] = base + i; + } +} + +static int qed_sriov_enable(struct qed_dev *cdev, int num) +{ + struct qed_iov_vf_init_params params; + struct qed_hwfn *hwfn; + struct qed_ptt *ptt; + int i, j, rc; + + if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { + DP_NOTICE(cdev, "Can start at most %d VFs\n", + RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1); + return -EINVAL; + } + + memset(¶ms, 0, sizeof(params)); + + /* Initialize HW for VF access */ + for_each_hwfn(cdev, j) { + hwfn = &cdev->hwfns[j]; + ptt = qed_ptt_acquire(hwfn); + + /* Make sure not to use more than 16 queues per VF */ + params.num_queues = min_t(int, + FEAT_NUM(hwfn, QED_VF_L2_QUE) / num, + 16); + + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + for (i = 0; i < num; i++) { + if (!qed_iov_is_valid_vfid(hwfn, i, false, true)) + continue; + + qed_sriov_enable_qid_config(hwfn, i, ¶ms); + rc = qed_iov_init_hw_for_vf(hwfn, ptt, ¶ms); + if (rc) { + DP_ERR(cdev, "Failed to enable VF[%d]\n", i); + qed_ptt_release(hwfn, ptt); + goto err; + } + } + + qed_ptt_release(hwfn, ptt); + } + + /* Enable SRIOV PCIe functions */ + rc = pci_enable_sriov(cdev->pdev, num); + if (rc) { + DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc); + goto err; + } + + hwfn = QED_LEADING_HWFN(cdev); + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_ERR(hwfn, "Failed to acquire ptt\n"); + rc = -EBUSY; + goto err; + } + + rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB); + if (rc) + DP_INFO(cdev, "Failed to update eswitch mode\n"); + qed_ptt_release(hwfn, ptt); + + return num; + +err: + qed_sriov_disable(cdev, false); + return rc; +} + +static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param) +{ + if (!IS_QED_SRIOV(cdev)) { + DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n"); + return -EOPNOTSUPP; + } + + if (num_vfs_param) + return qed_sriov_enable(cdev, num_vfs_param); + else + return qed_sriov_disable(cdev, true); +} + +static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid) +{ + int i; + + if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set a VF MAC; Sriov is not enabled\n"); + return -EINVAL; + } + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set VF[%d] MAC (VF is not 
active)\n", vfid); + return -EINVAL; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + if (!vf_info) + continue; + + /* Set the MAC, and schedule the IOV task */ + if (vf_info->is_trusted_configured) + ether_addr_copy(vf_info->mac, mac); + else + ether_addr_copy(vf_info->forced_mac, mac); + + qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); + } + + return 0; +} + +static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid) +{ + int i; + + if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set a VF MAC; Sriov is not enabled\n"); + return -EINVAL; + } + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true, true)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "Cannot set VF[%d] MAC (VF is not active)\n", vfid); + return -EINVAL; + } + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true); + if (!vf_info) + continue; + + /* Set the forced vlan, and schedule the IOV task */ + vf_info->forced_vlan = vid; + qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG); + } + + return 0; +} + +static int qed_get_vf_config(struct qed_dev *cdev, + int vf_id, struct ifla_vf_info *ivi) +{ + struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); + struct qed_public_vf_info *vf_info; + struct qed_mcp_link_state link; + u32 tx_rate; + int ret; + + /* Sanitize request */ + if (IS_VF(cdev)) + return -EINVAL; + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, false)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "VF index [%d] isn't active\n", vf_id); + return -EINVAL; + } + + vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); + + ret = qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL); + if (ret) + return ret; + + /* Fill information about VF */ + ivi->vf = vf_id; + + if (is_valid_ether_addr(vf_info->forced_mac)) + ether_addr_copy(ivi->mac, vf_info->forced_mac); + else + ether_addr_copy(ivi->mac, vf_info->mac); + + ivi->vlan = vf_info->forced_vlan; + ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id); + ivi->linkstate = vf_info->link_state; + tx_rate = vf_info->tx_rate; + ivi->max_tx_rate = tx_rate ? tx_rate : link.speed; + ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id); + ivi->trusted = vf_info->is_trusted_request; + + return 0; +} + +void qed_inform_vf_link_state(struct qed_hwfn *hwfn) +{ + struct qed_hwfn *lead_hwfn = QED_LEADING_HWFN(hwfn->cdev); + struct qed_mcp_link_capabilities caps; + struct qed_mcp_link_params params; + struct qed_mcp_link_state link; + int i; + + if (!hwfn->pf_iov_info) + return; + + /* Update bulletin of all future possible VFs with link configuration */ + for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) { + struct qed_public_vf_info *vf_info; + + vf_info = qed_iov_get_public_vf_info(hwfn, i, false); + if (!vf_info) + continue; + + /* Only hwfn0 is actually interested in the link speed. + * But since only it would receive an MFW indication of link, + * need to take configuration from it - otherwise things like + * rate limiting for hwfn1 VF would not work. 
+ */ + memcpy(¶ms, qed_mcp_get_link_params(lead_hwfn), + sizeof(params)); + memcpy(&link, qed_mcp_get_link_state(lead_hwfn), sizeof(link)); + memcpy(&caps, qed_mcp_get_link_capabilities(lead_hwfn), + sizeof(caps)); + + /* Modify link according to the VF's configured link state */ + switch (vf_info->link_state) { + case IFLA_VF_LINK_STATE_DISABLE: + link.link_up = false; + break; + case IFLA_VF_LINK_STATE_ENABLE: + link.link_up = true; + /* Set speed according to maximum supported by HW. + * that is 40G for regular devices and 100G for CMT + * mode devices. + */ + link.speed = (hwfn->cdev->num_hwfns > 1) ? + 100000 : 40000; + break; + default: + /* In auto mode pass PF link image to VF */ + break; + } + + if (link.link_up && vf_info->tx_rate) { + struct qed_ptt *ptt; + int rate; + + rate = min_t(int, vf_info->tx_rate, link.speed); + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Failed to acquire PTT\n"); + return; + } + + if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) { + vf_info->tx_rate = rate; + link.speed = rate; + } + + qed_ptt_release(hwfn, ptt); + } + + qed_iov_set_link(hwfn, i, ¶ms, &link, &caps); + } + + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); +} + +static int qed_set_vf_link_state(struct qed_dev *cdev, + int vf_id, int link_state) +{ + int i; + + /* Sanitize request */ + if (IS_VF(cdev)) + return -EINVAL; + + if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true, true)) { + DP_VERBOSE(cdev, QED_MSG_IOV, + "VF index [%d] isn't active\n", vf_id); + return -EINVAL; + } + + /* Handle configuration of link state */ + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + vf = qed_iov_get_public_vf_info(hwfn, vf_id, true); + if (!vf) + continue; + + if (vf->link_state == link_state) + continue; + + vf->link_state = link_state; + qed_inform_vf_link_state(&cdev->hwfns[i]); + } + + return 0; +} + +static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val) +{ + int i, rc = -EINVAL; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + rc = qed_iov_spoofchk_set(p_hwfn, vfid, val); + if (rc) + break; + } + + return rc; +} + +static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, + "SR-IOV sanity check failed, can't set tx rate\n"); + return -EINVAL; + } + + vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true); + + vf->tx_rate = rate; + + qed_inform_vf_link_state(p_hwfn); + } + + return 0; +} + +static int qed_set_vf_rate(struct qed_dev *cdev, + int vfid, u32 min_rate, u32 max_rate) +{ + int rc_min = 0, rc_max = 0; + + if (max_rate) + rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate); + + if (min_rate) + rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate); + + if (rc_max | rc_min) + return -EINVAL; + + return 0; +} + +static int qed_set_vf_trust(struct qed_dev *cdev, int vfid, bool trust) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *hwfn = &cdev->hwfns[i]; + struct qed_public_vf_info *vf; + + if (!qed_iov_pf_sanity_check(hwfn, vfid)) { + DP_NOTICE(hwfn, + "SR-IOV sanity check failed, can't set trust\n"); + return -EINVAL; + } + + vf = qed_iov_get_public_vf_info(hwfn, vfid, true); + + if (vf->is_trusted_request == trust) + return 0; + vf->is_trusted_request = trust; + + qed_schedule_iov(hwfn, QED_IOV_WQ_TRUST_FLAG); + } + + 
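
qed_set_vf_trust() above deliberately does not touch hardware: it only records is_trusted_request and schedules QED_IOV_WQ_TRUST_FLAG, leaving qed_iov_handle_trust_change() further down in this file, run from the PF IOV workqueue, to reconcile the request with is_trusted_configured. The sketch below is a minimal, self-contained model of that request/worker split under simplified assumptions; the struct, the set_trust()/trust_worker() names and the single global pending flag are hypothetical, and the real worker additionally reprograms MACs and accept flags.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified model: callers only record the *requested*
 * trust state and mark work as pending; a worker later folds it into
 * the *configured* state.
 */
struct vf_trust_state {
	bool trusted_request;    /* what the administrator asked for */
	bool trusted_configured; /* what is currently applied */
};

static bool work_pending;        /* stands in for the scheduled IOV task */

static void set_trust(struct vf_trust_state *vf, bool trust)
{
	if (vf->trusted_request == trust)
		return;                 /* no change requested */

	vf->trusted_request = trust;
	work_pending = true;            /* analogous to qed_schedule_iov() */
}

static void trust_worker(struct vf_trust_state *vf)
{
	if (!work_pending)
		return;
	work_pending = false;

	if (vf->trusted_configured == vf->trusted_request)
		return;                 /* request flipped back meanwhile */

	vf->trusted_configured = vf->trusted_request;
	printf("applied trust=%d\n", vf->trusted_configured);
}

int main(void)
{
	struct vf_trust_state vf = { false, false };

	set_trust(&vf, true);  /* fast path: record and "schedule" */
	trust_worker(&vf);     /* deferred path: reconcile */
	return 0;
}

The forced-MAC and VLAN paths earlier in this file follow the same pattern: qed_sriov_pf_set_mac() and qed_sriov_pf_set_vlan() only update qed_public_vf_info and schedule QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, and the workqueue later pushes the result onto the bulletin board.
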
return 0; +} + +static void qed_handle_vf_msg(struct qed_hwfn *hwfn) +{ + u64 events[QED_VF_ARRAY_LENGTH]; + struct qed_ptt *ptt; + int i; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Can't acquire PTT; re-scheduling\n"); + qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG); + return; + } + + qed_iov_pf_get_pending_events(hwfn, events); + + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n", + events[0], events[1], events[2]); + + qed_for_each_vf(hwfn, i) { + /* Skip VFs with no pending messages */ + if (!(events[i / 64] & (1ULL << (i % 64)))) + continue; + + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", + i, hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + /* Copy VF's message to PF's request buffer for that VF */ + if (qed_iov_copy_vf_msg(hwfn, ptt, i)) + continue; + + qed_iov_process_mbx_req(hwfn, ptt, i); + } + + qed_ptt_release(hwfn, ptt); +} + +static bool qed_pf_validate_req_vf_mac(struct qed_hwfn *hwfn, + u8 *mac, + struct qed_public_vf_info *info) +{ + if (info->is_trusted_configured) { + if (is_valid_ether_addr(info->mac) && + (!mac || !ether_addr_equal(mac, info->mac))) + return true; + } else { + if (is_valid_ether_addr(info->forced_mac) && + (!mac || !ether_addr_equal(mac, info->forced_mac))) + return true; + } + + return false; +} + +static void qed_set_bulletin_mac(struct qed_hwfn *hwfn, + struct qed_public_vf_info *info, + int vfid) +{ + if (info->is_trusted_configured) + qed_iov_bulletin_set_mac(hwfn, info->mac, vfid); + else + qed_iov_bulletin_set_forced_mac(hwfn, info->forced_mac, vfid); +} + +static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn) +{ + int i; + + qed_for_each_vf(hwfn, i) { + struct qed_public_vf_info *info; + bool update = false; + u8 *mac; + + info = qed_iov_get_public_vf_info(hwfn, i, true); + if (!info) + continue; + + /* Update data on bulletin board */ + if (info->is_trusted_configured) + mac = qed_iov_bulletin_get_mac(hwfn, i); + else + mac = qed_iov_bulletin_get_forced_mac(hwfn, i); + + if (qed_pf_validate_req_vf_mac(hwfn, mac, info)) { + DP_VERBOSE(hwfn, + QED_MSG_IOV, + "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n", + i, + hwfn->cdev->p_iov_info->first_vf_in_pf + i); + + /* Update bulletin board with MAC */ + qed_set_bulletin_mac(hwfn, info, i); + update = true; + } + + if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^ + info->forced_vlan) { + DP_VERBOSE(hwfn, + QED_MSG_IOV, + "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n", + info->forced_vlan, + i, + hwfn->cdev->p_iov_info->first_vf_in_pf + i); + qed_iov_bulletin_set_forced_vlan(hwfn, + info->forced_vlan, i); + update = true; + } + + if (update) + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } +} + +static void qed_handle_bulletin_post(struct qed_hwfn *hwfn) +{ + struct qed_ptt *ptt; + int i; + + ptt = qed_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, "Failed allocating a ptt entry\n"); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + return; + } + + qed_for_each_vf(hwfn, i) + qed_iov_post_vf_bulletin(hwfn, i, ptt); + + qed_ptt_release(hwfn, ptt); +} + +static void qed_update_mac_for_vf_trust_change(struct qed_hwfn *hwfn, int vf_id) +{ + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 *force_mac; + int i; + + vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true); + vf = qed_iov_get_vf_info(hwfn, vf_id, true); + + if (!vf_info || !vf) + return; + + /* Force MAC converted to generic MAC in case of VF trust 
on */ + if (vf_info->is_trusted_configured && + (vf->bulletin.p_virt->valid_bitmap & BIT(MAC_ADDR_FORCED))) { + force_mac = qed_iov_bulletin_get_forced_mac(hwfn, vf_id); + + if (force_mac) { + /* Clear existing shadow copy of MAC to have a clean + * slate. + */ + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + vf_info->mac)) { + eth_zero_addr(vf->shadow_config.macs[i]); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow MAC %pM removed for VF 0x%02x, VF trust mode is ON\n", + vf_info->mac, vf_id); + break; + } + } + + ether_addr_copy(vf_info->mac, force_mac); + eth_zero_addr(vf_info->forced_mac); + vf->bulletin.p_virt->valid_bitmap &= + ~BIT(MAC_ADDR_FORCED); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } + } + + /* Update shadow copy with VF MAC when trust mode is turned off */ + if (!vf_info->is_trusted_configured) { + u8 empty_mac[ETH_ALEN]; + + eth_zero_addr(empty_mac); + for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) { + if (ether_addr_equal(vf->shadow_config.macs[i], + empty_mac)) { + ether_addr_copy(vf->shadow_config.macs[i], + vf_info->mac); + DP_VERBOSE(hwfn, QED_MSG_IOV, + "Shadow is updated with %pM for VF 0x%02x, VF trust mode is OFF\n", + vf_info->mac, vf_id); + break; + } + } + /* Clear bulletin when trust mode is turned off, + * to have a clean slate for next (normal) operations. + */ + qed_iov_bulletin_set_mac(hwfn, empty_mac, vf_id); + qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG); + } +} + +static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) +{ + struct qed_sp_vport_update_params params; + struct qed_filter_accept_flags *flags; + struct qed_public_vf_info *vf_info; + struct qed_vf_info *vf; + u8 mask; + int i; + + mask = QED_ACCEPT_UCAST_UNMATCHED | QED_ACCEPT_MCAST_UNMATCHED; + flags = ¶ms.accept_flags; + + qed_for_each_vf(hwfn, i) { + /* Need to make sure current requested configuration didn't + * flip so that we'll end up configuring something that's not + * needed. 
+ */ + vf_info = qed_iov_get_public_vf_info(hwfn, i, true); + if (vf_info->is_trusted_configured == + vf_info->is_trusted_request) + continue; + vf_info->is_trusted_configured = vf_info->is_trusted_request; + + /* Handle forced MAC mode */ + qed_update_mac_for_vf_trust_change(hwfn, i); + + /* Validate that the VF has a configured vport */ + vf = qed_iov_get_vf_info(hwfn, i, true); + if (!vf || !vf->vport_instance) + continue; + + memset(¶ms, 0, sizeof(params)); + params.opaque_fid = vf->opaque_fid; + params.vport_id = vf->vport_id; + + params.update_ctl_frame_check = 1; + params.mac_chk_en = !vf_info->is_trusted_configured; + params.update_accept_any_vlan_flg = 0; + + if (vf_info->accept_any_vlan && vf_info->forced_vlan) { + params.update_accept_any_vlan_flg = 1; + params.accept_any_vlan = vf_info->accept_any_vlan; + } + + if (vf_info->rx_accept_mode & mask) { + flags->update_rx_mode_config = 1; + flags->rx_accept_filter = vf_info->rx_accept_mode; + } + + if (vf_info->tx_accept_mode & mask) { + flags->update_tx_mode_config = 1; + flags->tx_accept_filter = vf_info->tx_accept_mode; + } + + /* Remove if needed; Otherwise this would set the mask */ + if (!vf_info->is_trusted_configured) { + flags->rx_accept_filter &= ~mask; + flags->tx_accept_filter &= ~mask; + params.accept_any_vlan = false; + } + + if (flags->update_rx_mode_config || + flags->update_tx_mode_config || + params.update_ctl_frame_check || + params.update_accept_any_vlan_flg) { + DP_VERBOSE(hwfn, QED_MSG_IOV, + "vport update config for %s VF[abs 0x%x rel 0x%x]\n", + vf_info->is_trusted_configured ? "trusted" : "untrusted", + vf->abs_vf_id, vf->relative_vf_id); + qed_sp_vport_update(hwfn, ¶ms, + QED_SPQ_MODE_EBLOCK, NULL); + } + } +} + +static void qed_iov_pf_task(struct work_struct *work) + +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + iov_task.work); + int rc; + + if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) + return; + + if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) { + struct qed_ptt *ptt = qed_ptt_acquire(hwfn); + + if (!ptt) { + qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); + return; + } + + rc = qed_iov_vf_flr_cleanup(hwfn, ptt); + if (rc) + qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG); + + qed_ptt_release(hwfn, ptt); + } + + if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags)) + qed_handle_vf_msg(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, + &hwfn->iov_task_flags)) + qed_handle_pf_set_vf_unicast(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG, + &hwfn->iov_task_flags)) + qed_handle_bulletin_post(hwfn); + + if (test_and_clear_bit(QED_IOV_WQ_TRUST_FLAG, &hwfn->iov_task_flags)) + qed_iov_handle_trust_change(hwfn); +} + +void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) +{ + int i; + + for_each_hwfn(cdev, i) { + if (!cdev->hwfns[i].iov_wq) + continue; + + if (schedule_first) { + qed_schedule_iov(&cdev->hwfns[i], + QED_IOV_WQ_STOP_WQ_FLAG); + cancel_delayed_work_sync(&cdev->hwfns[i].iov_task); + } + + destroy_workqueue(cdev->hwfns[i].iov_wq); + } +} + +int qed_iov_wq_start(struct qed_dev *cdev) +{ + char name[NAME_SIZE]; + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + + /* PFs needs a dedicated workqueue only if they support IOV. + * VFs always require one. 
+ */ + if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn)) + continue; + + snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x", + cdev->pdev->bus->number, + PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id); + + p_hwfn->iov_wq = create_singlethread_workqueue(name); + if (!p_hwfn->iov_wq) { + DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n"); + return -ENOMEM; + } + + if (IS_PF(cdev)) + INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task); + else + INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task); + } + + return 0; +} + +const struct qed_iov_hv_ops qed_iov_ops_pass = { + .configure = &qed_sriov_configure, + .set_mac = &qed_sriov_pf_set_mac, + .set_vlan = &qed_sriov_pf_set_vlan, + .get_config = &qed_get_vf_config, + .set_link_state = &qed_set_vf_link_state, + .set_spoof = &qed_spoof_configure, + .set_rate = &qed_set_vf_rate, + .set_trust = &qed_set_vf_trust, +}; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h new file mode 100644 index 0000000000..6ee2493de1 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -0,0 +1,501 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. + */ + +#ifndef _QED_SRIOV_H +#define _QED_SRIOV_H +#include <linux/types.h> +#include "qed_vf.h" + +#define QED_ETH_VF_NUM_MAC_FILTERS 1 +#define QED_ETH_VF_NUM_VLAN_FILTERS 2 +#define QED_VF_ARRAY_LENGTH (3) + +#ifdef CONFIG_QED_SRIOV +#define IS_VF(cdev) ((cdev)->b_is_vf) +#define IS_PF(cdev) (!((cdev)->b_is_vf)) +#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) +#else +#define IS_VF(cdev) (0) +#define IS_PF(cdev) (1) +#define IS_PF_SRIOV(p_hwfn) (0) +#endif +#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) + +#define QED_MAX_VF_CHAINS_PER_PF 16 + +#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \ + (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS) + +enum qed_iov_vport_update_flag { + QED_IOV_VP_UPDATE_ACTIVATE, + QED_IOV_VP_UPDATE_VLAN_STRIP, + QED_IOV_VP_UPDATE_TX_SWITCH, + QED_IOV_VP_UPDATE_MCAST, + QED_IOV_VP_UPDATE_ACCEPT_PARAM, + QED_IOV_VP_UPDATE_RSS, + QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN, + QED_IOV_VP_UPDATE_SGE_TPA, + QED_IOV_VP_UPDATE_MAX, +}; + +struct qed_public_vf_info { + /* These copies will later be reflected in the bulletin board, + * but this copy should be newer. + */ + u8 forced_mac[ETH_ALEN]; + u16 forced_vlan; + u8 mac[ETH_ALEN]; + + /* IFLA_VF_LINK_STATE_<X> */ + int link_state; + + /* Currently configured Tx rate in MB/sec. 0 if unconfigured */ + int tx_rate; + + /* Trusted VFs can configure promiscuous mode. + * Also store shadow promisc configuration if needed. + */ + bool is_trusted_configured; + bool is_trusted_request; + u8 rx_accept_mode; + u8 tx_accept_mode; + bool accept_any_vlan; +}; + +struct qed_iov_vf_init_params { + u16 rel_vf_id; + + /* Number of requested Queues; Currently, don't support different + * number of Rx/Tx queues. + */ + + u16 num_queues; + + /* Allow the client to choose which qzones to use for Rx/Tx, + * and which queue_base to use for Tx queues on a per-queue basis. + * Notice values should be relative to the PF resources. + */ + u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF]; + u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF]; +}; + +/* This struct is part of qed_dev and contains data relevant to all hwfns; + * Initialized only if SR-IOV cpabability is exposed in PCIe config space. 
+ */ +struct qed_hw_sriov_info { + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs associated with the PF */ + u16 num_vfs; /* number of vfs that have been started */ + u16 initial_vfs; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device_id; /* VF device id */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + + u32 first_vf_in_pf; +}; + +/* This mailbox is maintained per VF in its PF contains all information + * required for sending / receiving a message. + */ +struct qed_iov_vf_mbx { + union vfpf_tlvs *req_virt; + dma_addr_t req_phys; + union pfvf_tlvs *reply_virt; + dma_addr_t reply_phys; + + /* Address in VF where a pending message is located */ + dma_addr_t pending_req; + + /* Message from VF awaits handling */ + bool b_pending_msg; + + u8 *offset; + + /* saved VF request header */ + struct vfpf_first_tlv first_tlv; +}; + +#define QED_IOV_LEGACY_QID_RX (0) +#define QED_IOV_LEGACY_QID_TX (1) +#define QED_IOV_QID_INVALID (0xFE) + +struct qed_vf_queue_cid { + bool b_is_tx; + struct qed_queue_cid *p_cid; +}; + +/* Describes a qzone associated with the VF */ +struct qed_vf_queue { + u16 fw_rx_qid; + u16 fw_tx_qid; + + struct qed_vf_queue_cid cids[MAX_QUEUES_PER_QZONE]; +}; + +enum vf_state { + VF_FREE = 0, /* VF ready to be acquired holds no resc */ + VF_ACQUIRED, /* VF, acquired, but not initialized */ + VF_ENABLED, /* VF, Enabled */ + VF_RESET, /* VF, FLR'd, pending cleanup */ + VF_STOPPED /* VF, Stopped */ +}; + +struct qed_vf_vlan_shadow { + bool used; + u16 vid; +}; + +struct qed_vf_shadow_config { + /* Shadow copy of all guest vlans */ + struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1]; + + /* Shadow copy of all configured MACs; Empty if forcing MACs */ + u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN]; + u8 inner_vlan_removal; +}; + +/* PFs maintain an array of this structure, per VF */ +struct qed_vf_info { + struct qed_iov_vf_mbx vf_mbx; + enum vf_state state; + bool b_init; + bool b_malicious; + u8 to_disable; + + struct qed_bulletin bulletin; + dma_addr_t vf_bulletin; + + /* PF saves a copy of the last VF acquire message */ + struct vfpf_acquire_tlv acquire; + + u32 concrete_fid; + u16 opaque_fid; + u16 mtu; + + u8 vport_id; + u8 relative_vf_id; + u8 abs_vf_id; +#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \ + (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ + (p_vf)->abs_vf_id) + + u8 vport_instance; + u8 num_rxqs; + u8 num_txqs; + + u16 rx_coal; + u16 tx_coal; + + u8 num_sbs; + + u8 num_mac_filters; + u8 num_vlan_filters; + + struct qed_vf_queue vf_queues[QED_MAX_VF_CHAINS_PER_PF]; + u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF]; + u8 num_active_rxqs; + struct qed_public_vf_info p_vf_info; + bool spoof_chk; + bool req_spoofchk_val; + + /* Stores the configuration requested by VF */ + struct qed_vf_shadow_config shadow_config; + + /* A bitfield using bulletin's valid-map bits, used to indicate + * which of the bulletin board features have been configured. + */ + u64 configured_features; +#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \ + (1 << VLAN_ADDR_FORCED)) +}; + +/* This structure is part of qed_hwfn and used only for PFs that have sriov + * capability enabled. 
+ */ +struct qed_pf_iov { + struct qed_vf_info vfs_array[MAX_NUM_VFS]; + u64 pending_flr[QED_VF_ARRAY_LENGTH]; + + /* Allocate message address continuosuly and split to each VF */ + void *mbx_msg_virt_addr; + dma_addr_t mbx_msg_phys_addr; + u32 mbx_msg_size; + void *mbx_reply_virt_addr; + dma_addr_t mbx_reply_phys_addr; + u32 mbx_reply_size; + void *p_bulletins; + dma_addr_t bulletins_phys; + u32 bulletins_size; +}; + +enum qed_iov_wq_flag { + QED_IOV_WQ_MSG_FLAG, + QED_IOV_WQ_SET_UNICAST_FILTER_FLAG, + QED_IOV_WQ_BULLETIN_UPDATE_FLAG, + QED_IOV_WQ_STOP_WQ_FLAG, + QED_IOV_WQ_FLR_FLAG, + QED_IOV_WQ_TRUST_FLAG, + QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, +}; + +extern const struct qed_iov_hv_ops qed_iov_ops_pass; + +#ifdef CONFIG_QED_SRIOV +/** + * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled + * VF id is valid. + * else any VF id less than max_vfs is valid. + * + * @p_hwfn: HW device data. + * @rel_vf_id: Relative VF ID. + * @b_enabled_only: consider only enabled VF. + * @b_non_malicious: true iff we want to validate vf isn't malicious. + * + * Return: bool - true for valid VF ID + */ +bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only, bool b_non_malicious); + +/** + * qed_iov_get_next_active_vf(): Given a VF index, return index of + * next [including that] active VF. + * + * @p_hwfn: HW device data. + * @rel_vf_id: VF ID. + * + * Return: MAX_NUM_VFS in case no further active VFs, otherwise index. + */ +u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); + +void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, + int vfid, u16 vxlan_port, u16 geneve_port); + +/** + * qed_iov_hw_info(): Read sriov related information and allocated resources + * reads from configuration space, shmem, etc. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_iov_hw_info(struct qed_hwfn *p_hwfn); + +/** + * qed_add_tlv(): place a given tlv on the tlv buffer at next offset + * + * @p_hwfn: HW device data. + * @offset: offset. + * @type: Type + * @length: Length. + * + * Return: pointer to the newly placed tlv + */ +void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); + +/** + * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer + * + * @p_hwfn: HW device data. + * @tlvs_list: Tlvs_list. + * + * Return: Void. + */ +void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); + +/** + * qed_sriov_vfpf_malicious(): Handle malicious VF/PF. + * + * @p_hwfn: HW device data. + * @p_data: Pointer to data. + * + * Return: Void. + */ +void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data); + +/** + * qed_sriov_eqe_event(): Callback for SRIOV events. + * + * @p_hwfn: HW device data. + * @opcode: Opcode. + * @echo: Echo. + * @data: data + * @fw_return_code: FW return code. + * + * Return: Int. + */ +int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo, + union event_ring_data *data, u8 fw_return_code); + +/** + * qed_iov_alloc(): allocate sriov related resources + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_iov_alloc(struct qed_hwfn *p_hwfn); + +/** + * qed_iov_setup(): setup sriov related resources + * + * @p_hwfn: HW device data. + * + * Return: Void. + */ +void qed_iov_setup(struct qed_hwfn *p_hwfn); + +/** + * qed_iov_free(): free sriov related resources + * + * @p_hwfn: HW device data. + * + * Return: Void. 
+ */ +void qed_iov_free(struct qed_hwfn *p_hwfn); + +/** + * qed_iov_free_hw_info(): free sriov related memory that was + * allocated during hw_prepare + * + * @cdev: Qed dev pointer. + * + * Return: Void. + */ +void qed_iov_free_hw_info(struct qed_dev *cdev); + +/** + * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed. + * + * @p_hwfn: HW device data. + * @disabled_vfs: bitmask of all VFs on path that were FLRed + * + * Return: true iff one of the PF's vfs got FLRed. false otherwise. + */ +bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); + +/** + * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer. + * + * @p_hwfn: HW device data. + * @p_tlvs_list: Pointer to tlvs list + * @req_type: Type of TLV + * + * Return: pointer to tlv type if found, otherwise returns NULL. + */ +void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type); + +void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first); +int qed_iov_wq_start(struct qed_dev *cdev); + +void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag); +void qed_vf_start_iov_wq(struct qed_dev *cdev); +int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled); +void qed_inform_vf_link_state(struct qed_hwfn *hwfn); +#else +static inline bool +qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, + int rel_vf_id, bool b_enabled_only, bool b_non_malicious) +{ + return false; +} + +static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return MAX_NUM_VFS; +} + +static inline void +qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, + u16 vxlan_port, u16 geneve_port) +{ +} + +static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn) +{ + return 0; +} + +static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn) +{ + return 0; +} + +static inline void qed_iov_setup(struct qed_hwfn *p_hwfn) +{ +} + +static inline void qed_iov_free(struct qed_hwfn *p_hwfn) +{ +} + +static inline void qed_iov_free_hw_info(struct qed_dev *cdev) +{ +} + +static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, + u32 *disabled_vfs) +{ + return false; +} + +static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first) +{ +} + +static inline int qed_iov_wq_start(struct qed_dev *cdev) +{ + return 0; +} + +static inline void qed_schedule_iov(struct qed_hwfn *hwfn, + enum qed_iov_wq_flag flag) +{ +} + +static inline void qed_vf_start_iov_wq(struct qed_dev *cdev) +{ +} + +static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled) +{ + return 0; +} + +static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn) +{ +} + +static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn, + struct fw_err_data *p_data) +{ +} + +static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, + __le16 echo, union event_ring_data *data, + u8 fw_return_code) +{ + return 0; +} +#endif + +#define qed_for_each_vf(_p_hwfn, _i) \ + for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \ + _i < MAX_NUM_VFS; \ + _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1)) + +#endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c new file mode 100644 index 0000000000..0e265ed1f5 --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c @@ -0,0 +1,1707 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + * Copyright (c) 2019-2020 Marvell International Ltd. 
+ */ + +#include <linux/crc32.h> +#include <linux/etherdevice.h> +#include "qed.h" +#include "qed_sriov.h" +#include "qed_vf.h" + +static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + void *p_tlv; + + /* This lock is released when we receive PF's response + * in qed_send_msg2pf(). + * So, qed_vf_pf_prep() and qed_send_msg2pf() + * must come in sequence. + */ + mutex_lock(&(p_iov->mutex)); + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "preparing to send 0x%04x tlv over vf pf channel\n", + type); + + /* Reset Request offset */ + p_iov->offset = (u8 *)p_iov->vf2pf_request; + + /* Clear mailbox - both request and reply */ + memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs)); + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + + /* Init type and length */ + p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length); + + /* Init first tlv header */ + ((struct vfpf_first_tlv *)p_tlv)->reply_address = + (u64)p_iov->pf2vf_reply_phys; + + return p_tlv; +} + +static void qed_vf_pf_req_end(struct qed_hwfn *p_hwfn, int req_status) +{ + union pfvf_tlvs *resp = p_hwfn->vf_iov_info->pf2vf_reply; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF request status = 0x%x, PF reply status = 0x%x\n", + req_status, resp->default_resp.hdr.status); + + mutex_unlock(&(p_hwfn->vf_iov_info->mutex)); +} + +#define QED_VF_CHANNEL_USLEEP_ITERATIONS 90 +#define QED_VF_CHANNEL_USLEEP_DELAY 100 +#define QED_VF_CHANNEL_MSLEEP_ITERATIONS 10 +#define QED_VF_CHANNEL_MSLEEP_DELAY 25 + +static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done) +{ + union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; + struct ustorm_trigger_vf_zone trigger; + struct ustorm_vf_zone *zone_data; + int iter, rc = 0; + + zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B; + + /* output tlvs list */ + qed_dp_tlv_list(p_hwfn, p_req); + + /* Send TLVs over HW channel */ + memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); + trigger.vf_pf_msg_valid = 1; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n", + GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PFID), + upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), + lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys), + &zone_data->non_trigger.vf_pf_msg_addr, + *((u32 *)&trigger), &zone_data->trigger); + + REG_WR(p_hwfn, + (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, + lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + REG_WR(p_hwfn, + (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, + upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + /* The message data must be written first, to prevent trigger before + * data is written. + */ + wmb(); + + REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger)); + + /* When PF would be done with the response, it would write back to the + * `done' address from a coherent DMA zone. Poll until then. 
+ */ + + iter = QED_VF_CHANNEL_USLEEP_ITERATIONS; + while (!*done && iter--) { + udelay(QED_VF_CHANNEL_USLEEP_DELAY); + dma_rmb(); + } + + iter = QED_VF_CHANNEL_MSLEEP_ITERATIONS; + while (!*done && iter--) { + msleep(QED_VF_CHANNEL_MSLEEP_DELAY); + dma_rmb(); + } + + if (!*done) { + DP_NOTICE(p_hwfn, + "VF <-- PF Timeout [Type %d]\n", + p_req->first_tlv.tl.type); + rc = -EBUSY; + } else { + if ((*done != PFVF_STATUS_SUCCESS) && + (*done != PFVF_STATUS_NO_RESOURCE)) + DP_NOTICE(p_hwfn, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + else + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + } + + return rc; +} + +static void qed_vf_pf_add_qid(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_qid_tlv *p_qid_tlv; + + /* Only add QIDs for the queue if it was negotiated with PF */ + if (!(p_iov->acquire_resp.pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + return; + + p_qid_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_QID, sizeof(*p_qid_tlv)); + p_qid_tlv->qid = p_cid->qid_usage_idx; +} + +static int _qed_vf_pf_release(struct qed_hwfn *p_hwfn, bool b_final) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + u32 size; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + + if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS) + rc = -EAGAIN; + + qed_vf_pf_req_end(p_hwfn, rc); + if (!b_final) + return rc; + + p_hwfn->b_int_enabled = 0; + + if (p_iov->vf2pf_request) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + p_iov->vf2pf_request, + p_iov->vf2pf_request_phys); + if (p_iov->pf2vf_reply) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); + + if (p_iov->bulletin.p_virt) { + size = sizeof(struct qed_bulletin_content); + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + size, + p_iov->bulletin.p_virt, p_iov->bulletin.phys); + } + + kfree(p_hwfn->vf_iov_info); + p_hwfn->vf_iov_info = NULL; + + return rc; +} + +int qed_vf_pf_release(struct qed_hwfn *p_hwfn) +{ + return _qed_vf_pf_release(p_hwfn, true); +} + +#define VF_ACQUIRE_THRESH 3 +static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn, + struct vf_pf_resc_request *p_req, + struct pf_vf_resc *p_resp) +{ + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "PF unwilling to fulfill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]. 
Try PF recommended amount\n", + p_req->num_rxqs, + p_resp->num_rxqs, + p_req->num_rxqs, + p_resp->num_txqs, + p_req->num_sbs, + p_resp->num_sbs, + p_req->num_mac_filters, + p_resp->num_mac_filters, + p_req->num_vlan_filters, + p_resp->num_vlan_filters, + p_req->num_mc_filters, + p_resp->num_mc_filters, p_req->num_cids, p_resp->num_cids); + + /* humble our request */ + p_req->num_txqs = p_resp->num_txqs; + p_req->num_rxqs = p_resp->num_rxqs; + p_req->num_sbs = p_resp->num_sbs; + p_req->num_mac_filters = p_resp->num_mac_filters; + p_req->num_vlan_filters = p_resp->num_vlan_filters; + p_req->num_mc_filters = p_resp->num_mc_filters; + p_req->num_cids = p_resp->num_cids; +} + +static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + struct vf_pf_resc_request *p_resc; + u8 retry_cnt = VF_ACQUIRE_THRESH; + bool resources_acquired = false; + struct vfpf_acquire_tlv *req; + int rc = 0, attempts = 0; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + p_resc = &req->resc_request; + + /* starting filling the request */ + req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; + + p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF; + p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS; + p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS; + p_resc->num_cids = QED_ETH_VF_DEFAULT_NUM_CIDS; + + req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX; + req->vfdev_info.fw_major = FW_MAJOR_VERSION; + req->vfdev_info.fw_minor = FW_MINOR_VERSION; + req->vfdev_info.fw_revision = FW_REVISION_VERSION; + req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; + req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR; + req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR; + + /* Fill capability field with any non-deprecated config we support */ + req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G; + + /* If we've mapped the doorbell bar, try using queue qids */ + if (p_iov->b_doorbell_bar) { + req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_PHYSICAL_BAR | + VFPF_ACQUIRE_CAP_QUEUE_QIDS; + p_resc->num_cids = QED_ETH_VF_MAX_NUM_CIDS; + } + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = p_iov->bulletin.phys; + req->bulletin_size = p_iov->bulletin.size; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + while (!resources_acquired) { + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, "attempting to acquire resources\n"); + + /* Clear response buffer, as this might be a re-send */ + memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + + /* send acquire request */ + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + + /* Re-try acquire in case of vf-pf hw channel timeout */ + if (retry_cnt && rc == -EBUSY) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF retrying to acquire due to VPC timeout\n"); + retry_cnt--; + continue; + } + + if (rc) + goto exit; + + /* copy acquire response from buffer to p_hwfn */ + memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp)); + + attempts++; + + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + /* PF agrees to allocate our resources */ + if (!(resp->pfdev_info.capabilities & + PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) { + /* It's possible legacy PF mistakenly accepted; + * 
but we don't care - simply mark it as + * legacy and continue. + */ + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; + } + DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n"); + resources_acquired = true; + } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE && + attempts < VF_ACQUIRE_THRESH) { + qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc, + &resp->resc); + } else if (resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) { + if (pfdev_info->major_fp_hsi && + (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) { + DP_NOTICE(p_hwfn, + "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n", + pfdev_info->major_fp_hsi, + pfdev_info->minor_fp_hsi, + ETH_HSI_VER_MAJOR, + ETH_HSI_VER_MINOR, + pfdev_info->major_fp_hsi); + rc = -EINVAL; + goto exit; + } + + if (!pfdev_info->major_fp_hsi) { + if (req->vfdev_info.capabilities & + VFPF_ACQUIRE_CAP_PRE_FP_HSI) { + DP_NOTICE(p_hwfn, + "PF uses very old drivers. Please change to a VF driver using no later than 8.8.x.x.\n"); + rc = -EINVAL; + goto exit; + } else { + DP_INFO(p_hwfn, + "PF is old - try re-acquire to see if it supports FW-version override\n"); + req->vfdev_info.capabilities |= + VFPF_ACQUIRE_CAP_PRE_FP_HSI; + continue; + } + } + + /* If PF/VF are using same Major, PF must have had + * it's reasons. Simply fail. + */ + DP_NOTICE(p_hwfn, "PF rejected acquisition by VF\n"); + rc = -EINVAL; + goto exit; + } else { + DP_ERR(p_hwfn, + "PF returned error %d to VF acquisition request\n", + resp->hdr.status); + rc = -EAGAIN; + goto exit; + } + } + + /* Mark the PF as legacy, if needed */ + if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_PRE_FP_HSI) + p_iov->b_pre_fp_hsi = true; + + /* In case PF doesn't support multi-queue Tx, update the number of + * CIDs to reflect the number of queues [older PFs didn't fill that + * field]. + */ + if (!(resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_QUEUE_QIDS)) + resp->resc.num_cids = resp->resc.num_rxqs + resp->resc.num_txqs; + + /* Update bulletin board size with response from PF */ + p_iov->bulletin.size = resp->bulletin_size; + + /* get HW info */ + p_hwfn->cdev->type = resp->pfdev_info.dev_type; + p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev; + + p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff; + + /* Learn of the possibility of CMT */ + if (IS_LEAD_HWFN(p_hwfn)) { + if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) { + DP_NOTICE(p_hwfn, "100g VF\n"); + p_hwfn->cdev->num_hwfns = 2; + } + } + + if (!p_iov->b_pre_fp_hsi && + (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) { + DP_INFO(p_hwfn, + "PF is using older fastpath HSI; %02x.%02x is configured\n", + ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi); + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id) +{ + u32 bar_size; + + /* Regview size is fixed */ + if (bar_id == BAR_ID_0) + return 1 << 17; + + /* Doorbell is received from PF */ + bar_size = p_hwfn->vf_iov_info->acquire_resp.pfdev_info.bar_size; + if (bar_size) + return 1 << bar_size; + return 0; +} + +int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) +{ + struct qed_hwfn *p_lead = QED_LEADING_HWFN(p_hwfn->cdev); + struct qed_vf_iov *p_iov; + u32 reg; + int rc; + + /* Set number of hwfns - might be overridden once leading hwfn learns + * actual configuration from PF. 
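[Editor's aside] The acquire handshake above is essentially a bounded negotiation: ask for the maximum, and when the PF answers NO_RESOURCE, shrink the request to the PF-recommended amounts and try again a limited number of times. The standalone model below captures just that loop; the resource struct, status codes and the toy pf_acquire() are illustrative stand-ins, not the driver's TLV exchange.

#include <stdio.h>

enum status { STATUS_SUCCESS, STATUS_NO_RESOURCE, STATUS_FAILURE };

struct resc { int rxqs, txqs, sbs; };

#define ACQUIRE_THRESH 3	/* bounded number of shrink-and-retry rounds */

/* Toy PF: reports NO_RESOURCE when the request exceeds what it has,
 * echoing back the amounts it is willing to grant. */
static enum status pf_acquire(const struct resc *req, struct resc *granted)
{
	const struct resc avail = { 4, 4, 4 };

	*granted = avail;
	if (req->rxqs > avail.rxqs || req->txqs > avail.txqs ||
	    req->sbs > avail.sbs)
		return STATUS_NO_RESOURCE;
	*granted = *req;
	return STATUS_SUCCESS;
}

static int vf_acquire(struct resc *req, struct resc *out)
{
	int attempts = 0;

	for (;;) {
		struct resc resp;
		enum status st = pf_acquire(req, &resp);

		attempts++;
		if (st == STATUS_SUCCESS) {
			*out = resp;
			return 0;
		}
		if (st == STATUS_NO_RESOURCE && attempts < ACQUIRE_THRESH) {
			*req = resp;	/* "humble our request" and retry */
			continue;
		}
		return -1;		/* incompatible or out of retries */
	}
}

int main(void)
{
	struct resc req = { 16, 16, 16 }, granted;

	if (!vf_acquire(&req, &granted))
		printf("acquired %d rxqs / %d txqs / %d sbs\n",
		       granted.rxqs, granted.txqs, granted.sbs);
	return 0;
}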
+ */ + if (IS_LEAD_HWFN(p_hwfn)) + p_hwfn->cdev->num_hwfns = 1; + + reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS; + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg); + + reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS; + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg); + + /* Allocate vf sriov info */ + p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL); + if (!p_iov) + return -ENOMEM; + + /* Doorbells are tricky; Upper-layer has alreday set the hwfn doorbell + * value, but there are several incompatibily scenarios where that + * would be incorrect and we'd need to override it. + */ + if (!p_hwfn->doorbells) { + p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + } else if (p_hwfn == p_lead) { + /* For leading hw-function, value is always correct, but need + * to handle scenario where legacy PF would not support 100g + * mapped bars later. + */ + p_iov->b_doorbell_bar = true; + } else { + /* here, value would be correct ONLY if the leading hwfn + * received indication that mapped-bars are supported. + */ + if (p_lead->vf_iov_info->b_doorbell_bar) + p_iov->b_doorbell_bar = true; + else + p_hwfn->doorbells = (u8 __iomem *) + p_hwfn->regview + PXP_VF_BAR0_START_DQ; + } + + /* Allocate vf2pf msg */ + p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + &p_iov->vf2pf_request_phys, + GFP_KERNEL); + if (!p_iov->vf2pf_request) + goto free_p_iov; + + p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + &p_iov->pf2vf_reply_phys, + GFP_KERNEL); + if (!p_iov->pf2vf_reply) + goto free_vf2pf_request; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n", + p_iov->vf2pf_request, + (u64)p_iov->vf2pf_request_phys, + p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys); + + /* Allocate Bulletin board */ + p_iov->bulletin.size = sizeof(struct qed_bulletin_content); + p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev, + p_iov->bulletin.size, + &p_iov->bulletin.phys, + GFP_KERNEL); + if (!p_iov->bulletin.p_virt) + goto free_pf2vf_reply; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n", + p_iov->bulletin.p_virt, + (u64)p_iov->bulletin.phys, p_iov->bulletin.size); + + mutex_init(&p_iov->mutex); + + p_hwfn->vf_iov_info = p_iov; + + p_hwfn->hw_info.personality = QED_PCI_ETH; + + rc = qed_vf_pf_acquire(p_hwfn); + + /* If VF is 100g using a mapped bar and PF is too old to support that, + * acquisition would succeed - but the VF would have no way knowing + * the size of the doorbell bar configured in HW and thus will not + * know how to split it for 2nd hw-function. + * In this case we re-try without the indication of the mapped + * doorbell. 
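[Editor's aside] The allocation sequence above uses the usual goto-unwind idiom: each resource acquired so far adds one label to a cleanup ladder (the labels appear a little further down), and a later failure jumps into the ladder at the right depth. A minimal userspace sketch of the same shape, with malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent():

#include <stdlib.h>

struct chan {
	void *request;
	void *reply;
	void *bulletin;
};

/* Allocate three buffers; on any failure, free exactly what was already
 * allocated by falling through the labels in reverse order. */
static struct chan *chan_alloc(size_t req_sz, size_t rep_sz, size_t bul_sz)
{
	struct chan *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;

	c->request = malloc(req_sz);
	if (!c->request)
		goto free_chan;

	c->reply = malloc(rep_sz);
	if (!c->reply)
		goto free_request;

	c->bulletin = malloc(bul_sz);
	if (!c->bulletin)
		goto free_reply;

	return c;

free_reply:
	free(c->reply);
free_request:
	free(c->request);
free_chan:
	free(c);
	return NULL;
}

int main(void)
{
	struct chan *c = chan_alloc(1024, 1024, 256);

	if (c) {
		free(c->bulletin);
		free(c->reply);
		free(c->request);
		free(c);
	}
	return 0;
}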
+ */ + if (!rc && p_iov->b_doorbell_bar && + !qed_vf_hw_bar_size(p_hwfn, BAR_ID_1) && + (p_hwfn->cdev->num_hwfns > 1)) { + rc = _qed_vf_pf_release(p_hwfn, false); + if (rc) + return rc; + + p_iov->b_doorbell_bar = false; + p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + rc = qed_vf_pf_acquire(p_hwfn); + } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Regview [%p], Doorbell [%p], Device-doorbell [%p]\n", + p_hwfn->regview, p_hwfn->doorbells, p_hwfn->cdev->doorbells); + + return rc; + +free_pf2vf_reply: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union pfvf_tlvs), + p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys); +free_vf2pf_request: + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + sizeof(union vfpf_tlvs), + p_iov->vf2pf_request, p_iov->vf2pf_request_phys); +free_p_iov: + kfree(p_iov); + + return -ENOMEM; +} + +#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A +#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) + +static void +__qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_src, + enum qed_tunn_mode mask, u8 *p_cls) +{ + if (p_src->b_update_mode) { + p_req->tun_mode_update_mask |= BIT(mask); + + if (p_src->b_mode_enabled) + p_req->tunn_mode |= BIT(mask); + } + + *p_cls = p_src->tun_cls; +} + +static void +qed_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req, + struct qed_tunn_update_type *p_src, + enum qed_tunn_mode mask, + u8 *p_cls, struct qed_tunn_update_udp_port *p_port, + u8 *p_update_port, u16 *p_udp_port) +{ + if (p_port->b_update_port) { + *p_update_port = 1; + *p_udp_port = p_port->port; + } + + __qed_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls); +} + +void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun) +{ + if (p_tun->vxlan.b_mode_enabled) + p_tun->vxlan.b_update_mode = true; + if (p_tun->l2_geneve.b_mode_enabled) + p_tun->l2_geneve.b_update_mode = true; + if (p_tun->ip_geneve.b_mode_enabled) + p_tun->ip_geneve.b_update_mode = true; + if (p_tun->l2_gre.b_mode_enabled) + p_tun->l2_gre.b_update_mode = true; + if (p_tun->ip_gre.b_mode_enabled) + p_tun->ip_gre.b_update_mode = true; + + p_tun->b_update_rx_cls = true; + p_tun->b_update_tx_cls = true; +} + +static void +__qed_vf_update_tunn_param(struct qed_tunn_update_type *p_tun, + u16 feature_mask, u8 tunn_mode, + u8 tunn_cls, enum qed_tunn_mode val) +{ + if (feature_mask & BIT(val)) { + p_tun->b_mode_enabled = tunn_mode; + p_tun->tun_cls = tunn_cls; + } else { + p_tun->b_mode_enabled = false; + } +} + +static void qed_vf_update_tunn_param(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_tun, + struct pfvf_update_tunn_param_tlv *p_resp) +{ + /* Update mode and classes provided by PF */ + u16 feat_mask = p_resp->tunn_feature_mask; + + __qed_vf_update_tunn_param(&p_tun->vxlan, feat_mask, + p_resp->vxlan_mode, p_resp->vxlan_clss, + QED_MODE_VXLAN_TUNN); + __qed_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask, + p_resp->l2geneve_mode, + p_resp->l2geneve_clss, + QED_MODE_L2GENEVE_TUNN); + __qed_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask, + p_resp->ipgeneve_mode, + p_resp->ipgeneve_clss, + QED_MODE_IPGENEVE_TUNN); + __qed_vf_update_tunn_param(&p_tun->l2_gre, feat_mask, + p_resp->l2gre_mode, p_resp->l2gre_clss, + QED_MODE_L2GRE_TUNN); + __qed_vf_update_tunn_param(&p_tun->ip_gre, feat_mask, + p_resp->ipgre_mode, p_resp->ipgre_clss, + QED_MODE_IPGRE_TUNN); + p_tun->geneve_port.port = p_resp->geneve_udp_port; + p_tun->vxlan_port.port = 
p_resp->vxlan_udp_port; + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x", + p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled, + p_tun->ip_geneve.b_mode_enabled, + p_tun->l2_gre.b_mode_enabled, p_tun->ip_gre.b_mode_enabled); +} + +int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_src) +{ + struct qed_tunnel_info *p_tun = &p_hwfn->cdev->tunnel; + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_update_tunn_param_tlv *p_resp; + struct vfpf_update_tunn_param_tlv *p_req; + int rc; + + p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM, + sizeof(*p_req)); + + if (p_src->b_update_rx_cls && p_src->b_update_tx_cls) + p_req->update_tun_cls = 1; + + qed_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, QED_MODE_VXLAN_TUNN, + &p_req->vxlan_clss, &p_src->vxlan_port, + &p_req->update_vxlan_port, + &p_req->vxlan_port); + qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve, + QED_MODE_L2GENEVE_TUNN, + &p_req->l2geneve_clss, &p_src->geneve_port, + &p_req->update_geneve_port, + &p_req->geneve_port); + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve, + QED_MODE_IPGENEVE_TUNN, + &p_req->ipgeneve_clss); + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre, + QED_MODE_L2GRE_TUNN, &p_req->l2gre_clss); + __qed_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre, + QED_MODE_IPGRE_TUNN, &p_req->ipgre_clss); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->tunn_param_resp; + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status); + + if (rc) + goto exit; + + if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) { + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Failed to update tunnel parameters\n"); + rc = -EINVAL; + } + + qed_vf_update_tunn_param(p_hwfn, p_tun, p_resp); +exit: + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + +int +qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; + struct vfpf_start_rxq_tlv *req; + u8 rx_qid = p_cid->rel.queue_id; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); + + req->rx_qid = rx_qid; + req->cqe_pbl_addr = cqe_pbl_addr; + req->cqe_pbl_size = cqe_pbl_size; + req->rxq_addr = bd_chain_phys_addr; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; + req->bd_max_bytes = bd_max_bytes; + req->stat_id = -1; + + /* If PF is legacy, we'll need to calculate producers ourselves + * as well as clean them. 
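[Editor's aside] What "calculate producers ourselves" amounts to in the legacy path is plain offset arithmetic: each queue's producer sits at a fixed stride from a common zone base, indexed by the hardware queue id. The sketch below models that with invented QZONE_BASE/QZONE_STRIDE values and a byte array standing in for the mapped BAR; the real base and stride come from the driver's MSTORM_QZONE_START()/MSTORM_QZONE_SIZE macros used in the code that follows.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative numbers only - the real zone base and stride come from
 * the HSI definitions. */
#define QZONE_BASE   0x1000u
#define QZONE_STRIDE 0x80u

/* Each queue's producer lives at a fixed stride from a common base, so
 * the address is simple arithmetic on the hardware queue id. */
static unsigned int qzone_prod_offset(unsigned int hw_qid)
{
	return QZONE_BASE + hw_qid * QZONE_STRIDE;
}

int main(void)
{
	static uint8_t bar[0x2000];	/* stand-in for the mapped register BAR */
	unsigned int off = qzone_prod_offset(3);
	unsigned int init_prod_val = 0;

	/* Initialise the producer to 0 before the queue is first used. */
	memcpy(bar + off, &init_prod_val, sizeof(init_prod_val));

	printf("queue 3 producer at BAR offset 0x%x\n", off);
	return 0;
}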
+ */ + if (p_iov->b_pre_fp_hsi) { + u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; + u32 init_prod_val = 0; + + *pp_prod = (u8 __iomem *) + p_hwfn->regview + + MSTORM_QZONE_START(p_hwfn->cdev) + + hw_qid * MSTORM_QZONE_SIZE; + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)(&init_prod_val)); + } + + qed_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->queue_start; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + + /* Learn the address of the producer from the response */ + if (!p_iov->b_pre_fp_hsi) { + u32 init_prod_val = 0; + + *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n", + rx_qid, *pp_prod, resp->offset); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), + (u32 *)&init_prod_val); + } +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, bool cqe_completion) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_rxqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); + + req->rx_qid = p_cid->rel.queue_id; + req->num_rxqs = 1; + req->cqe_completion = cqe_completion; + + qed_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int +qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_start_queue_resp_tlv *resp; + struct vfpf_start_txq_tlv *req; + u16 qid = p_cid->rel.queue_id; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); + + req->tx_qid = qid; + + /* Tx */ + req->pbl_addr = pbl_addr; + req->pbl_size = pbl_size; + req->hw_sb = p_cid->sb_igu_id; + req->sb_index = p_cid->sb_idx; + + qed_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->queue_start; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + + /* Modern PFs provide the actual offsets, while legacy + * provided only the queue id. 
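[Editor's aside] The selection that follows boils down to: a modern PF returns the doorbell offset directly in the start-queue response, while for a legacy PF the VF derives the offset from the CID itself (qed_db_addr_vf() in this driver). The sketch below shows only that decision; the DB_ICID/DB_DEMS bit positions are placeholders for illustration, not the real register layout.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical field positions for the legacy doorbell address; the real
 * shifts and masks live in the HSI headers. */
#define DB_ICID_SHIFT 0
#define DB_DEMS_SHIFT 24

static unsigned int legacy_db_offset(unsigned int cid, unsigned int dems)
{
	return (dems << DB_DEMS_SHIFT) | (cid << DB_ICID_SHIFT);
}

/* Pick the doorbell offset: trust the start-queue response when the PF is
 * modern, derive it from the CID when the PF is legacy. */
static unsigned int txq_db_offset(bool pf_is_legacy, unsigned int resp_offset,
				  unsigned int cid, unsigned int dems)
{
	return pf_is_legacy ? legacy_db_offset(cid, dems) : resp_offset;
}

int main(void)
{
	printf("modern PF: 0x%x\n", txq_db_offset(false, 0x7b0, 5, 1));
	printf("legacy PF: 0x%x\n", txq_db_offset(true, 0, 5, 1));
	return 0;
}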
+ */ + if (!p_iov->b_pre_fp_hsi) { + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset; + } else { + u8 cid = p_iov->acquire_resp.resc.cid[qid]; + + *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + + qed_db_addr_vf(cid, + DQ_DEMS_LEGACY); + } + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Txq[0x%02x.%02x]: doorbell at %p [offset 0x%08x]\n", + qid, p_cid->qid_usage_idx, *pp_doorbell, resp->offset); +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_txqs_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); + + req->tx_qid = p_cid->rel.queue_id; + req->num_txqs = 1; + + qed_vf_pf_add_qid(p_hwfn, p_cid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, u8 only_untagged) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_start_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc, i; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); + + req->mtu = mtu; + req->vport_id = vport_id; + req->inner_vlan_removal = inner_vlan_removal; + req->tpa_mode = tpa_mode; + req->max_buffers_per_cqe = max_buffers_per_cqe; + req->only_untagged = only_untagged; + + /* status blocks */ + for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) { + struct qed_sb_info *p_sb = p_hwfn->vf_iov_info->sbs_info[i]; + + if (p_sb) + req->sb_addr[i] = p_sb->sb_phys; + } + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +static bool +qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data, + u16 tlv) +{ + switch (tlv) { + case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE: + return !!(p_data->update_vport_active_rx_flg || + p_data->update_vport_active_tx_flg); + case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH: + return 
!!p_data->update_tx_switching_flg; + case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP: + return !!p_data->update_inner_vlan_removal_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN: + return !!p_data->update_accept_any_vlan_flg; + case CHANNEL_TLV_VPORT_UPDATE_MCAST: + return !!p_data->update_approx_mcast_flg; + case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM: + return !!(p_data->accept_flags.update_rx_mode_config || + p_data->accept_flags.update_tx_mode_config); + case CHANNEL_TLV_VPORT_UPDATE_RSS: + return !!p_data->rss_params; + case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA: + return !!p_data->sge_tpa_params; + default: + DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n", + tlv); + return false; + } +} + +static void +qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_data) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *p_resp; + u16 tlv; + + for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) { + if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv)) + continue; + + p_resp = (struct pfvf_def_resp_tlv *) + qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, + tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "TLV[%d] Configuration %s\n", + tlv, + (p_resp && p_resp->hdr.status) ? "succeeded" + : "failed"); + } +} + +int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_update_tlv *req; + struct pfvf_def_resp_tlv *resp; + u8 update_rx, update_tx; + u16 size, tlv; + int rc; + + resp = &p_iov->pf2vf_reply->default_resp; + + update_rx = p_params->update_vport_active_rx_flg; + update_tx = p_params->update_vport_active_tx_flg; + + /* clear mailbox and prep header tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); + + /* Prepare extended tlvs */ + if (update_rx || update_tx) { + struct vfpf_vport_update_activate_tlv *p_act_tlv; + + size = sizeof(struct vfpf_vport_update_activate_tlv); + p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + size); + + if (update_rx) { + p_act_tlv->update_rx = update_rx; + p_act_tlv->active_rx = p_params->vport_active_rx_flg; + } + + if (update_tx) { + p_act_tlv->update_tx = update_tx; + p_act_tlv->active_tx = p_params->vport_active_tx_flg; + } + } + + if (p_params->update_tx_switching_flg) { + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + + size = sizeof(struct vfpf_vport_update_tx_switch_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + tlv, size); + + p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; + } + + if (p_params->update_approx_mcast_flg) { + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + + size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); + p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_MCAST, size); + + memcpy(p_mcast_tlv->bins, p_params->bins, + sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; + update_tx = p_params->accept_flags.update_tx_mode_config; + + if (update_rx || update_tx) { + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + size = sizeof(struct vfpf_vport_update_accept_param_tlv); + p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); + + if (update_rx) { + 
p_accept_tlv->update_rx_mode = update_rx; + p_accept_tlv->rx_accept_filter = + p_params->accept_flags.rx_accept_filter; + } + + if (update_tx) { + p_accept_tlv->update_tx_mode = update_tx; + p_accept_tlv->tx_accept_filter = + p_params->accept_flags.tx_accept_filter; + } + } + + if (p_params->rss_params) { + struct qed_rss_params *rss_params = p_params->rss_params; + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + int i, table_size; + + size = sizeof(struct vfpf_vport_update_rss_tlv); + p_rss_tlv = qed_add_tlv(p_hwfn, + &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_RSS, size); + + if (rss_params->update_rss_config) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CONFIG_FLAG; + if (rss_params->update_rss_capabilities) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CAPS_FLAG; + if (rss_params->update_rss_ind_table) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_IND_TABLE_FLAG; + if (rss_params->update_rss_key) + p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG; + + p_rss_tlv->rss_enable = rss_params->rss_enable; + p_rss_tlv->rss_caps = rss_params->rss_caps; + p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; + + table_size = min_t(int, T_ETH_INDIRECTION_TABLE_SIZE, + 1 << p_rss_tlv->rss_table_size_log); + for (i = 0; i < table_size; i++) { + struct qed_queue_cid *p_queue; + + p_queue = rss_params->rss_ind_table[i]; + p_rss_tlv->rss_ind_table[i] = p_queue->rel.queue_id; + } + memcpy(p_rss_tlv->rss_key, rss_params->rss_key, + sizeof(rss_params->rss_key)); + } + + if (p_params->update_accept_any_vlan_flg) { + struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; + + size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size); + + p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; + p_any_vlan_tlv->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; + } + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + + qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp; + struct vfpf_first_tlv *req; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EAGAIN; + goto exit; + } + + p_hwfn->b_int_enabled = 0; + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd) +{ + struct qed_sp_vport_update_params sp_params; + int i; + + memset(&sp_params, 0, sizeof(sp_params)); + sp_params.update_approx_mcast_flg = 1; + + if (p_filter_cmd->opcode == QED_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); + sp_params.bins[bit / 32] |= 1 << (bit % 32); + } + } + + 
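[Editor's aside] The bins arithmetic just above implements an approximate multicast filter: every MAC hashes to one of 256 bins, and each bin is a single bit spread across an array of 32-bit words. A standalone sketch of the same bookkeeping, with a trivial stand-in hash instead of the driver's qed_mcast_bin_from_mac():

#include <stdint.h>
#include <stdio.h>

#define MC_BINS      256
#define MC_BIN_WORDS (MC_BINS / 32)

/* Stand-in hash that folds the MAC bytes into a bin index; the driver
 * derives the bin from a CRC of the address, but any stable hash shows
 * the bitmap handling. */
static unsigned int mcast_bin_from_mac(const uint8_t mac[6])
{
	unsigned int acc = 0;
	int i;

	for (i = 0; i < 6; i++)
		acc = acc * 31 + mac[i];
	return acc % MC_BINS;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	unsigned int bins[MC_BIN_WORDS] = { 0 };
	unsigned int bit = mcast_bin_from_mac(mac);

	/* One bit per bin: word index is bit / 32, position is bit % 32. */
	bins[bit / 32] |= 1u << (bit % 32);

	printf("MAC hashes to bin %u -> bins[%u] = 0x%08x\n",
	       bit, bit / 32, bins[bit / 32]);
	return 0;
}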
qed_vf_pf_vport_update(p_hwfn, &sp_params); +} + +int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_ucast) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_ucast_filter_tlv *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); + req->opcode = (u8)p_ucast->opcode; + req->type = (u8)p_ucast->type; + memcpy(req->mac, p_ucast->mac, ETH_ALEN); + req->vlan = p_ucast->vlan; + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EAGAIN; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv)); + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) { + rc = -EINVAL; + goto exit; + } + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_read_coal_resp_tlv *resp; + struct vfpf_read_coal_req_tlv *req; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_READ, sizeof(*req)); + req->qid = p_cid->rel.queue_id; + req->is_rx = p_cid->b_is_rx ? 
1 : 0; + + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + resp = &p_iov->pf2vf_reply->read_coal_resp; + + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + *p_coal = resp->coal; +exit: + qed_vf_pf_req_end(p_hwfn, rc); + + return rc; +} + +int +qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + const u8 *p_mac) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_bulletin_update_mac_tlv *p_req; + struct pfvf_def_resp_tlv *p_resp; + int rc; + + if (!p_mac) + return -EINVAL; + + /* clear mailbox and prep header tlv */ + p_req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_BULLETIN_UPDATE_MAC, + sizeof(*p_req)); + ether_addr_copy(p_req->mac, p_mac); + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Requesting bulletin update for MAC[%pM]\n", p_mac); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + p_resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &p_resp->hdr.status); + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + +int +qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, u16 tx_coal, struct qed_queue_cid *p_cid) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_update_coalesce *req; + struct pfvf_def_resp_tlv *resp; + int rc; + + /* clear mailbox and prep header tlv */ + req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_COALESCE_UPDATE, sizeof(*req)); + + req->rx_coal = rx_coal; + req->tx_coal = tx_coal; + req->qid = p_cid->rel.queue_id; + + DP_VERBOSE(p_hwfn, + QED_MSG_IOV, + "Setting coalesce rx_coal = %d, tx_coal = %d at queue = %d\n", + rx_coal, tx_coal, req->qid); + + /* add list termination tlv */ + qed_add_tlv(p_hwfn, &p_iov->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + resp = &p_iov->pf2vf_reply->default_resp; + rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status); + if (rc) + goto exit; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + goto exit; + + if (rx_coal) + p_hwfn->cdev->rx_coalesce_usecs = rx_coal; + + if (tx_coal) + p_hwfn->cdev->tx_coalesce_usecs = tx_coal; + +exit: + qed_vf_pf_req_end(p_hwfn, rc); + return rc; +} + +u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); + return 0; + } + + return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; +} + +void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, + u16 sb_id, struct qed_sb_info *p_sb) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n"); + return; + } + + if (sb_id >= PFVF_MAX_SBS_PER_VF) { + DP_NOTICE(p_hwfn, "Can't configure SB %04x\n", sb_id); + return; + } + + p_iov->sbs_info[sb_id] = p_sb; +} + +int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change) +{ + struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct qed_bulletin_content shadow; + u32 crc, crc_size; + + crc_size = sizeof(p_iov->bulletin.p_virt->crc); + *p_change = 0; + + /* Need to guarantee PF is not in the middle of writing it */ + memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); + + /* If version did not update, no need to do anything */ + if (shadow.version == p_iov->bulletin_shadow.version) + return 0; + + /* Verify the bulletin we see is valid */ + crc = crc32(0, (u8 *)&shadow + crc_size, + p_iov->bulletin.size - crc_size); + 
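[Editor's aside] The comparison that follows accepts the snapshot only when this CRC (computed over everything after the crc field itself) matches, which is how the VF avoids acting on a bulletin the PF was still rewriting. In isolation, the whole validate-then-commit flow looks roughly like the standalone sketch below; the struct layout is reduced and a local bitwise CRC-32 replaces the kernel's crc32().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct bulletin {
	uint32_t crc;		/* covers everything after this field */
	uint32_t version;
	uint8_t mac[6];
	uint8_t pad[2];
};

/* Plain bitwise CRC-32 (reflected, polynomial 0xEDB88320), standing in
 * for the kernel's crc32() helper. */
static uint32_t crc32_calc(const uint8_t *p, size_t len)
{
	uint32_t crc = 0xffffffffu;

	while (len--) {
		int k;

		crc ^= *p++;
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc ^ 0xffffffffu;
}

static void bulletin_publish(struct bulletin *b)
{
	b->crc = crc32_calc((const uint8_t *)b + sizeof(b->crc),
			    sizeof(*b) - sizeof(b->crc));
}

/* Snapshot the live bulletin, then accept the snapshot only if it is
 * newer than the current shadow and internally consistent. */
static int bulletin_read(const struct bulletin *live, struct bulletin *shadow)
{
	struct bulletin snap = *live;

	if (snap.version == shadow->version)
		return 0;	/* nothing new */
	if (crc32_calc((const uint8_t *)&snap + sizeof(snap.crc),
		       sizeof(snap) - sizeof(snap.crc)) != snap.crc)
		return -1;	/* torn or corrupt - retry on a later poll */
	*shadow = snap;
	return 1;		/* change committed */
}

int main(void)
{
	struct bulletin live = { .version = 2, .mac = { 0, 1, 2, 3, 4, 5 } };
	struct bulletin shadow = { .version = 1 };
	int rc;

	bulletin_publish(&live);
	rc = bulletin_read(&live, &shadow);
	printf("read -> %d, shadow version %u\n", rc, (unsigned int)shadow.version);
	return 0;
}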
if (crc != shadow.crc) + return -EAGAIN; + + /* Set the shadow bulletin and process it */ + memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); + + DP_VERBOSE(p_hwfn, QED_MSG_IOV, + "Read a bulletin update %08x\n", shadow.version); + + *p_change = 1; + + return 0; +} + +void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *p_params, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_params, 0, sizeof(*p_params)); + + p_params->speed.autoneg = p_bulletin->req_autoneg; + p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; + p_params->speed.forced_speed = p_bulletin->req_forced_speed; + p_params->pause.autoneg = p_bulletin->req_autoneg_pause; + p_params->pause.forced_rx = p_bulletin->req_forced_rx; + p_params->pause.forced_tx = p_bulletin->req_forced_tx; + p_params->loopback_mode = p_bulletin->req_loopback; +} + +void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params) +{ + __qed_vf_get_link_params(p_hwfn, params, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_link, 0, sizeof(*p_link)); + + p_link->link_up = p_bulletin->link_up; + p_link->speed = p_bulletin->speed; + p_link->full_duplex = p_bulletin->full_duplex; + p_link->an = p_bulletin->autoneg; + p_link->an_complete = p_bulletin->autoneg_complete; + p_link->parallel_detection = p_bulletin->parallel_detection; + p_link->pfc_enabled = p_bulletin->pfc_enabled; + p_link->partner_adv_speed = p_bulletin->partner_adv_speed; + p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; + p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; + p_link->partner_adv_pause = p_bulletin->partner_adv_pause; + p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; +} + +void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link) +{ + __qed_vf_get_link_state(p_hwfn, link, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin) +{ + memset(p_link_caps, 0, sizeof(*p_link_caps)); + p_link_caps->speed_capabilities = p_bulletin->capability_speed; +} + +void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps) +{ + __qed_vf_get_link_caps(p_hwfn, p_link_caps, + &(p_hwfn->vf_iov_info->bulletin_shadow)); +} + +void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) +{ + *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; +} + +void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) +{ + *num_txqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_txqs; +} + +void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids) +{ + *num_cids = p_hwfn->vf_iov_info->acquire_resp.resc.num_cids; +} + +void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) +{ + memcpy(port_mac, + p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN); +} + +void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters) +{ + struct qed_vf_iov *p_vf; + + p_vf = p_hwfn->vf_iov_info; + *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; +} + +void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters) +{ + struct qed_vf_iov *p_vf = p_hwfn->vf_iov_info; + + *num_mac_filters = p_vf->acquire_resp.resc.num_mac_filters; +} + +bool 
qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) +{ + struct qed_bulletin_content *bulletin; + + bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return true; + + /* Forbid VF from changing a MAC enforced by PF */ + if (ether_addr_equal(bulletin->mac, mac)) + return false; + + return false; +} + +static bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn, + u8 *dst_mac, u8 *p_is_forced) +{ + struct qed_bulletin_content *bulletin; + + bulletin = &hwfn->vf_iov_info->bulletin_shadow; + + if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { + if (p_is_forced) + *p_is_forced = 1; + } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { + if (p_is_forced) + *p_is_forced = 0; + } else { + return false; + } + + ether_addr_copy(dst_mac, bulletin->mac); + + return true; +} + +static void +qed_vf_bulletin_get_udp_ports(struct qed_hwfn *p_hwfn, + u16 *p_vxlan_port, u16 *p_geneve_port) +{ + struct qed_bulletin_content *p_bulletin; + + p_bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + + *p_vxlan_port = p_bulletin->vxlan_udp_port; + *p_geneve_port = p_bulletin->geneve_udp_port; +} + +void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng) +{ + struct pf_vf_pfdev_info *info; + + info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; + + *fw_major = info->fw_major; + *fw_minor = info->fw_minor; + *fw_rev = info->fw_rev; + *fw_eng = info->fw_eng; +} + +static void qed_handle_bulletin_change(struct qed_hwfn *hwfn) +{ + struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth; + u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced; + void *cookie = hwfn->cdev->ops_cookie; + u16 vxlan_port, geneve_port; + + qed_vf_bulletin_get_udp_ports(hwfn, &vxlan_port, &geneve_port); + is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac, + &is_mac_forced); + if (is_mac_exist && cookie) + ops->force_mac(cookie, mac, !!is_mac_forced); + + ops->ports_update(cookie, vxlan_port, geneve_port); + + /* Always update link configuration according to bulletin */ + qed_link_update(hwfn, NULL); +} + +void qed_iov_vf_task(struct work_struct *work) +{ + struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, + iov_task.work); + u8 change = 0; + + if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags)) + return; + + /* Handle bulletin board changes */ + qed_vf_read_bulletin(hwfn, &change); + if (test_and_clear_bit(QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG, + &hwfn->iov_task_flags)) + change = 1; + if (change) + qed_handle_bulletin_change(hwfn); + + /* As VF is polling bulletin board, need to constantly re-schedule */ + queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ); +} diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h new file mode 100644 index 0000000000..2bd51a41ce --- /dev/null +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -0,0 +1,1275 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* QLogic qed NIC Driver + * Copyright (c) 2015-2017 QLogic Corporation + */ + +#ifndef _QED_VF_H +#define _QED_VF_H + +#include "qed_l2.h" +#include "qed_mcp.h" + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY_SIZE 10 + +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 num_cids; + u8 padding; +}; + +struct hw_sb_info { + u16 hw_sb_id; + u8 sb_qid; + u8 padding[5]; +}; + +#define TLV_BUFFER_SIZE 1024 + +enum { + 
PFVF_STATUS_WAITING, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE, + PFVF_STATUS_FORCED, + PFVF_STATUS_MALICIOUS, +}; + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate response + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 padding; + u64 reply_address; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_def_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + +#define VFPF_ACQUIRE_OS_LINUX (0) +#define VFPF_ACQUIRE_OS_WINDOWS (1) +#define VFPF_ACQUIRE_OS_ESX (2) +#define VFPF_ACQUIRE_OS_SOLARIS (3) +#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4) + +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { +#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */ +#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */ + /* A requirement for supporting multi-Tx queues on a single queue-zone, + * VF would pass qids as additional information whenever passing queue + * references. + */ +#define VFPF_ACQUIRE_CAP_QUEUE_QIDS BIT(2) + + /* The VF is using the physical bar. While this is mostly internal + * to the VF, might affect the number of CIDs supported assuming + * QUEUE_QIDS is set. + */ +#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR BIT(3) + u64 capabilities; + u8 fw_major; + u8 fw_minor; + u8 fw_revision; + u8 fw_engineering; + u32 driver_version; + u16 opaque_fid; /* ME register value */ + u8 os_type; /* VFPF_ACQUIRE_OS_* value */ + u8 eth_fp_hsi_major; + u8 eth_fp_hsi_minor; + u8 padding[3]; + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + u64 bulletin_addr; + u32 bulletin_size; + u32 padding; +}; + +/* receive side scaling tlv */ +struct vfpf_vport_update_rss_tlv { + struct channel_tlv tl; + + u8 update_rss_flags; +#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0) +#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1) +#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2) +#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3) + + u8 rss_enable; + u8 rss_caps; + u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ + u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY_SIZE]; +}; + +struct pfvf_storm_stats { + u32 address; + u32 len; +}; + +struct pfvf_stats_info { + struct pfvf_storm_stats mstats; + struct pfvf_storm_stats pstats; + struct pfvf_storm_stats tstats; + struct pfvf_storm_stats ustats; +}; + +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + + struct pf_vf_pfdev_info { + u32 chip_num; + u32 mfw_ver; + + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + u64 capabilities; +#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0) +#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */ +/* There are old PF versions where the PF might mistakenly override the sanity + * mechanism [version-based] and allow a VF that can't be supported to pass + * the acquisition phase. + * To overcome this, PFs now indicate that they're past that point and the new + * VFs would fail probe on the older PFs that fail to do so. 
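[Editor's aside] The capability words in the acquire exchange act as a simple feature negotiation: the VF advertises what it can do, the PF answers with what it will honour, and an optional feature is only used when the relevant bit survives in the reply. The sketch below shows the intersection logic with generic bit names; in the real messages the request (VFPF_ACQUIRE_CAP_*) and response (PFVF_ACQUIRE_CAP_*) flags live in separate bit layouts, so the check is per-feature rather than a literal AND of the two words.

#include <stdint.h>
#include <stdio.h>

/* Illustrative feature bits - generic stand-ins for the acquire flags. */
#define CAP_100G       (1ull << 0)
#define CAP_QUEUE_QIDS (1ull << 1)
#define CAP_PHYS_BAR   (1ull << 2)

struct acquire {
	uint64_t vf_caps;	/* what the VF requests / can do */
	uint64_t pf_caps;	/* what the PF grants / advertises back */
};

/* A feature is usable only if both sides flagged it. */
static int cap_usable(const struct acquire *a, uint64_t bit)
{
	return (a->vf_caps & bit) && (a->pf_caps & bit);
}

int main(void)
{
	struct acquire a = {
		.vf_caps = CAP_100G | CAP_QUEUE_QIDS | CAP_PHYS_BAR,
		.pf_caps = CAP_100G | CAP_QUEUE_QIDS,
	};

	printf("queue qids usable: %s\n", cap_usable(&a, CAP_QUEUE_QIDS) ? "yes" : "no");
	printf("physical bar usable: %s\n", cap_usable(&a, CAP_PHYS_BAR) ? "yes" : "no");
	return 0;
}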
+ */ +#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2) + + /* PF expects queues to be received with additional qids */ +#define PFVF_ACQUIRE_CAP_QUEUE_QIDS BIT(3) + + u16 db_size; + u8 indices_per_sb; + u8 os_type; + + /* These should match the PF's qed_dev values */ + u16 chip_rev; + u8 dev_type; + + /* Doorbell bar size configured in HW: log(size) or 0 */ + u8 bar_size; + + struct pfvf_stats_info stats_info; + + u8 port_mac[ETH_ALEN]; + + /* It's possible PF had to configure an older fastpath HSI + * [in case VF is newer than PF]. This is communicated back + * to the VF. It can also be used in case of error due to + * non-matching versions to shed light in VF about failure. + */ + u8 major_fp_hsi; + u8 minor_fp_hsi; + } pfdev_info; + + struct pf_vf_resc { +#define PFVF_MAX_QUEUES_PER_VF 16 +#define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 cid[PFVF_MAX_QUEUES_PER_VF]; + + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 num_cids; + u8 padding; + } resc; + + u32 bulletin_size; + u32 padding; +}; + +struct pfvf_start_queue_resp_tlv { + struct pfvf_tlv hdr; + u32 offset; /* offset to consumer/producer of queue */ + u8 padding[4]; +}; + +/* Extended queue information - additional index for reference inside qzone. + * If communicated between VF/PF, each TLV relating to queues should be + * extended by one such [or have a future base TLV that already contains info]. + */ +struct vfpf_qid_tlv { + struct channel_tlv tl; + u8 qid; + u8 padding[3]; +}; + +/* Setup Queue */ +struct vfpf_start_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 rxq_addr; + u64 deprecated_sge_addr; + u64 cqe_pbl_addr; + + u16 cqe_pbl_size; + u16 hw_sb; + u16 rx_qid; + u16 hc_rate; /* desired interrupts per sec. */ + + u16 bd_max_bytes; + u16 stat_id; + u8 sb_index; + u8 padding[3]; +}; + +struct vfpf_start_txq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + u64 pbl_addr; + u16 pbl_size; + u16 stat_id; + u16 tx_qid; + u16 hw_sb; + + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 hc_rate; /* desired interrupts per sec. 
*/ + u8 sb_index; + u8 padding[3]; +}; + +/* Stop RX Queue */ +struct vfpf_stop_rxqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 rx_qid; + + /* this field is deprecated and should *always* be set to '1' */ + u8 num_rxqs; + u8 cqe_completion; + u8 padding[4]; +}; + +/* Stop TX Queues */ +struct vfpf_stop_txqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 tx_qid; + + /* this field is deprecated and should *always* be set to '1' */ + u8 num_txqs; + u8 padding[5]; +}; + +struct vfpf_update_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF]; + + u16 rx_qid; + u8 num_rxqs; + u8 flags; +#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0) +#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1) +#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2) + + u8 padding[4]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; +#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 +#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 +#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + + u8 mac[ETH_ALEN]; + u16 vlan_tag; + + u8 padding[4]; +}; + +/* Start a vport */ +struct vfpf_vport_start_tlv { + struct vfpf_first_tlv first_tlv; + + u64 sb_addr[PFVF_MAX_SBS_PER_VF]; + + u32 tpa_mode; + u16 dep1; + u16 mtu; + + u8 vport_id; + u8 inner_vlan_removal; + + u8 only_untagged; + u8 max_buffers_per_cqe; + + u8 padding[4]; +}; + +/* Extended tlvs - need to add rss, mcast, accept mode tlvs */ +struct vfpf_vport_update_activate_tlv { + struct channel_tlv tl; + u8 update_rx; + u8 update_tx; + u8 active_rx; + u8 active_tx; +}; + +struct vfpf_vport_update_tx_switch_tlv { + struct channel_tlv tl; + u8 tx_switching; + u8 padding[3]; +}; + +struct vfpf_vport_update_vlan_strip_tlv { + struct channel_tlv tl; + u8 remove_vlan; + u8 padding[3]; +}; + +struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + + /* There are only 256 approx bins, and in HSI they're divided into + * 32-bit values. As old VFs used to set-bit to the values on its side, + * the upper half of the array is never expected to contain any data. + */ + u64 bins[4]; + u64 obsolete_bins[4]; +}; + +struct vfpf_vport_update_accept_param_tlv { + struct channel_tlv tl; + u8 update_rx_mode; + u8 update_tx_mode; + u8 rx_accept_filter; + u8 tx_accept_filter; +}; + +struct vfpf_vport_update_accept_any_vlan_tlv { + struct channel_tlv tl; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + + u8 padding[2]; +}; + +struct vfpf_vport_update_sge_tpa_tlv { + struct channel_tlv tl; + + u16 sge_tpa_flags; +#define VFPF_TPA_IPV4_EN_FLAG BIT(0) +#define VFPF_TPA_IPV6_EN_FLAG BIT(1) +#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2) +#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3) +#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4) + + u8 update_sge_tpa_flags; +#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0) +#define VFPF_UPDATE_TPA_EN_FLAG BIT(1) +#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2) + + u8 max_buffers_per_cqe; + + u16 deprecated_sge_buff_size; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; + + u8 tpa_max_aggs_num; + u8 padding[7]; +}; + +/* Primary tlv as a header for various extended tlvs for + * various functionalities in vport update ramrod. 
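[Editor's aside] Every extension structure above starts with the same channel_tlv header, which is what lets the receiver walk a request as a flat chain: primary TLV, zero or more extensions, then a list-end marker. A minimal standalone sketch of building and walking such a chain (simplified header and invented type values, not this file's definitions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for struct channel_tlv and the type codes. */
struct tlv_hdr {
	uint16_t type;
	uint16_t length;
};

enum { TLV_PRIMARY = 1, TLV_EXT_ACTIVATE, TLV_LIST_END };

/* Append one TLV to the request buffer and advance *offset past it,
 * mirroring the offset-pointer idiom used by qed_add_tlv(). The caller
 * normally keeps the returned pointer to fill in the body. */
static void *add_tlv(uint8_t **offset, uint16_t type, uint16_t length)
{
	struct tlv_hdr *hdr = (struct tlv_hdr *)*offset;

	hdr->type = type;
	hdr->length = length;
	*offset += length;
	return hdr;
}

int main(void)
{
	uint8_t request[1024];
	uint8_t *offset = request;
	struct tlv_hdr *tlv;

	memset(request, 0, sizeof(request));

	add_tlv(&offset, TLV_PRIMARY, 24);	/* primary/command TLV */
	add_tlv(&offset, TLV_EXT_ACTIVATE, 8);	/* one optional extension */
	add_tlv(&offset, TLV_LIST_END, sizeof(struct tlv_hdr));

	/* Walk the chain the way the receiver would. */
	for (tlv = (struct tlv_hdr *)request;
	     tlv->type != TLV_LIST_END;
	     tlv = (struct tlv_hdr *)((uint8_t *)tlv + tlv->length))
		printf("tlv type %d, length %d\n", tlv->type, tlv->length);

	return 0;
}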
+ */ +struct vfpf_vport_update_tlv { + struct vfpf_first_tlv first_tlv; +}; + +struct vfpf_ucast_filter_tlv { + struct vfpf_first_tlv first_tlv; + + u8 opcode; + u8 type; + + u8 mac[ETH_ALEN]; + + u16 vlan; + u16 padding[3]; +}; + +/* tunnel update param tlv */ +struct vfpf_update_tunn_param_tlv { + struct vfpf_first_tlv first_tlv; + + u8 tun_mode_update_mask; + u8 tunn_mode; + u8 update_tun_cls; + u8 vxlan_clss; + u8 l2gre_clss; + u8 ipgre_clss; + u8 l2geneve_clss; + u8 ipgeneve_clss; + u8 update_geneve_port; + u8 update_vxlan_port; + u16 geneve_port; + u16 vxlan_port; + u8 padding[2]; +}; + +struct pfvf_update_tunn_param_tlv { + struct pfvf_tlv hdr; + + u16 tunn_feature_mask; + u8 vxlan_mode; + u8 l2geneve_mode; + u8 ipgeneve_mode; + u8 l2gre_mode; + u8 ipgre_mode; + u8 vxlan_clss; + u8 l2gre_clss; + u8 ipgre_clss; + u8 l2geneve_clss; + u8 ipgeneve_clss; + u16 vxlan_udp_port; + u16 geneve_udp_port; +}; + +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +struct vfpf_update_coalesce { + struct vfpf_first_tlv first_tlv; + u16 rx_coal; + u16 tx_coal; + u16 qid; + u8 padding[2]; +}; + +struct vfpf_read_coal_req_tlv { + struct vfpf_first_tlv first_tlv; + u16 qid; + u8 is_rx; + u8 padding[5]; +}; + +struct pfvf_read_coal_resp_tlv { + struct pfvf_tlv hdr; + u16 coal; + u8 padding[6]; +}; + +struct vfpf_bulletin_update_mac_tlv { + struct vfpf_first_tlv first_tlv; + u8 mac[ETH_ALEN]; + u8 padding[2]; +}; + +union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; + struct vfpf_start_rxq_tlv start_rxq; + struct vfpf_start_txq_tlv start_txq; + struct vfpf_stop_rxqs_tlv stop_rxqs; + struct vfpf_stop_txqs_tlv stop_txqs; + struct vfpf_update_rxq_tlv update_rxq; + struct vfpf_vport_start_tlv start_vport; + struct vfpf_vport_update_tlv vport_update; + struct vfpf_ucast_filter_tlv ucast_filter; + struct vfpf_update_tunn_param_tlv tunn_param_update; + struct vfpf_update_coalesce update_coalesce; + struct vfpf_read_coal_req_tlv read_coal_req; + struct vfpf_bulletin_update_mac_tlv bulletin_update_mac; + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct pfvf_def_resp_tlv default_resp; + struct pfvf_acquire_resp_tlv acquire_resp; + struct tlv_buffer_size tlv_buf_size; + struct pfvf_start_queue_resp_tlv queue_start; + struct pfvf_update_tunn_param_tlv tunn_param_resp; + struct pfvf_read_coal_resp_tlv read_coal_resp; +}; + +enum qed_bulletin_bit { + /* Alert the VF that a forced MAC was set by the PF */ + MAC_ADDR_FORCED = 0, + /* Alert the VF that a forced VLAN was set by the PF */ + VLAN_ADDR_FORCED = 2, + + /* Indicate that `default_only_untagged' contains actual data */ + VFPF_BULLETIN_UNTAGGED_DEFAULT = 3, + VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4, + + /* Alert the VF that suggested mac was sent by the PF. + * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set. + */ + VFPF_BULLETIN_MAC_ADDR = 5 +}; + +struct qed_bulletin_content { + /* crc of structure to ensure is not in mid-update */ + u32 crc; + + u32 version; + + /* bitmap indicating which fields hold valid values */ + u64 valid_bitmap; + + /* used for MAC_ADDR or MAC_ADDR_FORCED */ + u8 mac[ETH_ALEN]; + + /* If valid, 1 => only untagged Rx if no vlan is configured */ + u8 default_only_untagged; + u8 padding; + + /* The following is a 'copy' of qed_mcp_link_state, + * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's + * possible the structs will increase further along the road we cannot + * have it here; Instead we need to have all of its fields. 
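[Editor's aside] Both unions above deliberately include the tlv_buffer_size member, which pins the request and reply mailboxes to one fixed size no matter which message is in flight; that is what lets the VF make a single DMA allocation per direction. A compile-time statement of that invariant can be sketched with stand-in structures and C11 static_assert:

#include <assert.h>	/* static_assert (C11) */
#include <stdint.h>

#define TLV_BUF_SIZE 1024

/* Stand-in message types plus the buffer-sized padding member. */
struct msg_small { uint32_t a; };
struct msg_large { uint8_t payload[512]; };
struct tlv_pad   { uint8_t buf[TLV_BUF_SIZE]; };

/* Every message shares one buffer-sized union, so a single allocation of
 * sizeof(union mailbox) can carry any request or reply. */
union mailbox {
	struct msg_small small;
	struct msg_large large;
	struct tlv_pad pad;
};

/* The padding member pins the union to the full mailbox size, and no
 * individual message may outgrow it. */
static_assert(sizeof(union mailbox) == TLV_BUF_SIZE, "mailbox size drifted");
static_assert(sizeof(struct msg_large) <= TLV_BUF_SIZE, "message too large");

int main(void)
{
	return 0;
}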
+ */ + u8 req_autoneg; + u8 req_autoneg_pause; + u8 req_forced_rx; + u8 req_forced_tx; + u8 padding2[4]; + + u32 req_adv_speed; + u32 req_forced_speed; + u32 req_loopback; + u32 padding3; + + u8 link_up; + u8 full_duplex; + u8 autoneg; + u8 autoneg_complete; + u8 parallel_detection; + u8 pfc_enabled; + u8 partner_tx_flow_ctrl_en; + u8 partner_rx_flow_ctrl_en; + u8 partner_adv_pause; + u8 sfp_tx_fault; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 padding4[2]; + + u32 speed; + u32 partner_adv_speed; + + u32 capability_speed; + + /* Forced vlan */ + u16 pvid; + u16 padding5; +}; + +struct qed_bulletin { + dma_addr_t phys; + struct qed_bulletin_content *p_virt; + u32 size; +}; + +enum { + CHANNEL_TLV_NONE, /* ends tlv sequence */ + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_VPORT_START, + CHANNEL_TLV_VPORT_UPDATE, + CHANNEL_TLV_VPORT_TEARDOWN, + CHANNEL_TLV_START_RXQ, + CHANNEL_TLV_START_TXQ, + CHANNEL_TLV_STOP_RXQS, + CHANNEL_TLV_STOP_TXQS, + CHANNEL_TLV_UPDATE_RXQ, + CHANNEL_TLV_INT_CLEANUP, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_UCAST_FILTER, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, + CHANNEL_TLV_VPORT_UPDATE_MCAST, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, + CHANNEL_TLV_VPORT_UPDATE_RSS, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, + CHANNEL_TLV_UPDATE_TUNN_PARAM, + CHANNEL_TLV_COALESCE_UPDATE, + CHANNEL_TLV_QID, + CHANNEL_TLV_COALESCE_READ, + CHANNEL_TLV_BULLETIN_UPDATE_MAC, + CHANNEL_TLV_MAX, + + /* Required for iterating over vport-update tlvs. + * Will break in case non-sequential vport-update tlvs. + */ + CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1, +}; + +/* Default number of CIDs [total of both Rx and Tx] to be requested + * by default, and maximum possible number. + */ +#define QED_ETH_VF_DEFAULT_NUM_CIDS (32) +#define QED_ETH_VF_MAX_NUM_CIDS (250) + +/* This data is held in the qed_hwfn structure for VFs only. */ +struct qed_vf_iov { + union vfpf_tlvs *vf2pf_request; + dma_addr_t vf2pf_request_phys; + union pfvf_tlvs *pf2vf_reply; + dma_addr_t pf2vf_reply_phys; + + /* Should be taken whenever the mailbox buffers are accessed */ + struct mutex mutex; + u8 *offset; + + /* Bulletin Board */ + struct qed_bulletin bulletin; + struct qed_bulletin_content bulletin_shadow; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; + + /* In case PF originates prior to the fp-hsi version comparison, + * this has to be propagated as it affects the fastpath. + */ + bool b_pre_fp_hsi; + + /* Current day VFs are passing the SBs physical address on vport + * start, and as they lack an IGU mapping they need to store the + * addresses of previously registered SBs. + * Even if we were to change configuration flow, due to backward + * compatibility [with older PFs] we'd still need to store these. + */ + struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF]; + + /* Determines whether VF utilizes doorbells via limited register + * bar or via the doorbell bar. + */ + bool b_doorbell_bar; +}; + +/** + * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the + * configuration. + * + * @p_hwfn: HW device data. + * @rx_coal: coalesce value in micro second for rx queue. + * @tx_coal: coalesce value in micro second for tx queue. + * @p_cid: queue cid. + * + * Return: Int. 
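[Editor's aside] The CHANNEL_TLV_VPORT_UPDATE_MAX sentinel above matters because the response-checking loop in qed_vf.c walks every value from CHANNEL_TLV_VPORT_UPDATE_ACTIVATE up to it, so the scheme only holds while the vport-update entries stay contiguous. A reduced sketch of that sentinel-bounded iteration (toy enum values, not the real ones):

#include <stdio.h>

/* Toy subset of the channel TLV ids; the point is the contiguous block of
 * vport-update extensions closed by a sentinel. */
enum channel_tlv {
	TLV_NONE,
	TLV_VPORT_UPDATE_ACTIVATE,
	TLV_VPORT_UPDATE_TX_SWITCH,
	TLV_VPORT_UPDATE_MCAST,
	TLV_VPORT_UPDATE_RSS,
	/* One past the last vport-update extension; inserting an unrelated
	 * value inside the block above would break the loop below. */
	TLV_VPORT_UPDATE_MAX,
};

static const char *tlv_name(int tlv)
{
	switch (tlv) {
	case TLV_VPORT_UPDATE_ACTIVATE:  return "activate";
	case TLV_VPORT_UPDATE_TX_SWITCH: return "tx-switch";
	case TLV_VPORT_UPDATE_MCAST:     return "mcast";
	case TLV_VPORT_UPDATE_RSS:       return "rss";
	default:                         return "?";
	}
}

int main(void)
{
	int tlv;

	/* Check the response status of every extension in the range. */
	for (tlv = TLV_VPORT_UPDATE_ACTIVATE; tlv < TLV_VPORT_UPDATE_MAX; tlv++)
		printf("would check response TLV %d (%s)\n", tlv, tlv_name(tlv));
	return 0;
}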
+ * + **/ +int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, + u16 rx_coal, + u16 tx_coal, struct qed_queue_cid *p_cid); + +/** + * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue. + * + * @p_hwfn: HW device data. + * @p_coal: coalesce value in micro second for VF queues. + * @p_cid: queue cid. + * + * Return: Int. + **/ +int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, + u16 *p_coal, struct qed_queue_cid *p_cid); + +#ifdef CONFIG_QED_SRIOV +/** + * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed. + * + * @p_hwfn: HW device data. + * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise. + * + * Return: enum _qed_status. + */ +int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); + +/** + * qed_vf_get_link_params(): Get link parameters for VF from qed + * + * @p_hwfn: HW device data. + * @params: the link params structure to be filled for the VF. + * + * Return: Void. + */ +void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params); + +/** + * qed_vf_get_link_state(): Get link state for VF from qed. + * + * @p_hwfn: HW device data. + * @link: the link state structure to be filled for the VF + * + * Return: Void. + */ +void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link); + +/** + * qed_vf_get_link_caps(): Get link capabilities for VF from qed. + * + * @p_hwfn: HW device data. + * @p_link_caps: the link capabilities structure to be filled for the VF + * + * Return: Void. + */ +void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps); + +/** + * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed + * + * @p_hwfn: HW device data. + * @num_rxqs: allocated RX queues + * + * Return: Void. + */ +void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); + +/** + * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed + * + * @p_hwfn: HW device data. + * @num_txqs: allocated TX queues + * + * Return: Void. + */ +void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); + +/** + * qed_vf_get_num_cids(): Get number of available connections + * [both Rx and Tx] for VF + * + * @p_hwfn: HW device data. + * @num_cids: allocated number of connections + * + * Return: Void. + */ +void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); + +/** + * qed_vf_get_port_mac(): Get port mac address for VF. + * + * @p_hwfn: HW device data. + * @port_mac: destination location for port mac + * + * Return: Void. + */ +void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); + +/** + * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated + * for VF by qed. + * + * @p_hwfn: HW device data. + * @num_vlan_filters: allocated VLAN filters + * + * Return: Void. + */ +void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, + u8 *num_vlan_filters); + +/** + * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated + * for VF by qed + * + * @p_hwfn: HW device data. + * @num_mac_filters: allocated MAC filters + * + * Return: Void. + */ +void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); + +/** + * qed_vf_check_mac(): Check if VF can set a MAC address + * + * @p_hwfn: HW device data. + * @mac: Mac. + * + * Return: bool. + */ +bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); + +/** + * qed_vf_get_fw_version(): Set firmware version information + * in dev_info from the VF's acquire response tlv + * + * @p_hwfn: HW device data.
+ * @fw_major: FW major. + * @fw_minor: FW minor. + * @fw_rev: FW rev. + * @fw_eng: FW eng. + * + * Return: Void. + */ +void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng); + +/** + * qed_vf_hw_prepare(): hw preparation for the VF - sends the ACQUIRE message. + * + * @p_hwfn: HW device data. + * + * Return: Int. + */ +int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); + +/** + * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF + * + * @p_hwfn: HW device data. + * @p_cid: Only relative fields are relevant + * @bd_max_bytes: maximum number of bytes per bd + * @bd_chain_phys_addr: physical address of bd chain + * @cqe_pbl_addr: physical address of pbl + * @cqe_pbl_size: pbl size + * @pp_prod: pointer to the producer to be used in fastpath + * + * Return: Int. + */ +int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod); + +/** + * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the + * PF. + * + * @p_hwfn: HW device data. + * @p_cid: CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL Size. + * @pp_doorbell: pointer to the address to which the doorbell should be written. + * + * Return: Int. + */ +int +qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell); + +/** + * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF. + * + * @p_hwfn: HW device data. + * @p_cid: CID. + * @cqe_completion: CQE Completion. + * + * Return: Int. + */ +int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, bool cqe_completion); + +/** + * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF. + * + * @p_hwfn: HW device data. + * @p_cid: CID. + * + * Return: Int. + */ +int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); + +/** + * qed_vf_pf_vport_update(): VF - send a vport update command. + * + * @p_hwfn: HW device data. + * @p_params: Params + * + * Return: Int. + */ +int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params); + +/** + * qed_vf_pf_reset(): VF - send a close message to PF. + * + * @p_hwfn: HW device data. + * + * Return: enum _qed_status + */ +int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); + +/** + * qed_vf_pf_release(): VF - free the VF's memory. + * + * @p_hwfn: HW device data. + * + * Return: enum _qed_status + */ +int qed_vf_pf_release(struct qed_hwfn *p_hwfn); + +/** + * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given + * sb_id. For VFs igu sbs don't have to be contiguous + * + * @p_hwfn: HW device data. + * @sb_id: SB ID. + * + * Return: INLINE u16 + */ +u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); + +/** + * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info. + * + * @p_hwfn: HW device data. + * @sb_id: zero-based SB index [for fastpath] + * @p_sb: may be NULL [during removal]. + * + * Return: Void. + */ +void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, + u16 sb_id, struct qed_sb_info *p_sb); + +/** + * qed_vf_pf_vport_start(): perform vport start for VF. + * + * @p_hwfn: HW device data. + * @vport_id: Vport ID. + * @mtu: MTU. + * @inner_vlan_removal: Inner VLAN removal. + * @tpa_mode: TPA mode. + * @max_buffers_per_cqe: Max buffers per CQE.
+ * @only_untagged: default behavior regarding vlan acceptance + * + * Return: enum _qed_status + */ +int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, u8 only_untagged); + +/** + * qed_vf_pf_vport_stop(): stop the VF's vport + * + * @p_hwfn: HW device data. + * + * Return: enum _qed_status + */ +int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); + +int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_param); + +void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd); + +/** + * qed_vf_pf_int_cleanup(): clean the SB of the VF + * + * @p_hwfn: HW device data. + * + * Return: enum _qed_status + */ +int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); + +/** + * __qed_vf_get_link_params(): return the link params in a given bulletin board + * + * @p_hwfn: HW device data. + * @p_params: pointer to a struct to fill with link params + * @p_bulletin: Bulletin. + * + * Return: Void. + */ +void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *p_params, + struct qed_bulletin_content *p_bulletin); + +/** + * __qed_vf_get_link_state(): return the link state in a given bulletin board + * + * @p_hwfn: HW device data. + * @p_link: pointer to a struct to fill with link state + * @p_bulletin: Bulletin. + * + * Return: Void. + */ +void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content *p_bulletin); + +/** + * __qed_vf_get_link_caps(): return the link capabilities in a given + * bulletin board + * + * @p_hwfn: HW device data. + * @p_link_caps: pointer to a struct to fill with link capabilities + * @p_bulletin: Bulletin. + * + * Return: Void. + */ +void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin); + +void qed_iov_vf_task(struct work_struct *work); +void qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun); +int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_tunn); + +u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); +/** + * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in + * its bulletin board + * + * @p_hwfn: HW device data. + * @p_mac: mac address to be updated in bulletin board + * + * Return: Int.
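+ * + * Illustrative use (editor's example, not part of the original patch), where + * vf_mac is a hypothetical ETH_ALEN buffer holding the address to publish: + * qed_vf_pf_bulletin_update_mac(p_hwfn, vf_mac).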
+ */ +int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac); + +#else +static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params *params) +{ +} + +static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *link) +{ +} + +static inline void +qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps) +{ +} + +static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs) +{ +} + +static inline void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs) +{ +} + +static inline void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids) +{ +} + +static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac) +{ +} + +static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, + u8 *num_vlan_filters) +{ +} + +static inline void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, + u8 *num_mac_filters) +{ +} + +static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac) +{ + return false; +} + +static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng) +{ +} + +static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_adr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, void __iomem **pp_prod) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + dma_addr_t pbl_addr, + u16 pbl_size, void __iomem **pp_doorbell) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid, + bool cqe_completion) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, + struct qed_queue_cid *p_cid) +{ + return -EINVAL; +} + +static inline int +qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, + struct qed_sp_vport_update_params *p_params) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id) +{ + return 0; +} + +static inline void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id, + struct qed_sb_info *p_sb) +{ +} + +static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum qed_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, + u8 only_untagged) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn, + struct qed_filter_ucast *p_param) +{ + return -EINVAL; +} + +static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, + struct qed_filter_mcast *p_filter_cmd) +{ +} + +static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn) +{ + return -EINVAL; +} + +static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_params + *p_params, + struct qed_bulletin_content + *p_bulletin) +{ +} + +static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, + struct qed_mcp_link_state *p_link, + struct qed_bulletin_content + *p_bulletin) +{ +} + +static inline void +__qed_vf_get_link_caps(struct 
qed_hwfn *p_hwfn, + struct qed_mcp_link_capabilities *p_link_caps, + struct qed_bulletin_content *p_bulletin) +{ +} + +static inline void qed_iov_vf_task(struct work_struct *work) +{ +} + +static inline void +qed_vf_set_vf_start_tunn_update_param(struct qed_tunnel_info *p_tun) +{ +} + +static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, + struct qed_tunnel_info *p_tunn) +{ + return -EINVAL; +} + +static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, + const u8 *p_mac) +{ + return -EINVAL; +} + +static inline u32 +qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, + enum BAR_ID bar_id) +{ + return 0; +} +#endif + +#endif
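Editor's note: the sketch below shows one plausible order in which a VF-side L2 driver would invoke the calls declared in the header above: ACQUIRE via qed_vf_hw_prepare(), vport creation, Rx queue start, and a bulletin-board poll. It is an illustration only, not part of the patch; the function name example_vf_l2_bringup, the constants (vport 0, MTU 1500, bd_max_bytes 0x1000), the include of "qed_vf.h" as the header's file name, and the assumption that QED_TPA_MODE_NONE from qed_l2.h is visible here are all the editor's. Real callers also allocate the queue cid and DMA rings, open Tx queues, and unwind errors more thoroughly.

/* Illustrative sketch only -- not part of the patch above. */
#include "qed.h"
#include "qed_vf.h"	/* assumed name of the header shown above */

static int example_vf_l2_bringup(struct qed_hwfn *p_hwfn,
				 struct qed_queue_cid *rx_cid,
				 dma_addr_t rx_bd_chain,
				 dma_addr_t rx_cqe_pbl, u16 rx_cqe_pbl_size,
				 void __iomem **pp_rx_prod)
{
	u8 num_rxqs = 0, change = 0;
	int rc;

	/* Allocate the vf2pf mailbox and send the ACQUIRE request to the PF */
	rc = qed_vf_hw_prepare(p_hwfn);
	if (rc)
		return rc;

	/* Resources granted by the PF in the acquire response */
	qed_vf_get_num_rxqs(p_hwfn, &num_rxqs);
	if (!num_rxqs)
		return -EINVAL;

	/* A vport must exist before any queue is started */
	rc = qed_vf_pf_vport_start(p_hwfn, 0 /* vport_id */, 1500 /* mtu */,
				   1 /* inner_vlan_removal */,
				   QED_TPA_MODE_NONE,
				   1 /* max_buffers_per_cqe */,
				   1 /* only_untagged */);
	if (rc)
		return rc;

	/* CHANNEL_TLV_START_RXQ; the PF answers with the producer address */
	rc = qed_vf_pf_rxq_start(p_hwfn, rx_cid, 0x1000 /* bd_max_bytes */,
				 rx_bd_chain, rx_cqe_pbl, rx_cqe_pbl_size,
				 pp_rx_prod);
	if (rc) {
		qed_vf_pf_vport_stop(p_hwfn);
		return rc;
	}

	/* Check the bulletin board; if it changed, the shadow copy now holds
	 * PF-posted link/MAC state, retrievable via qed_vf_get_link_state()
	 * and friends.
	 */
	return qed_vf_read_bulletin(p_hwfn, &change);
}

Teardown would mirror this order: qed_vf_pf_rxq_stop(), qed_vf_pf_vport_stop(), qed_vf_pf_int_cleanup(), and finally qed_vf_pf_release() to free the VF's mailbox and bulletin resources.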