author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-06 01:02:30 +0000
commit    | 76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree      | f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /drivers/infiniband/hw/i40iw
parent    | Initial commit. (diff)
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/infiniband/hw/i40iw')
29 files changed, 28974 insertions, 0 deletions
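The patch body begins with the driver's Kconfig entry, which gates the build behind INFINIBAND_I40IW and its dependencies (INET, I40E, PCI, and a resolved IPV6 state) and selects GENERIC_ALLOCATOR. As an illustration only, and not part of this commit, a .config fragment that would build the driver as a module might look like the sketch below; the CONFIG_INFINIBAND symbol is assumed from the driver living under drivers/infiniband, the rest is taken from the Kconfig hunk that follows.

    # Illustrative .config fragment (not part of this patch): build i40iw as a module
    CONFIG_PCI=y
    CONFIG_INET=y
    CONFIG_IPV6=m
    CONFIG_INFINIBAND=m
    CONFIG_I40E=m
    CONFIG_INFINIBAND_I40IW=m   # selects GENERIC_ALLOCATOR per the Kconfig below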
diff --git a/drivers/infiniband/hw/i40iw/Kconfig b/drivers/infiniband/hw/i40iw/Kconfig
new file mode 100644
index 000000000..d867ef1ac
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/Kconfig
@@ -0,0 +1,8 @@
+config INFINIBAND_I40IW
+	tristate "Intel(R) Ethernet X722 iWARP Driver"
+	depends on INET && I40E
+	depends on IPV6 || !IPV6
+	depends on PCI
+	select GENERIC_ALLOCATOR
+	---help---
+	Intel(R) Ethernet X722 iWARP Driver
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
new file mode 100644
index 000000000..5a8a7a3f2
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y := -Idrivers/net/ethernet/intel/i40e
+
+obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
+
+i40iw-objs :=\
+	i40iw_cm.o i40iw_ctrl.o \
+	i40iw_hmc.o i40iw_hw.o i40iw_main.o \
+	i40iw_pble.o i40iw_puda.o i40iw_uk.o i40iw_utils.o \
+	i40iw_verbs.o i40iw_virtchnl.o i40iw_vf.o
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
new file mode 100644
index 000000000..2f2b4426d
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -0,0 +1,602 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses.  You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+*   Redistribution and use in source and binary forms, with or
+*   without modification, are permitted provided that the following
+*   conditions are met:
+*
+*    - Redistributions of source code must retain the above
+*	copyright notice, this list of conditions and the following
+*	disclaimer.
+*
+*    - Redistributions in binary form must reproduce the above
+*	copyright notice, this list of conditions and the following
+*	disclaimer in the documentation and/or other materials
+*	provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE.
+*
+*******************************************************************************/
+
+#ifndef I40IW_IW_H
+#define I40IW_IW_H
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/crc32c.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_pack.h>
+#include <rdma/rdma_cm.h>
+#include <rdma/iw_cm.h>
+#include <crypto/hash.h>
+
+#include "i40iw_status.h"
+#include "i40iw_osdep.h"
+#include "i40iw_d.h"
+#include "i40iw_hmc.h"
+
+#include <i40e_client.h>
+#include "i40iw_type.h"
+#include "i40iw_p.h"
+#include <rdma/i40iw-abi.h>
+#include "i40iw_pble.h"
+#include "i40iw_verbs.h"
+#include "i40iw_cm.h"
+#include "i40iw_user.h"
+#include "i40iw_puda.h"
+
+#define I40IW_FW_VERSION 2
+#define I40IW_HW_VERSION 2
+
+#define I40IW_ARP_ADD 1
+#define I40IW_ARP_DELETE 2
+#define I40IW_ARP_RESOLVE 3
+
+#define I40IW_MACIP_ADD 1
+#define I40IW_MACIP_DELETE 2
+
+#define IW_CCQ_SIZE (I40IW_CQP_SW_SQSIZE_2048 + 1)
+#define IW_CEQ_SIZE 2048
+#define IW_AEQ_SIZE 2048
+
+#define RX_BUF_SIZE (1536 + 8)
+#define IW_REG0_SIZE (4 * 1024)
+#define IW_TX_TIMEOUT (6 * HZ)
+#define IW_FIRST_QPN 1
+#define IW_SW_CONTEXT_ALIGN 1024
+
+#define MAX_DPC_ITERATIONS 128
+
+#define I40IW_EVENT_TIMEOUT 100000
+#define I40IW_VCHNL_EVENT_TIMEOUT 100000
+
+#define I40IW_NO_VLAN 0xffff
+#define I40IW_NO_QSET 0xffff
+
+/* access to mcast filter list */
+#define IW_ADD_MCAST false
+#define IW_DEL_MCAST true
+
+#define I40IW_DRV_OPT_ENABLE_MPA_VER_0 0x00000001
+#define I40IW_DRV_OPT_DISABLE_MPA_CRC 0x00000002
+#define I40IW_DRV_OPT_DISABLE_FIRST_WRITE 0x00000004
+#define I40IW_DRV_OPT_DISABLE_INTF 0x00000008
+#define I40IW_DRV_OPT_ENABLE_MSI 0x00000010
+#define I40IW_DRV_OPT_DUAL_LOGICAL_PORT 0x00000020
+#define I40IW_DRV_OPT_NO_INLINE_DATA 0x00000080
+#define I40IW_DRV_OPT_DISABLE_INT_MOD 0x00000100
+#define I40IW_DRV_OPT_DISABLE_VIRT_WQ 0x00000200
+#define I40IW_DRV_OPT_ENABLE_PAU 0x00000400
+#define I40IW_DRV_OPT_MCAST_LOGPORT_MAP 0x00000800
+
+#define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
+#define IW_CFG_FPM_QP_COUNT 32768
+#define I40IW_MAX_PAGES_PER_FMR 512
+#define I40IW_MIN_PAGES_PER_FMR 1
+#define I40IW_CQP_COMPL_RQ_WQE_FLUSHED 2
+#define I40IW_CQP_COMPL_SQ_WQE_FLUSHED 3
+#define I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED 4
+
+struct i40iw_cqp_compl_info {
+	u32 op_ret_val;
+	u16 maj_err_code;
+	u16 min_err_code;
+	bool error;
+	u8 op_code;
+};
+
+#define i40iw_pr_err(fmt, args ...) pr_err("%s: "fmt, __func__, ## args)
+
+#define i40iw_pr_info(fmt, args ...) pr_info("%s: " fmt, __func__, ## args)
+
+#define i40iw_pr_warn(fmt, args ...)
pr_warn("%s: " fmt, __func__, ## args) + +struct i40iw_cqp_request { + struct cqp_commands_info info; + wait_queue_head_t waitq; + struct list_head list; + atomic_t refcount; + void (*callback_fcn)(struct i40iw_cqp_request*, u32); + void *param; + struct i40iw_cqp_compl_info compl_info; + bool waiting; + bool request_done; + bool dynamic; +}; + +struct i40iw_cqp { + struct i40iw_sc_cqp sc_cqp; + spinlock_t req_lock; /*cqp request list */ + wait_queue_head_t waitq; + struct i40iw_dma_mem sq; + struct i40iw_dma_mem host_ctx; + u64 *scratch_array; + struct i40iw_cqp_request *cqp_requests; + struct list_head cqp_avail_reqs; + struct list_head cqp_pending_reqs; +}; + +struct i40iw_device; + +struct i40iw_ccq { + struct i40iw_sc_cq sc_cq; + spinlock_t lock; /* ccq control */ + wait_queue_head_t waitq; + struct i40iw_dma_mem mem_cq; + struct i40iw_dma_mem shadow_area; +}; + +struct i40iw_ceq { + struct i40iw_sc_ceq sc_ceq; + struct i40iw_dma_mem mem; + u32 irq; + u32 msix_idx; + struct i40iw_device *iwdev; + struct tasklet_struct dpc_tasklet; +}; + +struct i40iw_aeq { + struct i40iw_sc_aeq sc_aeq; + struct i40iw_dma_mem mem; +}; + +struct i40iw_arp_entry { + u32 ip_addr[4]; + u8 mac_addr[ETH_ALEN]; +}; + +enum init_completion_state { + INVALID_STATE = 0, + INITIAL_STATE, + CQP_CREATED, + HMC_OBJS_CREATED, + PBLE_CHUNK_MEM, + CCQ_CREATED, + AEQ_CREATED, + CEQ_CREATED, + ILQ_CREATED, + IEQ_CREATED, + IP_ADDR_REGISTERED, + RDMA_DEV_REGISTERED +}; + +struct i40iw_msix_vector { + u32 idx; + u32 irq; + u32 cpu_affinity; + u32 ceq_id; + cpumask_t mask; +}; + +struct l2params_work { + struct work_struct work; + struct i40iw_device *iwdev; + struct i40iw_l2params l2params; +}; + +#define I40IW_MSIX_TABLE_SIZE 65 + +struct virtchnl_work { + struct work_struct work; + union { + struct i40iw_cqp_request *cqp_request; + struct i40iw_virtchnl_work_info work_info; + }; +}; + +struct i40e_qvlist_info; + +struct i40iw_device { + struct i40iw_ib_device *iwibdev; + struct net_device *netdev; + wait_queue_head_t vchnl_waitq; + struct i40iw_sc_dev sc_dev; + struct i40iw_sc_vsi vsi; + struct i40iw_handler *hdl; + struct i40e_info *ldev; + struct i40e_client *client; + struct i40iw_hw hw; + struct i40iw_cm_core cm_core; + u8 *mem_resources; + unsigned long *allocated_qps; + unsigned long *allocated_cqs; + unsigned long *allocated_mrs; + unsigned long *allocated_pds; + unsigned long *allocated_arps; + struct i40iw_qp **qp_table; + bool msix_shared; + u32 msix_count; + struct i40iw_msix_vector *iw_msixtbl; + struct i40e_qvlist_info *iw_qvlist; + + struct i40iw_hmc_pble_rsrc *pble_rsrc; + struct i40iw_arp_entry *arp_table; + struct i40iw_cqp cqp; + struct i40iw_ccq ccq; + u32 ceqs_count; + struct i40iw_ceq *ceqlist; + struct i40iw_aeq aeq; + u32 arp_table_size; + u32 next_arp_index; + spinlock_t resource_lock; /* hw resource access */ + spinlock_t qptable_lock; + u32 vendor_id; + u32 vendor_part_id; + u32 of_device_registered; + + u32 device_cap_flags; + unsigned long db_start; + u8 resource_profile; + u8 max_rdma_vfs; + u8 max_enabled_vfs; + u8 max_sge; + u8 iw_status; + u8 send_term_ok; + bool push_mode; /* Initialized from parameter passed to driver */ + + /* x710 specific */ + struct mutex pbl_mutex; + struct tasklet_struct dpc_tasklet; + struct workqueue_struct *virtchnl_wq; + struct virtchnl_work virtchnl_w[I40IW_MAX_PE_ENABLED_VF_COUNT]; + struct i40iw_dma_mem obj_mem; + struct i40iw_dma_mem obj_next; + u8 *hmc_info_mem; + u32 sd_type; + struct workqueue_struct *param_wq; + atomic_t params_busy; + enum 
init_completion_state init_state; + u16 mac_ip_table_idx; + atomic_t vchnl_msgs; + u32 max_mr; + u32 max_qp; + u32 max_cq; + u32 max_pd; + u32 next_qp; + u32 next_cq; + u32 next_pd; + u32 max_mr_size; + u32 max_qp_wr; + u32 max_cqe; + u32 mr_stagmask; + u32 mpa_version; + bool dcb; + bool closing; + bool reset; + u32 used_pds; + u32 used_cqs; + u32 used_mrs; + u32 used_qps; + wait_queue_head_t close_wq; + atomic64_t use_count; +}; + +struct i40iw_ib_device { + struct ib_device ibdev; + struct i40iw_device *iwdev; +}; + +struct i40iw_handler { + struct list_head list; + struct i40e_client *client; + struct i40iw_device device; + struct i40e_info ldev; +}; + +/** + * to_iwdev - get device + * @ibdev: ib device + **/ +static inline struct i40iw_device *to_iwdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct i40iw_ib_device, ibdev)->iwdev; +} + +/** + * to_ucontext - get user context + * @ibucontext: ib user context + **/ +static inline struct i40iw_ucontext *to_ucontext(struct ib_ucontext *ibucontext) +{ + return container_of(ibucontext, struct i40iw_ucontext, ibucontext); +} + +/** + * to_iwpd - get protection domain + * @ibpd: ib pd + **/ +static inline struct i40iw_pd *to_iwpd(struct ib_pd *ibpd) +{ + return container_of(ibpd, struct i40iw_pd, ibpd); +} + +/** + * to_iwmr - get device memory region + * @ibdev: ib memory region + **/ +static inline struct i40iw_mr *to_iwmr(struct ib_mr *ibmr) +{ + return container_of(ibmr, struct i40iw_mr, ibmr); +} + +/** + * to_iwmr_from_ibfmr - get device memory region + * @ibfmr: ib fmr + **/ +static inline struct i40iw_mr *to_iwmr_from_ibfmr(struct ib_fmr *ibfmr) +{ + return container_of(ibfmr, struct i40iw_mr, ibfmr); +} + +/** + * to_iwmw - get device memory window + * @ibmw: ib memory window + **/ +static inline struct i40iw_mr *to_iwmw(struct ib_mw *ibmw) +{ + return container_of(ibmw, struct i40iw_mr, ibmw); +} + +/** + * to_iwcq - get completion queue + * @ibcq: ib cqdevice + **/ +static inline struct i40iw_cq *to_iwcq(struct ib_cq *ibcq) +{ + return container_of(ibcq, struct i40iw_cq, ibcq); +} + +/** + * to_iwqp - get device qp + * @ibqp: ib qp + **/ +static inline struct i40iw_qp *to_iwqp(struct ib_qp *ibqp) +{ + return container_of(ibqp, struct i40iw_qp, ibqp); +} + +/* i40iw.c */ +void i40iw_add_ref(struct ib_qp *); +void i40iw_rem_ref(struct ib_qp *); +struct ib_qp *i40iw_get_qp(struct ib_device *, int); + +void i40iw_flush_wqes(struct i40iw_device *iwdev, + struct i40iw_qp *qp); + +void i40iw_manage_arp_cache(struct i40iw_device *iwdev, + unsigned char *mac_addr, + u32 *ip_addr, + bool ipv4, + u32 action); + +int i40iw_manage_apbvt(struct i40iw_device *iwdev, + u16 accel_local_port, + bool add_port); + +struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait); +void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request); +void i40iw_put_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request); + +/** + * i40iw_alloc_resource - allocate a resource + * @iwdev: device pointer + * @resource_array: resource bit array: + * @max_resources: maximum resource number + * @req_resources_num: Allocated resource number + * @next: next free id + **/ +static inline int i40iw_alloc_resource(struct i40iw_device *iwdev, + unsigned long *resource_array, + u32 max_resources, + u32 *req_resource_num, + u32 *next) +{ + u32 resource_num; + unsigned long flags; + + spin_lock_irqsave(&iwdev->resource_lock, flags); + resource_num = find_next_zero_bit(resource_array, 
max_resources, *next); + if (resource_num >= max_resources) { + resource_num = find_first_zero_bit(resource_array, max_resources); + if (resource_num >= max_resources) { + spin_unlock_irqrestore(&iwdev->resource_lock, flags); + return -EOVERFLOW; + } + } + set_bit(resource_num, resource_array); + *next = resource_num + 1; + if (*next == max_resources) + *next = 0; + *req_resource_num = resource_num; + spin_unlock_irqrestore(&iwdev->resource_lock, flags); + + return 0; +} + +/** + * i40iw_is_resource_allocated - detrmine if resource is + * allocated + * @iwdev: device pointer + * @resource_array: resource array for the resource_num + * @resource_num: resource number to check + **/ +static inline bool i40iw_is_resource_allocated(struct i40iw_device *iwdev, + unsigned long *resource_array, + u32 resource_num) +{ + bool bit_is_set; + unsigned long flags; + + spin_lock_irqsave(&iwdev->resource_lock, flags); + + bit_is_set = test_bit(resource_num, resource_array); + spin_unlock_irqrestore(&iwdev->resource_lock, flags); + + return bit_is_set; +} + +/** + * i40iw_free_resource - free a resource + * @iwdev: device pointer + * @resource_array: resource array for the resource_num + * @resource_num: resource number to free + **/ +static inline void i40iw_free_resource(struct i40iw_device *iwdev, + unsigned long *resource_array, + u32 resource_num) +{ + unsigned long flags; + + spin_lock_irqsave(&iwdev->resource_lock, flags); + clear_bit(resource_num, resource_array); + spin_unlock_irqrestore(&iwdev->resource_lock, flags); +} + +/** + * to_iwhdl - Get the handler from the device pointer + * @iwdev: device pointer + **/ +static inline struct i40iw_handler *to_iwhdl(struct i40iw_device *iw_dev) +{ + return container_of(iw_dev, struct i40iw_handler, device); +} + +struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev); + +/** + * iw_init_resources - + */ +u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev); + +int i40iw_register_rdma_device(struct i40iw_device *iwdev); +void i40iw_port_ibevent(struct i40iw_device *iwdev); +void i40iw_cm_disconn(struct i40iw_qp *iwqp); +void i40iw_cm_disconn_worker(void *); +int mini_cm_recv_pkt(struct i40iw_cm_core *, struct i40iw_device *, + struct sk_buff *); + +enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev, + struct i40iw_cqp_request *cqp_request); +enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev, + u8 *mac_addr, u8 *mac_index); +int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); +void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq); + +void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev); +void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev); +void i40iw_add_pdusecount(struct i40iw_pd *iwpd); +void i40iw_rem_devusecount(struct i40iw_device *iwdev); +void i40iw_add_devusecount(struct i40iw_device *iwdev); +void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, + struct i40iw_modify_qp_info *info, bool wait); + +void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, + struct i40iw_sc_qp *qp, + bool suspend); +enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev, + struct i40iw_cm_info *cminfo, + enum i40iw_quad_entry_type etype, + enum i40iw_quad_hash_manage_type mtype, + void *cmnode, + bool wait); +void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf); +void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp); +void i40iw_free_qp_resources(struct 
i40iw_device *iwdev, + struct i40iw_qp *iwqp, + u32 qp_num); +enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev, + struct i40iw_dma_mem *memptr, + u32 size, u32 mask); + +void i40iw_request_reset(struct i40iw_device *iwdev); +void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev); +void i40iw_setup_cm_core(struct i40iw_device *iwdev); +void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core); +void i40iw_process_ceq(struct i40iw_device *, struct i40iw_ceq *iwceq); +void i40iw_process_aeq(struct i40iw_device *); +void i40iw_next_iw_state(struct i40iw_qp *iwqp, + u8 state, u8 del_hash, + u8 term, u8 term_len); +int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack); +int i40iw_send_reset(struct i40iw_cm_node *cm_node); +struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core, + u16 rem_port, + u32 *rem_addr, + u16 loc_port, + u32 *loc_addr, + bool add_refcnt, + bool accelerated_list); + +enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev, + struct i40iw_sc_qp *qp, + struct i40iw_qp_flush_info *info, + bool wait); + +void i40iw_gen_ae(struct i40iw_device *iwdev, + struct i40iw_sc_qp *qp, + struct i40iw_gen_ae_info *info, + bool wait); + +void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src); +struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *ib_pd, + u64 addr, + u64 size, + int acc, + u64 *iova_start); + +int i40iw_inetaddr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr); +int i40iw_inet6addr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr); +int i40iw_net_event(struct notifier_block *notifier, + unsigned long event, + void *ptr); +int i40iw_netdevice_event(struct notifier_block *notifier, + unsigned long event, + void *ptr); + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c new file mode 100644 index 000000000..0273d0404 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -0,0 +1,4398 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#include <linux/atomic.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/init.h> +#include <linux/if_arp.h> +#include <linux/if_vlan.h> +#include <linux/notifier.h> +#include <linux/net.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/time.h> +#include <linux/delay.h> +#include <linux/etherdevice.h> +#include <linux/netdevice.h> +#include <linux/random.h> +#include <linux/list.h> +#include <linux/threads.h> +#include <linux/highmem.h> +#include <net/arp.h> +#include <net/ndisc.h> +#include <net/neighbour.h> +#include <net/route.h> +#include <net/addrconf.h> +#include <net/ip6_route.h> +#include <net/ip_fib.h> +#include <net/secure_seq.h> +#include <net/tcp.h> +#include <asm/checksum.h> + +#include "i40iw.h" + +static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *); +static void i40iw_cm_post_event(struct i40iw_cm_event *event); +static void i40iw_disconnect_worker(struct work_struct *work); + +/** + * i40iw_free_sqbuf - put back puda buffer if refcount = 0 + * @vsi: pointer to vsi structure + * @buf: puda buffer to free + */ +void i40iw_free_sqbuf(struct i40iw_sc_vsi *vsi, void *bufp) +{ + struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)bufp; + struct i40iw_puda_rsrc *ilq = vsi->ilq; + + if (!atomic_dec_return(&buf->refcount)) + i40iw_puda_ret_bufpool(ilq, buf); +} + +/** + * i40iw_derive_hw_ird_setting - Calculate IRD + * + * @cm_ird: IRD of connection's node + * + * The ird from the connection is rounded to a supported HW + * setting (2,8,32,64) and then encoded for ird_size field of + * qp_ctx + */ +static u8 i40iw_derive_hw_ird_setting(u16 cm_ird) +{ + u8 encoded_ird_size; + + /* ird_size field is encoded in qp_ctx */ + switch (cm_ird ? 
roundup_pow_of_two(cm_ird) : 0) { + case I40IW_HW_IRD_SETTING_64: + encoded_ird_size = 3; + break; + case I40IW_HW_IRD_SETTING_32: + case I40IW_HW_IRD_SETTING_16: + encoded_ird_size = 2; + break; + case I40IW_HW_IRD_SETTING_8: + case I40IW_HW_IRD_SETTING_4: + encoded_ird_size = 1; + break; + case I40IW_HW_IRD_SETTING_2: + default: + encoded_ird_size = 0; + break; + } + return encoded_ird_size; +} + +/** + * i40iw_record_ird_ord - Record IRD/ORD passed in + * @cm_node: connection's node + * @conn_ird: connection IRD + * @conn_ord: connection ORD + */ +static void i40iw_record_ird_ord(struct i40iw_cm_node *cm_node, u32 conn_ird, + u32 conn_ord) +{ + if (conn_ird > I40IW_MAX_IRD_SIZE) + conn_ird = I40IW_MAX_IRD_SIZE; + + if (conn_ord > I40IW_MAX_ORD_SIZE) + conn_ord = I40IW_MAX_ORD_SIZE; + else if (!conn_ord && cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO) + conn_ord = 1; + + cm_node->ird_size = conn_ird; + cm_node->ord_size = conn_ord; +} + +/** + * i40iw_copy_ip_ntohl - change network to host ip + * @dst: host ip + * @src: big endian + */ +void i40iw_copy_ip_ntohl(u32 *dst, __be32 *src) +{ + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst++ = ntohl(*src++); + *dst = ntohl(*src); +} + +/** + * i40iw_copy_ip_htonl - change host addr to network ip + * @dst: host ip + * @src: little endian + */ +static inline void i40iw_copy_ip_htonl(__be32 *dst, u32 *src) +{ + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst++ = htonl(*src++); + *dst = htonl(*src); +} + +/** + * i40iw_fill_sockaddr4 - get addr info for passive connection + * @cm_node: connection's node + * @event: upper layer's cm event + */ +static inline void i40iw_fill_sockaddr4(struct i40iw_cm_node *cm_node, + struct iw_cm_event *event) +{ + struct sockaddr_in *laddr = (struct sockaddr_in *)&event->local_addr; + struct sockaddr_in *raddr = (struct sockaddr_in *)&event->remote_addr; + + laddr->sin_family = AF_INET; + raddr->sin_family = AF_INET; + + laddr->sin_port = htons(cm_node->loc_port); + raddr->sin_port = htons(cm_node->rem_port); + + laddr->sin_addr.s_addr = htonl(cm_node->loc_addr[0]); + raddr->sin_addr.s_addr = htonl(cm_node->rem_addr[0]); +} + +/** + * i40iw_fill_sockaddr6 - get ipv6 addr info for passive side + * @cm_node: connection's node + * @event: upper layer's cm event + */ +static inline void i40iw_fill_sockaddr6(struct i40iw_cm_node *cm_node, + struct iw_cm_event *event) +{ + struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&event->local_addr; + struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)&event->remote_addr; + + laddr6->sin6_family = AF_INET6; + raddr6->sin6_family = AF_INET6; + + laddr6->sin6_port = htons(cm_node->loc_port); + raddr6->sin6_port = htons(cm_node->rem_port); + + i40iw_copy_ip_htonl(laddr6->sin6_addr.in6_u.u6_addr32, + cm_node->loc_addr); + i40iw_copy_ip_htonl(raddr6->sin6_addr.in6_u.u6_addr32, + cm_node->rem_addr); +} + +/** + * i40iw_get_addr_info + * @cm_node: contains ip/tcp info + * @cm_info: to get a copy of the cm_node ip/tcp info +*/ +static void i40iw_get_addr_info(struct i40iw_cm_node *cm_node, + struct i40iw_cm_info *cm_info) +{ + cm_info->ipv4 = cm_node->ipv4; + cm_info->vlan_id = cm_node->vlan_id; + memcpy(cm_info->loc_addr, cm_node->loc_addr, sizeof(cm_info->loc_addr)); + memcpy(cm_info->rem_addr, cm_node->rem_addr, sizeof(cm_info->rem_addr)); + cm_info->loc_port = cm_node->loc_port; + cm_info->rem_port = cm_node->rem_port; + cm_info->user_pri = cm_node->user_pri; +} + +/** + * i40iw_get_cmevent_info - for cm event upcall + * @cm_node: connection's node + * 
@cm_id: upper layers cm struct for the event + * @event: upper layer's cm event + */ +static inline void i40iw_get_cmevent_info(struct i40iw_cm_node *cm_node, + struct iw_cm_id *cm_id, + struct iw_cm_event *event) +{ + memcpy(&event->local_addr, &cm_id->m_local_addr, + sizeof(event->local_addr)); + memcpy(&event->remote_addr, &cm_id->m_remote_addr, + sizeof(event->remote_addr)); + if (cm_node) { + event->private_data = (void *)cm_node->pdata_buf; + event->private_data_len = (u8)cm_node->pdata.size; + event->ird = cm_node->ird_size; + event->ord = cm_node->ord_size; + } +} + +/** + * i40iw_send_cm_event - upcall cm's event handler + * @cm_node: connection's node + * @cm_id: upper layer's cm info struct + * @type: Event type to indicate + * @status: status for the event type + */ +static int i40iw_send_cm_event(struct i40iw_cm_node *cm_node, + struct iw_cm_id *cm_id, + enum iw_cm_event_type type, + int status) +{ + struct iw_cm_event event; + + memset(&event, 0, sizeof(event)); + event.event = type; + event.status = status; + switch (type) { + case IW_CM_EVENT_CONNECT_REQUEST: + if (cm_node->ipv4) + i40iw_fill_sockaddr4(cm_node, &event); + else + i40iw_fill_sockaddr6(cm_node, &event); + event.provider_data = (void *)cm_node; + event.private_data = (void *)cm_node->pdata_buf; + event.private_data_len = (u8)cm_node->pdata.size; + event.ird = cm_node->ird_size; + break; + case IW_CM_EVENT_CONNECT_REPLY: + i40iw_get_cmevent_info(cm_node, cm_id, &event); + break; + case IW_CM_EVENT_ESTABLISHED: + event.ird = cm_node->ird_size; + event.ord = cm_node->ord_size; + break; + case IW_CM_EVENT_DISCONNECT: + break; + case IW_CM_EVENT_CLOSE: + break; + default: + i40iw_pr_err("event type received type = %d\n", type); + return -1; + } + return cm_id->event_handler(cm_id, &event); +} + +/** + * i40iw_create_event - create cm event + * @cm_node: connection's node + * @type: Event type to generate + */ +static struct i40iw_cm_event *i40iw_create_event(struct i40iw_cm_node *cm_node, + enum i40iw_cm_event_type type) +{ + struct i40iw_cm_event *event; + + if (!cm_node->cm_id) + return NULL; + + event = kzalloc(sizeof(*event), GFP_ATOMIC); + + if (!event) + return NULL; + + event->type = type; + event->cm_node = cm_node; + memcpy(event->cm_info.rem_addr, cm_node->rem_addr, sizeof(event->cm_info.rem_addr)); + memcpy(event->cm_info.loc_addr, cm_node->loc_addr, sizeof(event->cm_info.loc_addr)); + event->cm_info.rem_port = cm_node->rem_port; + event->cm_info.loc_port = cm_node->loc_port; + event->cm_info.cm_id = cm_node->cm_id; + + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "node=%p event=%p type=%u dst=%pI4 src=%pI4\n", + cm_node, + event, + type, + event->cm_info.loc_addr, + event->cm_info.rem_addr); + + i40iw_cm_post_event(event); + return event; +} + +/** + * i40iw_free_retrans_entry - free send entry + * @cm_node: connection's node + */ +static void i40iw_free_retrans_entry(struct i40iw_cm_node *cm_node) +{ + struct i40iw_device *iwdev = cm_node->iwdev; + struct i40iw_timer_entry *send_entry; + + send_entry = cm_node->send_entry; + if (send_entry) { + cm_node->send_entry = NULL; + i40iw_free_sqbuf(&iwdev->vsi, (void *)send_entry->sqbuf); + kfree(send_entry); + atomic_dec(&cm_node->ref_count); + } +} + +/** + * i40iw_cleanup_retrans_entry - free send entry with lock + * @cm_node: connection's node + */ +static void i40iw_cleanup_retrans_entry(struct i40iw_cm_node *cm_node) +{ + unsigned long flags; + + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + i40iw_free_retrans_entry(cm_node); + 
spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); +} + +/** + * i40iw_form_cm_frame - get a free packet and build frame + * @cm_node: connection's node ionfo to use in frame + * @options: pointer to options info + * @hdr: pointer mpa header + * @pdata: pointer to private data + * @flags: indicates FIN or ACK + */ +static struct i40iw_puda_buf *i40iw_form_cm_frame(struct i40iw_cm_node *cm_node, + struct i40iw_kmem_info *options, + struct i40iw_kmem_info *hdr, + struct i40iw_kmem_info *pdata, + u8 flags) +{ + struct i40iw_puda_buf *sqbuf; + struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi; + u8 *buf; + + struct tcphdr *tcph; + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct ethhdr *ethh; + u16 packetsize; + u16 eth_hlen = ETH_HLEN; + u32 opts_len = 0; + u32 pd_len = 0; + u32 hdr_len = 0; + u16 vtag; + + sqbuf = i40iw_puda_get_bufpool(vsi->ilq); + if (!sqbuf) + return NULL; + buf = sqbuf->mem.va; + + if (options) + opts_len = (u32)options->size; + + if (hdr) + hdr_len = hdr->size; + + if (pdata) + pd_len = pdata->size; + + if (cm_node->vlan_id < VLAN_TAG_PRESENT) + eth_hlen += 4; + + if (cm_node->ipv4) + packetsize = sizeof(*iph) + sizeof(*tcph); + else + packetsize = sizeof(*ip6h) + sizeof(*tcph); + packetsize += opts_len + hdr_len + pd_len; + + memset(buf, 0x00, eth_hlen + packetsize); + + sqbuf->totallen = packetsize + eth_hlen; + sqbuf->maclen = eth_hlen; + sqbuf->tcphlen = sizeof(*tcph) + opts_len; + sqbuf->scratch = (void *)cm_node; + + ethh = (struct ethhdr *)buf; + buf += eth_hlen; + + if (cm_node->ipv4) { + sqbuf->ipv4 = true; + + iph = (struct iphdr *)buf; + buf += sizeof(*iph); + tcph = (struct tcphdr *)buf; + buf += sizeof(*tcph); + + ether_addr_copy(ethh->h_dest, cm_node->rem_mac); + ether_addr_copy(ethh->h_source, cm_node->loc_mac); + if (cm_node->vlan_id < VLAN_TAG_PRESENT) { + ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q); + vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id; + ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag); + + ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IP); + } else { + ethh->h_proto = htons(ETH_P_IP); + } + + iph->version = IPVERSION; + iph->ihl = 5; /* 5 * 4Byte words, IP headr len */ + iph->tos = cm_node->tos; + iph->tot_len = htons(packetsize); + iph->id = htons(++cm_node->tcp_cntxt.loc_id); + + iph->frag_off = htons(0x4000); + iph->ttl = 0x40; + iph->protocol = IPPROTO_TCP; + iph->saddr = htonl(cm_node->loc_addr[0]); + iph->daddr = htonl(cm_node->rem_addr[0]); + } else { + sqbuf->ipv4 = false; + ip6h = (struct ipv6hdr *)buf; + buf += sizeof(*ip6h); + tcph = (struct tcphdr *)buf; + buf += sizeof(*tcph); + + ether_addr_copy(ethh->h_dest, cm_node->rem_mac); + ether_addr_copy(ethh->h_source, cm_node->loc_mac); + if (cm_node->vlan_id < VLAN_TAG_PRESENT) { + ((struct vlan_ethhdr *)ethh)->h_vlan_proto = htons(ETH_P_8021Q); + vtag = (cm_node->user_pri << VLAN_PRIO_SHIFT) | cm_node->vlan_id; + ((struct vlan_ethhdr *)ethh)->h_vlan_TCI = htons(vtag); + ((struct vlan_ethhdr *)ethh)->h_vlan_encapsulated_proto = htons(ETH_P_IPV6); + } else { + ethh->h_proto = htons(ETH_P_IPV6); + } + ip6h->version = 6; + ip6h->priority = cm_node->tos >> 4; + ip6h->flow_lbl[0] = cm_node->tos << 4; + ip6h->flow_lbl[1] = 0; + ip6h->flow_lbl[2] = 0; + ip6h->payload_len = htons(packetsize - sizeof(*ip6h)); + ip6h->nexthdr = 6; + ip6h->hop_limit = 128; + i40iw_copy_ip_htonl(ip6h->saddr.in6_u.u6_addr32, + cm_node->loc_addr); + i40iw_copy_ip_htonl(ip6h->daddr.in6_u.u6_addr32, + cm_node->rem_addr); + } + + 
tcph->source = htons(cm_node->loc_port); + tcph->dest = htons(cm_node->rem_port); + + tcph->seq = htonl(cm_node->tcp_cntxt.loc_seq_num); + + if (flags & SET_ACK) { + cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt; + tcph->ack_seq = htonl(cm_node->tcp_cntxt.loc_ack_num); + tcph->ack = 1; + } else { + tcph->ack_seq = 0; + } + + if (flags & SET_SYN) { + cm_node->tcp_cntxt.loc_seq_num++; + tcph->syn = 1; + } else { + cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len; + } + + if (flags & SET_FIN) { + cm_node->tcp_cntxt.loc_seq_num++; + tcph->fin = 1; + } + + if (flags & SET_RST) + tcph->rst = 1; + + tcph->doff = (u16)((sizeof(*tcph) + opts_len + 3) >> 2); + sqbuf->tcphlen = tcph->doff << 2; + tcph->window = htons(cm_node->tcp_cntxt.rcv_wnd); + tcph->urg_ptr = 0; + + if (opts_len) { + memcpy(buf, options->addr, opts_len); + buf += opts_len; + } + + if (hdr_len) { + memcpy(buf, hdr->addr, hdr_len); + buf += hdr_len; + } + + if (pdata && pdata->addr) + memcpy(buf, pdata->addr, pdata->size); + + atomic_set(&sqbuf->refcount, 1); + + return sqbuf; +} + +/** + * i40iw_send_reset - Send RST packet + * @cm_node: connection's node + */ +int i40iw_send_reset(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + int flags = SET_RST | SET_ACK; + + sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, flags); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -1; + } + + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 0, 1); +} + +/** + * i40iw_active_open_err - send event for active side cm error + * @cm_node: connection's node + * @reset: Flag to send reset or not + */ +static void i40iw_active_open_err(struct i40iw_cm_node *cm_node, bool reset) +{ + i40iw_cleanup_retrans_entry(cm_node); + cm_node->cm_core->stats_connect_errs++; + if (reset) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s cm_node=%p state=%d\n", + __func__, + cm_node, + cm_node->state); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + } + + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); +} + +/** + * i40iw_passive_open_err - handle passive side cm error + * @cm_node: connection's node + * @reset: send reset or just free cm_node + */ +static void i40iw_passive_open_err(struct i40iw_cm_node *cm_node, bool reset) +{ + i40iw_cleanup_retrans_entry(cm_node); + cm_node->cm_core->stats_passive_errs++; + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s cm_node=%p state =%d\n", + __func__, + cm_node, + cm_node->state); + if (reset) + i40iw_send_reset(cm_node); + else + i40iw_rem_ref_cm_node(cm_node); +} + +/** + * i40iw_event_connect_error - to create connect error event + * @event: cm information for connect event + */ +static void i40iw_event_connect_error(struct i40iw_cm_event *event) +{ + struct i40iw_qp *iwqp; + struct iw_cm_id *cm_id; + + cm_id = event->cm_node->cm_id; + if (!cm_id) + return; + + iwqp = cm_id->provider_data; + + if (!iwqp || !iwqp->iwdev) + return; + + iwqp->cm_id = NULL; + cm_id->provider_data = NULL; + i40iw_send_cm_event(event->cm_node, cm_id, + IW_CM_EVENT_CONNECT_REPLY, + -ECONNRESET); + cm_id->rem_ref(cm_id); + i40iw_rem_ref_cm_node(event->cm_node); +} + +/** + * i40iw_process_options + * @cm_node: connection's node + * @optionsloc: point to start of options + * @optionsize: size of all options + * @syn_packet: flag if syn packet + */ +static int i40iw_process_options(struct i40iw_cm_node *cm_node, + u8 *optionsloc, + u32 optionsize, + u32 syn_packet) +{ 
+ u32 tmp; + u32 offset = 0; + union all_known_options *all_options; + char got_mss_option = 0; + + while (offset < optionsize) { + all_options = (union all_known_options *)(optionsloc + offset); + switch (all_options->as_base.optionnum) { + case OPTION_NUMBER_END: + offset = optionsize; + break; + case OPTION_NUMBER_NONE: + offset += 1; + continue; + case OPTION_NUMBER_MSS: + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s: MSS Length: %d Offset: %d Size: %d\n", + __func__, + all_options->as_mss.length, + offset, + optionsize); + got_mss_option = 1; + if (all_options->as_mss.length != 4) + return -1; + tmp = ntohs(all_options->as_mss.mss); + if (tmp > 0 && tmp < cm_node->tcp_cntxt.mss) + cm_node->tcp_cntxt.mss = tmp; + break; + case OPTION_NUMBER_WINDOW_SCALE: + cm_node->tcp_cntxt.snd_wscale = + all_options->as_windowscale.shiftcount; + break; + default: + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "TCP Option not understood: %x\n", + all_options->as_base.optionnum); + break; + } + offset += all_options->as_base.length; + } + if (!got_mss_option && syn_packet) + cm_node->tcp_cntxt.mss = I40IW_CM_DEFAULT_MSS; + return 0; +} + +/** + * i40iw_handle_tcp_options - + * @cm_node: connection's node + * @tcph: pointer tcp header + * @optionsize: size of options rcvd + * @passive: active or passive flag + */ +static int i40iw_handle_tcp_options(struct i40iw_cm_node *cm_node, + struct tcphdr *tcph, + int optionsize, + int passive) +{ + u8 *optionsloc = (u8 *)&tcph[1]; + + if (optionsize) { + if (i40iw_process_options(cm_node, + optionsloc, + optionsize, + (u32)tcph->syn)) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "%s: Node %p, Sending RESET\n", + __func__, + cm_node); + if (passive) + i40iw_passive_open_err(cm_node, true); + else + i40iw_active_open_err(cm_node, true); + return -1; + } + } + + cm_node->tcp_cntxt.snd_wnd = ntohs(tcph->window) << + cm_node->tcp_cntxt.snd_wscale; + + if (cm_node->tcp_cntxt.snd_wnd > cm_node->tcp_cntxt.max_snd_wnd) + cm_node->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.snd_wnd; + return 0; +} + +/** + * i40iw_build_mpa_v1 - build a MPA V1 frame + * @cm_node: connection's node + * @mpa_key: to do read0 or write0 + */ +static void i40iw_build_mpa_v1(struct i40iw_cm_node *cm_node, + void *start_addr, + u8 mpa_key) +{ + struct ietf_mpa_v1 *mpa_frame = (struct ietf_mpa_v1 *)start_addr; + + switch (mpa_key) { + case MPA_KEY_REQUEST: + memcpy(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE); + break; + case MPA_KEY_REPLY: + memcpy(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE); + break; + default: + break; + } + mpa_frame->flags = IETF_MPA_FLAGS_CRC; + mpa_frame->rev = cm_node->mpa_frame_rev; + mpa_frame->priv_data_len = htons(cm_node->pdata.size); +} + +/** + * i40iw_build_mpa_v2 - build a MPA V2 frame + * @cm_node: connection's node + * @start_addr: buffer start address + * @mpa_key: to do read0 or write0 + */ +static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node, + void *start_addr, + u8 mpa_key) +{ + struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr; + struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg; + u16 ctrl_ird, ctrl_ord; + + /* initialize the upper 5 bytes of the frame */ + i40iw_build_mpa_v1(cm_node, start_addr, mpa_key); + mpa_frame->flags |= IETF_MPA_V2_FLAG; + mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE); + + /* initialize RTR msg */ + if (cm_node->mpav2_ird_ord == IETF_NO_IRD_ORD) { + ctrl_ird = IETF_NO_IRD_ORD; + ctrl_ord = IETF_NO_IRD_ORD; + } else { + ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ? 
+ IETF_NO_IRD_ORD : cm_node->ird_size; + ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ? + IETF_NO_IRD_ORD : cm_node->ord_size; + } + + ctrl_ird |= IETF_PEER_TO_PEER; + + switch (mpa_key) { + case MPA_KEY_REQUEST: + ctrl_ord |= IETF_RDMA0_WRITE; + ctrl_ord |= IETF_RDMA0_READ; + break; + case MPA_KEY_REPLY: + switch (cm_node->send_rdma0_op) { + case SEND_RDMA_WRITE_ZERO: + ctrl_ord |= IETF_RDMA0_WRITE; + break; + case SEND_RDMA_READ_ZERO: + ctrl_ord |= IETF_RDMA0_READ; + break; + } + break; + default: + break; + } + rtr_msg->ctrl_ird = htons(ctrl_ird); + rtr_msg->ctrl_ord = htons(ctrl_ord); +} + +/** + * i40iw_cm_build_mpa_frame - build mpa frame for mpa version 1 or version 2 + * @cm_node: connection's node + * @mpa: mpa: data buffer + * @mpa_key: to do read0 or write0 + */ +static int i40iw_cm_build_mpa_frame(struct i40iw_cm_node *cm_node, + struct i40iw_kmem_info *mpa, + u8 mpa_key) +{ + int hdr_len = 0; + + switch (cm_node->mpa_frame_rev) { + case IETF_MPA_V1: + hdr_len = sizeof(struct ietf_mpa_v1); + i40iw_build_mpa_v1(cm_node, mpa->addr, mpa_key); + break; + case IETF_MPA_V2: + hdr_len = sizeof(struct ietf_mpa_v2); + i40iw_build_mpa_v2(cm_node, mpa->addr, mpa_key); + break; + default: + break; + } + + return hdr_len; +} + +/** + * i40iw_send_mpa_request - active node send mpa request to passive node + * @cm_node: connection's node + */ +static int i40iw_send_mpa_request(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + + if (!cm_node) { + i40iw_pr_err("cm_node == NULL\n"); + return -1; + } + + cm_node->mpa_hdr.addr = &cm_node->mpa_frame; + cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node, + &cm_node->mpa_hdr, + MPA_KEY_REQUEST); + if (!cm_node->mpa_hdr.size) { + i40iw_pr_err("mpa size = %d\n", cm_node->mpa_hdr.size); + return -1; + } + + sqbuf = i40iw_form_cm_frame(cm_node, + NULL, + &cm_node->mpa_hdr, + &cm_node->pdata, + SET_ACK); + if (!sqbuf) { + i40iw_pr_err("sq_buf == NULL\n"); + return -1; + } + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * i40iw_send_mpa_reject - + * @cm_node: connection's node + * @pdata: reject data for connection + * @plen: length of reject data + */ +static int i40iw_send_mpa_reject(struct i40iw_cm_node *cm_node, + const void *pdata, + u8 plen) +{ + struct i40iw_puda_buf *sqbuf; + struct i40iw_kmem_info priv_info; + + cm_node->mpa_hdr.addr = &cm_node->mpa_frame; + cm_node->mpa_hdr.size = i40iw_cm_build_mpa_frame(cm_node, + &cm_node->mpa_hdr, + MPA_KEY_REPLY); + + cm_node->mpa_frame.flags |= IETF_MPA_FLAGS_REJECT; + priv_info.addr = (void *)pdata; + priv_info.size = plen; + + sqbuf = i40iw_form_cm_frame(cm_node, + NULL, + &cm_node->mpa_hdr, + &priv_info, + SET_ACK | SET_FIN); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -ENOMEM; + } + cm_node->state = I40IW_CM_STATE_FIN_WAIT1; + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * recv_mpa - process an IETF MPA frame + * @cm_node: connection's node + * @buffer: Data pointer + * @type: to return accept or reject + * @len: Len of mpa buffer + */ +static int i40iw_parse_mpa(struct i40iw_cm_node *cm_node, u8 *buffer, u32 *type, u32 len) +{ + struct ietf_mpa_v1 *mpa_frame; + struct ietf_mpa_v2 *mpa_v2_frame; + struct ietf_rtr_msg *rtr_msg; + int mpa_hdr_len; + int priv_data_len; + + *type = I40IW_MPA_REQUEST_ACCEPT; + + if (len < sizeof(struct ietf_mpa_v1)) { + i40iw_pr_err("ietf buffer small (%x)\n", len); + return -1; + } + + mpa_frame = (struct ietf_mpa_v1 *)buffer; + mpa_hdr_len = 
sizeof(struct ietf_mpa_v1); + priv_data_len = ntohs(mpa_frame->priv_data_len); + + if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) { + i40iw_pr_err("large pri_data %d\n", priv_data_len); + return -1; + } + if (mpa_frame->rev != IETF_MPA_V1 && mpa_frame->rev != IETF_MPA_V2) { + i40iw_pr_err("unsupported mpa rev = %d\n", mpa_frame->rev); + return -1; + } + if (mpa_frame->rev > cm_node->mpa_frame_rev) { + i40iw_pr_err("rev %d\n", mpa_frame->rev); + return -1; + } + cm_node->mpa_frame_rev = mpa_frame->rev; + + if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) { + if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REQ, IETF_MPA_KEY_SIZE)) { + i40iw_pr_err("Unexpected MPA Key received\n"); + return -1; + } + } else { + if (memcmp(mpa_frame->key, IEFT_MPA_KEY_REP, IETF_MPA_KEY_SIZE)) { + i40iw_pr_err("Unexpected MPA Key received\n"); + return -1; + } + } + + if (priv_data_len + mpa_hdr_len > len) { + i40iw_pr_err("ietf buffer len(%x + %x != %x)\n", + priv_data_len, mpa_hdr_len, len); + return -1; + } + if (len > MAX_CM_BUFFER) { + i40iw_pr_err("ietf buffer large len = %d\n", len); + return -1; + } + + switch (mpa_frame->rev) { + case IETF_MPA_V2:{ + u16 ird_size; + u16 ord_size; + u16 ctrl_ord; + u16 ctrl_ird; + + mpa_v2_frame = (struct ietf_mpa_v2 *)buffer; + mpa_hdr_len += IETF_RTR_MSG_SIZE; + rtr_msg = &mpa_v2_frame->rtr_msg; + + /* parse rtr message */ + ctrl_ord = ntohs(rtr_msg->ctrl_ord); + ctrl_ird = ntohs(rtr_msg->ctrl_ird); + ird_size = ctrl_ird & IETF_NO_IRD_ORD; + ord_size = ctrl_ord & IETF_NO_IRD_ORD; + + if (!(ctrl_ird & IETF_PEER_TO_PEER)) + return -1; + + if (ird_size == IETF_NO_IRD_ORD || ord_size == IETF_NO_IRD_ORD) { + cm_node->mpav2_ird_ord = IETF_NO_IRD_ORD; + goto negotiate_done; + } + + if (cm_node->state != I40IW_CM_STATE_MPAREQ_SENT) { + /* responder */ + if (!ord_size && (ctrl_ord & IETF_RDMA0_READ)) + cm_node->ird_size = 1; + if (cm_node->ord_size > ird_size) + cm_node->ord_size = ird_size; + } else { + /* initiator */ + if (!ird_size && (ctrl_ord & IETF_RDMA0_READ)) + return -1; + if (cm_node->ord_size > ird_size) + cm_node->ord_size = ird_size; + + if (cm_node->ird_size < ord_size) + /* no resources available */ + return -1; + } + +negotiate_done: + if (ctrl_ord & IETF_RDMA0_READ) + cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; + else if (ctrl_ord & IETF_RDMA0_WRITE) + cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO; + else /* Not supported RDMA0 operation */ + return -1; + i40iw_debug(cm_node->dev, I40IW_DEBUG_CM, + "MPAV2: Negotiated ORD: %d, IRD: %d\n", + cm_node->ord_size, cm_node->ird_size); + break; + } + break; + case IETF_MPA_V1: + default: + break; + } + + memcpy(cm_node->pdata_buf, buffer + mpa_hdr_len, priv_data_len); + cm_node->pdata.size = priv_data_len; + + if (mpa_frame->flags & IETF_MPA_FLAGS_REJECT) + *type = I40IW_MPA_REQUEST_REJECT; + + if (mpa_frame->flags & IETF_MPA_FLAGS_MARKERS) + cm_node->snd_mark_en = true; + + return 0; +} + +/** + * i40iw_schedule_cm_timer + * @@cm_node: connection's node + * @sqbuf: buffer to send + * @type: if it is send or close + * @send_retrans: if rexmits to be done + * @close_when_complete: is cm_node to be removed + * + * note - cm_node needs to be protected before calling this. Encase in: + * i40iw_rem_ref_cm_node(cm_core, cm_node); + * i40iw_schedule_cm_timer(...) 
+ * atomic_inc(&cm_node->ref_count); + */ +int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *sqbuf, + enum i40iw_timer_type type, + int send_retrans, + int close_when_complete) +{ + struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi; + struct i40iw_cm_core *cm_core = cm_node->cm_core; + struct i40iw_timer_entry *new_send; + int ret = 0; + u32 was_timer_set; + unsigned long flags; + + new_send = kzalloc(sizeof(*new_send), GFP_ATOMIC); + if (!new_send) { + if (type != I40IW_TIMER_TYPE_CLOSE) + i40iw_free_sqbuf(vsi, (void *)sqbuf); + return -ENOMEM; + } + new_send->retrycount = I40IW_DEFAULT_RETRYS; + new_send->retranscount = I40IW_DEFAULT_RETRANS; + new_send->sqbuf = sqbuf; + new_send->timetosend = jiffies; + new_send->type = type; + new_send->send_retrans = send_retrans; + new_send->close_when_complete = close_when_complete; + + if (type == I40IW_TIMER_TYPE_CLOSE) { + new_send->timetosend += (HZ / 10); + if (cm_node->close_entry) { + kfree(new_send); + i40iw_pr_err("already close entry\n"); + return -EINVAL; + } + cm_node->close_entry = new_send; + } + + if (type == I40IW_TIMER_TYPE_SEND) { + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + cm_node->send_entry = new_send; + atomic_inc(&cm_node->ref_count); + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + new_send->timetosend = jiffies + I40IW_RETRY_TIMEOUT; + + atomic_inc(&sqbuf->refcount); + i40iw_puda_send_buf(vsi->ilq, sqbuf); + if (!send_retrans) { + i40iw_cleanup_retrans_entry(cm_node); + if (close_when_complete) + i40iw_rem_ref_cm_node(cm_node); + return ret; + } + } + + spin_lock_irqsave(&cm_core->ht_lock, flags); + was_timer_set = timer_pending(&cm_core->tcp_timer); + + if (!was_timer_set) { + cm_core->tcp_timer.expires = new_send->timetosend; + add_timer(&cm_core->tcp_timer); + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + return ret; +} + +/** + * i40iw_retrans_expired - Could not rexmit the packet + * @cm_node: connection's node + */ +static void i40iw_retrans_expired(struct i40iw_cm_node *cm_node) +{ + struct iw_cm_id *cm_id = cm_node->cm_id; + enum i40iw_cm_node_state state = cm_node->state; + + cm_node->state = I40IW_CM_STATE_CLOSED; + switch (state) { + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_CLOSING: + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_LAST_ACK: + if (cm_node->cm_id) + cm_id->rem_ref(cm_id); + i40iw_send_reset(cm_node); + break; + default: + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); + break; + } +} + +/** + * i40iw_handle_close_entry - for handling retry/timeouts + * @cm_node: connection's node + * @rem_node: flag for remove cm_node + */ +static void i40iw_handle_close_entry(struct i40iw_cm_node *cm_node, u32 rem_node) +{ + struct i40iw_timer_entry *close_entry = cm_node->close_entry; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct i40iw_qp *iwqp; + unsigned long flags; + + if (!close_entry) + return; + iwqp = (struct i40iw_qp *)close_entry->sqbuf; + if (iwqp) { + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->cm_id) { + iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; + iwqp->hw_iwarp_state = I40IW_QP_STATE_ERROR; + iwqp->last_aeq = I40IW_AE_RESET_SENT; + iwqp->ibqp_state = IB_QPS_ERR; + spin_unlock_irqrestore(&iwqp->lock, flags); + i40iw_cm_disconn(iwqp); + } else { + spin_unlock_irqrestore(&iwqp->lock, flags); + } + } else if (rem_node) { + /* TIME_WAIT state */ + i40iw_rem_ref_cm_node(cm_node); + } + if 
(cm_id) + cm_id->rem_ref(cm_id); + kfree(close_entry); + cm_node->close_entry = NULL; +} + +/** + * i40iw_build_timer_list - Add cm_nodes to timer list + * @timer_list: ptr to timer list + * @hte: ptr to accelerated or non-accelerated list + */ +static void i40iw_build_timer_list(struct list_head *timer_list, + struct list_head *hte) +{ + struct i40iw_cm_node *cm_node; + struct list_head *list_core_temp, *list_node; + + list_for_each_safe(list_node, list_core_temp, hte) { + cm_node = container_of(list_node, struct i40iw_cm_node, list); + if (cm_node->close_entry || cm_node->send_entry) { + atomic_inc(&cm_node->ref_count); + list_add(&cm_node->timer_entry, timer_list); + } + } +} + +/** + * i40iw_cm_timer_tick - system's timer expired callback + * @pass: Pointing to cm_core + */ +static void i40iw_cm_timer_tick(struct timer_list *t) +{ + unsigned long nexttimeout = jiffies + I40IW_LONG_TIME; + struct i40iw_cm_node *cm_node; + struct i40iw_timer_entry *send_entry, *close_entry; + struct list_head *list_core_temp; + struct i40iw_sc_vsi *vsi; + struct list_head *list_node; + struct i40iw_cm_core *cm_core = from_timer(cm_core, t, tcp_timer); + u32 settimer = 0; + unsigned long timetosend; + unsigned long flags; + + struct list_head timer_list; + + INIT_LIST_HEAD(&timer_list); + + spin_lock_irqsave(&cm_core->ht_lock, flags); + i40iw_build_timer_list(&timer_list, &cm_core->non_accelerated_list); + i40iw_build_timer_list(&timer_list, &cm_core->accelerated_list); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + list_for_each_safe(list_node, list_core_temp, &timer_list) { + cm_node = container_of(list_node, + struct i40iw_cm_node, + timer_entry); + close_entry = cm_node->close_entry; + + if (close_entry) { + if (time_after(close_entry->timetosend, jiffies)) { + if (nexttimeout > close_entry->timetosend || + !settimer) { + nexttimeout = close_entry->timetosend; + settimer = 1; + } + } else { + i40iw_handle_close_entry(cm_node, 1); + } + } + + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + + send_entry = cm_node->send_entry; + if (!send_entry) + goto done; + if (time_after(send_entry->timetosend, jiffies)) { + if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { + if ((nexttimeout > send_entry->timetosend) || + !settimer) { + nexttimeout = send_entry->timetosend; + settimer = 1; + } + } else { + i40iw_free_retrans_entry(cm_node); + } + goto done; + } + + if ((cm_node->state == I40IW_CM_STATE_OFFLOADED) || + (cm_node->state == I40IW_CM_STATE_CLOSED)) { + i40iw_free_retrans_entry(cm_node); + goto done; + } + + if (!send_entry->retranscount || !send_entry->retrycount) { + i40iw_free_retrans_entry(cm_node); + + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + i40iw_retrans_expired(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + goto done; + } + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + + vsi = &cm_node->iwdev->vsi; + + if (!cm_node->ack_rcvd) { + atomic_inc(&send_entry->sqbuf->refcount); + i40iw_puda_send_buf(vsi->ilq, send_entry->sqbuf); + cm_node->cm_core->stats_pkt_retrans++; + } + spin_lock_irqsave(&cm_node->retrans_list_lock, flags); + if (send_entry->send_retrans) { + send_entry->retranscount--; + timetosend = (I40IW_RETRY_TIMEOUT << + (I40IW_DEFAULT_RETRANS - + send_entry->retranscount)); + + send_entry->timetosend = jiffies + + min(timetosend, I40IW_MAX_TIMEOUT); + if (nexttimeout > send_entry->timetosend || !settimer) { + nexttimeout = send_entry->timetosend; + settimer = 1; + } + } else 
{ + int close_when_complete; + + close_when_complete = send_entry->close_when_complete; + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "cm_node=%p state=%d\n", + cm_node, + cm_node->state); + i40iw_free_retrans_entry(cm_node); + if (close_when_complete) + i40iw_rem_ref_cm_node(cm_node); + } +done: + spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); + i40iw_rem_ref_cm_node(cm_node); + } + + if (settimer) { + spin_lock_irqsave(&cm_core->ht_lock, flags); + if (!timer_pending(&cm_core->tcp_timer)) { + cm_core->tcp_timer.expires = nexttimeout; + add_timer(&cm_core->tcp_timer); + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + } +} + +/** + * i40iw_send_syn - send SYN packet + * @cm_node: connection's node + * @sendack: flag to set ACK bit or not + */ +int i40iw_send_syn(struct i40iw_cm_node *cm_node, u32 sendack) +{ + struct i40iw_puda_buf *sqbuf; + int flags = SET_SYN; + char optionsbuffer[sizeof(struct option_mss) + + sizeof(struct option_windowscale) + + sizeof(struct option_base) + TCP_OPTIONS_PADDING]; + struct i40iw_kmem_info opts; + + int optionssize = 0; + /* Sending MSS option */ + union all_known_options *options; + + opts.addr = optionsbuffer; + if (!cm_node) { + i40iw_pr_err("no cm_node\n"); + return -EINVAL; + } + + options = (union all_known_options *)&optionsbuffer[optionssize]; + options->as_mss.optionnum = OPTION_NUMBER_MSS; + options->as_mss.length = sizeof(struct option_mss); + options->as_mss.mss = htons(cm_node->tcp_cntxt.mss); + optionssize += sizeof(struct option_mss); + + options = (union all_known_options *)&optionsbuffer[optionssize]; + options->as_windowscale.optionnum = OPTION_NUMBER_WINDOW_SCALE; + options->as_windowscale.length = sizeof(struct option_windowscale); + options->as_windowscale.shiftcount = cm_node->tcp_cntxt.rcv_wscale; + optionssize += sizeof(struct option_windowscale); + options = (union all_known_options *)&optionsbuffer[optionssize]; + options->as_end = OPTION_NUMBER_END; + optionssize += 1; + + if (sendack) + flags |= SET_ACK; + + opts.size = optionssize; + + sqbuf = i40iw_form_cm_frame(cm_node, &opts, NULL, NULL, flags); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -1; + } + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * i40iw_send_ack - Send ACK packet + * @cm_node: connection's node + */ +static void i40iw_send_ack(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + struct i40iw_sc_vsi *vsi = &cm_node->iwdev->vsi; + + sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK); + if (sqbuf) + i40iw_puda_send_buf(vsi->ilq, sqbuf); + else + i40iw_pr_err("no sqbuf\n"); +} + +/** + * i40iw_send_fin - Send FIN pkt + * @cm_node: connection's node + */ +static int i40iw_send_fin(struct i40iw_cm_node *cm_node) +{ + struct i40iw_puda_buf *sqbuf; + + sqbuf = i40iw_form_cm_frame(cm_node, NULL, NULL, NULL, SET_ACK | SET_FIN); + if (!sqbuf) { + i40iw_pr_err("no sqbuf\n"); + return -1; + } + return i40iw_schedule_cm_timer(cm_node, sqbuf, I40IW_TIMER_TYPE_SEND, 1, 0); +} + +/** + * i40iw_find_node - find a cm node that matches the reference cm node + * @cm_core: cm's core + * @rem_port: remote tcp port num + * @rem_addr: remote ip addr + * @loc_port: local tcp port num + * @loc_addr: loc ip addr + * @add_refcnt: flag to increment refcount of cm_node + * @accelerated_list: flag for accelerated vs non-accelerated list to search + */ +struct i40iw_cm_node *i40iw_find_node(struct i40iw_cm_core *cm_core, + u16 rem_port, + u32 *rem_addr, + u16 loc_port, + u32 *loc_addr, + 
bool add_refcnt, + bool accelerated_list) +{ + struct list_head *hte; + struct i40iw_cm_node *cm_node; + unsigned long flags; + + hte = accelerated_list ? + &cm_core->accelerated_list : &cm_core->non_accelerated_list; + + /* walk list and find cm_node associated with this session ID */ + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_for_each_entry(cm_node, hte, list) { + if (!memcmp(cm_node->loc_addr, loc_addr, sizeof(cm_node->loc_addr)) && + (cm_node->loc_port == loc_port) && + !memcmp(cm_node->rem_addr, rem_addr, sizeof(cm_node->rem_addr)) && + (cm_node->rem_port == rem_port)) { + if (add_refcnt) + atomic_inc(&cm_node->ref_count); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + return cm_node; + } + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + /* no owner node */ + return NULL; +} + +/** + * i40iw_find_listener - find a cm node listening on this addr-port pair + * @cm_core: cm's core + * @dst_port: listener tcp port num + * @dst_addr: listener ip addr + * @listener_state: state to match with listen node's + */ +static struct i40iw_cm_listener *i40iw_find_listener( + struct i40iw_cm_core *cm_core, + u32 *dst_addr, + u16 dst_port, + u16 vlan_id, + enum i40iw_cm_listener_state + listener_state) +{ + struct i40iw_cm_listener *listen_node; + static const u32 ip_zero[4] = { 0, 0, 0, 0 }; + u32 listen_addr[4]; + u16 listen_port; + unsigned long flags; + + /* walk list and find cm_node associated with this session ID */ + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + memcpy(listen_addr, listen_node->loc_addr, sizeof(listen_addr)); + listen_port = listen_node->loc_port; + /* compare node pair, return node handle if a match */ + if ((!memcmp(listen_addr, dst_addr, sizeof(listen_addr)) || + !memcmp(listen_addr, ip_zero, sizeof(listen_addr))) && + (listen_port == dst_port) && + (listener_state & listen_node->listener_state)) { + atomic_inc(&listen_node->ref_count); + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return listen_node; + } + } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return NULL; +} + +/** + * i40iw_add_hte_node - add a cm node to the hash table + * @cm_core: cm's core + * @cm_node: connection's node + */ +static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core, + struct i40iw_cm_node *cm_node) +{ + unsigned long flags; + + if (!cm_node || !cm_core) { + i40iw_pr_err("cm_node or cm_core == NULL\n"); + return; + } + + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_add_tail(&cm_node->list, &cm_core->non_accelerated_list); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); +} + +/** + * i40iw_find_port - find port that matches reference port + * @hte: ptr to accelerated or non-accelerated list + * @accelerated_list: flag for accelerated vs non-accelerated list + */ +static bool i40iw_find_port(struct list_head *hte, u16 port) +{ + struct i40iw_cm_node *cm_node; + + list_for_each_entry(cm_node, hte, list) { + if (cm_node->loc_port == port) + return true; + } + return false; +} + +/** + * i40iw_port_in_use - determine if port is in use + * @cm_core: cm's core + * @port: port number + */ +bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port) +{ + struct i40iw_cm_listener *listen_node; + unsigned long flags; + + spin_lock_irqsave(&cm_core->ht_lock, flags); + if (i40iw_find_port(&cm_core->accelerated_list, port) || + i40iw_find_port(&cm_core->non_accelerated_list, port)) { + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + return true; 
+ } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + if (listen_node->loc_port == port) { + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + return true; + } + } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + + return false; +} + +/** + * i40iw_del_multiple_qhash - Remove qhash and child listens + * @iwdev: iWarp device + * @cm_info: CM info for parent listen node + * @cm_parent_listen_node: The parent listen node + */ +static enum i40iw_status_code i40iw_del_multiple_qhash( + struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info, + struct i40iw_cm_listener *cm_parent_listen_node) +{ + struct i40iw_cm_listener *child_listen_node; + enum i40iw_status_code ret = I40IW_ERR_CONFIG; + struct list_head *pos, *tpos; + unsigned long flags; + + spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); + list_for_each_safe(pos, tpos, &cm_parent_listen_node->child_listen_list) { + child_listen_node = list_entry(pos, struct i40iw_cm_listener, child_listen_list); + if (child_listen_node->ipv4) + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "removing child listen for IP=%pI4, port=%d, vlan=%d\n", + child_listen_node->loc_addr, + child_listen_node->loc_port, + child_listen_node->vlan_id); + else + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, + "removing child listen for IP=%pI6, port=%d, vlan=%d\n", + child_listen_node->loc_addr, + child_listen_node->loc_port, + child_listen_node->vlan_id); + list_del(pos); + memcpy(cm_info->loc_addr, child_listen_node->loc_addr, + sizeof(cm_info->loc_addr)); + cm_info->vlan_id = child_listen_node->vlan_id; + if (child_listen_node->qhash_set) { + ret = i40iw_manage_qhash(iwdev, cm_info, + I40IW_QHASH_TYPE_TCP_SYN, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, false); + child_listen_node->qhash_set = false; + } else { + ret = I40IW_SUCCESS; + } + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "freed pointer = %p\n", + child_listen_node); + kfree(child_listen_node); + cm_parent_listen_node->cm_core->stats_listen_nodes_destroyed++; + } + spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); + + return ret; +} + +/** + * i40iw_netdev_vlan_ipv6 - Gets the netdev and vlan + * @addr: local IPv6 address + * @vlan_id: vlan id for the given IPv6 address + * + * Returns the net_device of the IPv6 address and also sets the + * vlan id for that address. 
+ */ +static struct net_device *i40iw_netdev_vlan_ipv6(u32 *addr, u16 *vlan_id) +{ + struct net_device *ip_dev = NULL; + struct in6_addr laddr6; + + if (!IS_ENABLED(CONFIG_IPV6)) + return NULL; + i40iw_copy_ip_htonl(laddr6.in6_u.u6_addr32, addr); + if (vlan_id) + *vlan_id = I40IW_NO_VLAN; + rcu_read_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if (ipv6_chk_addr(&init_net, &laddr6, ip_dev, 1)) { + if (vlan_id) + *vlan_id = rdma_vlan_dev_vlan_id(ip_dev); + break; + } + } + rcu_read_unlock(); + return ip_dev; +} + +/** + * i40iw_get_vlan_ipv4 - Returns the vlan_id for IPv4 address + * @addr: local IPv4 address + */ +static u16 i40iw_get_vlan_ipv4(u32 *addr) +{ + struct net_device *netdev; + u16 vlan_id = I40IW_NO_VLAN; + + netdev = ip_dev_find(&init_net, htonl(addr[0])); + if (netdev) { + vlan_id = rdma_vlan_dev_vlan_id(netdev); + dev_put(netdev); + } + return vlan_id; +} + +/** + * i40iw_add_mqh_6 - Adds multiple qhashes for IPv6 + * @iwdev: iWarp device + * @cm_info: CM info for parent listen node + * @cm_parent_listen_node: The parent listen node + * + * Adds a qhash and a child listen node for every IPv6 address + * on the adapter and adds the associated qhash filter + */ +static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info, + struct i40iw_cm_listener *cm_parent_listen_node) +{ + struct net_device *ip_dev; + struct inet6_dev *idev; + struct inet6_ifaddr *ifp, *tmp; + enum i40iw_status_code ret = 0; + struct i40iw_cm_listener *child_listen_node; + unsigned long flags; + + rtnl_lock(); + for_each_netdev(&init_net, ip_dev) { + if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) && + (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) || + (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) { + idev = __in6_dev_get(ip_dev); + if (!idev) { + i40iw_pr_err("idev == NULL\n"); + break; + } + list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) { + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "IP=%pI6, vlan_id=%d, MAC=%pM\n", + &ifp->addr, + rdma_vlan_dev_vlan_id(ip_dev), + ip_dev->dev_addr); + child_listen_node = + kzalloc(sizeof(*child_listen_node), GFP_ATOMIC); + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "Allocating child listener %p\n", + child_listen_node); + if (!child_listen_node) { + ret = I40IW_ERR_NO_MEMORY; + goto exit; + } + cm_info->vlan_id = rdma_vlan_dev_vlan_id(ip_dev); + cm_parent_listen_node->vlan_id = cm_info->vlan_id; + + memcpy(child_listen_node, cm_parent_listen_node, + sizeof(*child_listen_node)); + + i40iw_copy_ip_ntohl(child_listen_node->loc_addr, + ifp->addr.in6_u.u6_addr32); + memcpy(cm_info->loc_addr, child_listen_node->loc_addr, + sizeof(cm_info->loc_addr)); + + ret = i40iw_manage_qhash(iwdev, cm_info, + I40IW_QHASH_TYPE_TCP_SYN, + I40IW_QHASH_MANAGE_TYPE_ADD, + NULL, true); + if (!ret) { + child_listen_node->qhash_set = true; + spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags); + list_add(&child_listen_node->child_listen_list, + &cm_parent_listen_node->child_listen_list); + spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags); + cm_parent_listen_node->cm_core->stats_listen_nodes_created++; + } else { + kfree(child_listen_node); + } + } + } + } +exit: + rtnl_unlock(); + return ret; +} + +/** + * i40iw_add_mqh_4 - Adds multiple qhashes for IPv4 + * @iwdev: iWarp device + * @cm_info: CM info for parent listen node + * @cm_parent_listen_node: The parent listen node + * + * Adds a qhash and a child listen node for every IPv4 address + * on the adapter and adds the 
associated qhash filter
+ */
+static enum i40iw_status_code i40iw_add_mqh_4(
+ struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cm_info,
+ struct i40iw_cm_listener *cm_parent_listen_node)
+{
+ struct net_device *dev;
+ struct in_device *idev;
+ struct i40iw_cm_listener *child_listen_node;
+ enum i40iw_status_code ret = 0;
+ unsigned long flags;
+
+ rtnl_lock();
+ for_each_netdev(&init_net, dev) {
+ if ((((rdma_vlan_dev_vlan_id(dev) < I40IW_NO_VLAN) &&
+ (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
+ (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) {
+ idev = in_dev_get(dev);
+ for_ifa(idev) {
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child CM Listener for IP=%pI4, vlan_id=%d, MAC=%pM\n",
+ &ifa->ifa_address,
+ rdma_vlan_dev_vlan_id(dev),
+ dev->dev_addr);
+ child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_KERNEL);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created++;
+ i40iw_debug(&iwdev->sc_dev,
+ I40IW_DEBUG_CM,
+ "Allocating child listener %p\n",
+ child_listen_node);
+ if (!child_listen_node) {
+ in_dev_put(idev);
+ ret = I40IW_ERR_NO_MEMORY;
+ goto exit;
+ }
+ cm_info->vlan_id = rdma_vlan_dev_vlan_id(dev);
+ cm_parent_listen_node->vlan_id = cm_info->vlan_id;
+ memcpy(child_listen_node,
+ cm_parent_listen_node,
+ sizeof(*child_listen_node));
+
+ child_listen_node->loc_addr[0] = ntohl(ifa->ifa_address);
+ memcpy(cm_info->loc_addr, child_listen_node->loc_addr,
+ sizeof(cm_info->loc_addr));
+
+ ret = i40iw_manage_qhash(iwdev,
+ cm_info,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_ADD,
+ NULL,
+ true);
+ if (!ret) {
+ child_listen_node->qhash_set = true;
+ spin_lock_irqsave(&iwdev->cm_core.listen_list_lock, flags);
+ list_add(&child_listen_node->child_listen_list,
+ &cm_parent_listen_node->child_listen_list);
+ spin_unlock_irqrestore(&iwdev->cm_core.listen_list_lock, flags);
+ } else {
+ kfree(child_listen_node);
+ cm_parent_listen_node->cm_core->stats_listen_nodes_created--;
+ }
+ }
+ endfor_ifa(idev);
+ in_dev_put(idev);
+ }
+ }
+exit:
+ rtnl_unlock();
+ return ret;
+}
+
+/**
+ * i40iw_dec_refcnt_listen - delete listener and associated cm nodes
+ * @cm_core: cm's core
+ * @listener: listener being released
+ * @free_hanging_nodes: to free associated cm_nodes
+ * @apbvt_del: flag to delete the apbvt
+ */
+static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_listener *listener,
+ int free_hanging_nodes, bool apbvt_del)
+{
+ int ret = -EINVAL;
+ int err = 0;
+ struct list_head *list_pos;
+ struct list_head *list_temp;
+ struct i40iw_cm_node *cm_node;
+ struct list_head reset_list;
+ struct i40iw_cm_info nfo;
+ struct i40iw_cm_node *loopback;
+ enum i40iw_cm_node_state old_state;
+ unsigned long flags;
+
+ /* free non-accelerated child nodes for this listener */
+ INIT_LIST_HEAD(&reset_list);
+ if (free_hanging_nodes) {
+ spin_lock_irqsave(&cm_core->ht_lock, flags);
+ list_for_each_safe(list_pos,
+ list_temp, &cm_core->non_accelerated_list) {
+ cm_node = container_of(list_pos, struct i40iw_cm_node, list);
+ if ((cm_node->listener == listener) &&
+ !cm_node->accelerated) {
+ atomic_inc(&cm_node->ref_count);
+ list_add(&cm_node->reset_entry, &reset_list);
+ }
+ }
+ spin_unlock_irqrestore(&cm_core->ht_lock, flags);
+ }
+
+ list_for_each_safe(list_pos, list_temp, &reset_list) {
+ cm_node = container_of(list_pos, struct i40iw_cm_node, reset_entry);
+ loopback = cm_node->loopbackpartner;
+ if (cm_node->state >= I40IW_CM_STATE_FIN_WAIT1) {
+ i40iw_rem_ref_cm_node(cm_node);
+ } else {
+ if (!loopback) {
+ i40iw_cleanup_retrans_entry(cm_node);
+ err = i40iw_send_reset(cm_node);
+ if (err) {
+ cm_node->state = I40IW_CM_STATE_CLOSED;
+ i40iw_pr_err("send reset\n");
+ } else {
+ old_state = cm_node->state;
+ cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+ if (old_state != I40IW_CM_STATE_MPAREQ_RCVD)
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ } else {
+ struct i40iw_cm_event event;
+
+ event.cm_node = loopback;
+ memcpy(event.cm_info.rem_addr,
+ loopback->rem_addr, sizeof(event.cm_info.rem_addr));
+ memcpy(event.cm_info.loc_addr,
+ loopback->loc_addr, sizeof(event.cm_info.loc_addr));
+ event.cm_info.rem_port = loopback->rem_port;
+ event.cm_info.loc_port = loopback->loc_port;
+ event.cm_info.cm_id = loopback->cm_id;
+ event.cm_info.ipv4 = loopback->ipv4;
+ atomic_inc(&loopback->ref_count);
+ loopback->state = I40IW_CM_STATE_CLOSED;
+ i40iw_event_connect_error(&event);
+ cm_node->state = I40IW_CM_STATE_LISTENER_DESTROYED;
+ i40iw_rem_ref_cm_node(cm_node);
+ }
+ }
+ }
+
+ if (!atomic_dec_return(&listener->ref_count)) {
+ spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_del(&listener->list);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+
+ if (listener->iwdev) {
+ if (apbvt_del)
+ i40iw_manage_apbvt(listener->iwdev,
+ listener->loc_port,
+ I40IW_MANAGE_APBVT_DEL);
+
+ memcpy(nfo.loc_addr, listener->loc_addr, sizeof(nfo.loc_addr));
+ nfo.loc_port = listener->loc_port;
+ nfo.ipv4 = listener->ipv4;
+ nfo.vlan_id = listener->vlan_id;
+ nfo.user_pri = listener->user_pri;
+
+ if (!list_empty(&listener->child_listen_list)) {
+ i40iw_del_multiple_qhash(listener->iwdev, &nfo, listener);
+ } else {
+ if (listener->qhash_set)
+ i40iw_manage_qhash(listener->iwdev,
+ &nfo,
+ I40IW_QHASH_TYPE_TCP_SYN,
+ I40IW_QHASH_MANAGE_TYPE_DELETE,
+ NULL,
+ false);
+ }
+ }
+
+ cm_core->stats_listen_destroyed++;
+ kfree(listener);
+ cm_core->stats_listen_nodes_destroyed++;
+ listener = NULL;
+ ret = 0;
+ }
+
+ if (listener) {
+ if (atomic_read(&listener->pend_accepts_cnt) > 0)
+ i40iw_debug(cm_core->dev,
+ I40IW_DEBUG_CM,
+ "%s: listener (%p) pending accepts=%u\n",
+ __func__,
+ listener,
+ atomic_read(&listener->pend_accepts_cnt));
+ }
+
+ return ret;
+}
+
+/**
+ * i40iw_cm_del_listen - delete a listener
+ * @cm_core: cm's core
+ * @listener: passive connection's listener
+ * @apbvt_del: flag to delete apbvt
+ */
+static int i40iw_cm_del_listen(struct i40iw_cm_core *cm_core,
+ struct i40iw_cm_listener *listener,
+ bool apbvt_del)
+{
+ listener->listener_state = I40IW_CM_LISTENER_PASSIVE_STATE;
+ listener->cm_id = NULL; /* going to be destroyed pretty soon */
+ return i40iw_dec_refcnt_listen(cm_core, listener, 1, apbvt_del);
+}
+
+/**
+ * i40iw_addr_resolve_neigh - resolve neighbor address
+ * @iwdev: iwarp device structure
+ * @src_ip: local ip address
+ * @dst_ip: remote ip address
+ * @arpindex: if there is an arp entry
+ */
+static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev,
+ u32 src_ip,
+ u32 dst_ip,
+ int arpindex)
+{
+ struct rtable *rt;
+ struct neighbour *neigh;
+ int rc = arpindex;
+ __be32 dst_ipaddr = htonl(dst_ip);
+ __be32 src_ipaddr = htonl(src_ip);
+
+ rt = ip_route_output(&init_net, dst_ipaddr, src_ipaddr, 0, 0);
+ if (IS_ERR(rt)) {
+ i40iw_pr_err("ip_route_output\n");
+ return rc;
+ }
+
+ neigh = dst_neigh_lookup(&rt->dst, &dst_ipaddr);
+
+ rcu_read_lock();
+ if (neigh) {
+ if (neigh->nud_state & NUD_VALID) {
+ if (arpindex >= 0) {
+ if (ether_addr_equal(iwdev->arp_table[arpindex].mac_addr,
+ neigh->ha))
+ /* Mac address same as arp table */
+ goto resolve_neigh_exit;
+ i40iw_manage_arp_cache(iwdev,
iwdev->arp_table[arpindex].mac_addr, + &dst_ip, + true, + I40IW_ARP_DELETE); + } + + i40iw_manage_arp_cache(iwdev, neigh->ha, &dst_ip, true, I40IW_ARP_ADD); + rc = i40iw_arp_table(iwdev, &dst_ip, true, NULL, I40IW_ARP_RESOLVE); + } else { + neigh_event_send(neigh, NULL); + } + } + resolve_neigh_exit: + + rcu_read_unlock(); + if (neigh) + neigh_release(neigh); + + ip_rt_put(rt); + return rc; +} + +/** + * i40iw_get_dst_ipv6 + */ +static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, + struct sockaddr_in6 *dst_addr) +{ + struct dst_entry *dst; + struct flowi6 fl6; + + memset(&fl6, 0, sizeof(fl6)); + fl6.daddr = dst_addr->sin6_addr; + fl6.saddr = src_addr->sin6_addr; + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = dst_addr->sin6_scope_id; + + dst = ip6_route_output(&init_net, NULL, &fl6); + return dst; +} + +/** + * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address + * @iwdev: iwarp device structure + * @dst_ip: remote ip address + * @arpindex: if there is an arp entry + */ +static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, + u32 *src, + u32 *dest, + int arpindex) +{ + struct neighbour *neigh; + int rc = arpindex; + struct dst_entry *dst; + struct sockaddr_in6 dst_addr; + struct sockaddr_in6 src_addr; + + memset(&dst_addr, 0, sizeof(dst_addr)); + dst_addr.sin6_family = AF_INET6; + i40iw_copy_ip_htonl(dst_addr.sin6_addr.in6_u.u6_addr32, dest); + memset(&src_addr, 0, sizeof(src_addr)); + src_addr.sin6_family = AF_INET6; + i40iw_copy_ip_htonl(src_addr.sin6_addr.in6_u.u6_addr32, src); + dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr); + if (!dst || dst->error) { + if (dst) { + i40iw_pr_err("ip6_route_output returned dst->error = %d\n", + dst->error); + dst_release(dst); + } + return rc; + } + + neigh = dst_neigh_lookup(dst, dst_addr.sin6_addr.in6_u.u6_addr32); + + rcu_read_lock(); + if (neigh) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "dst_neigh_lookup MAC=%pM\n", neigh->ha); + if (neigh->nud_state & NUD_VALID) { + if (arpindex >= 0) { + if (ether_addr_equal + (iwdev->arp_table[arpindex].mac_addr, + neigh->ha)) { + /* Mac address same as in arp table */ + goto resolve_neigh_exit6; + } + i40iw_manage_arp_cache(iwdev, + iwdev->arp_table[arpindex].mac_addr, + dest, + false, + I40IW_ARP_DELETE); + } + i40iw_manage_arp_cache(iwdev, + neigh->ha, + dest, + false, + I40IW_ARP_ADD); + rc = i40iw_arp_table(iwdev, + dest, + false, + NULL, + I40IW_ARP_RESOLVE); + } else { + neigh_event_send(neigh, NULL); + } + } + + resolve_neigh_exit6: + rcu_read_unlock(); + if (neigh) + neigh_release(neigh); + dst_release(dst); + return rc; +} + +/** + * i40iw_ipv4_is_loopback - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +static bool i40iw_ipv4_is_loopback(u32 loc_addr, u32 rem_addr) +{ + return ipv4_is_loopback(htonl(rem_addr)) || (loc_addr == rem_addr); +} + +/** + * i40iw_ipv6_is_loopback - check if loopback + * @loc_addr: local addr to compare + * @rem_addr: remote address + */ +static bool i40iw_ipv6_is_loopback(u32 *loc_addr, u32 *rem_addr) +{ + struct in6_addr raddr6; + + i40iw_copy_ip_htonl(raddr6.in6_u.u6_addr32, rem_addr); + return !memcmp(loc_addr, rem_addr, 16) || ipv6_addr_loopback(&raddr6); +} + +/** + * i40iw_make_cm_node - create a new instance of a cm node + * @cm_core: cm's core + * @iwdev: iwarp device structure + * @cm_info: quad info for connection + * @listener: passive connection's listener + */ +static struct i40iw_cm_node *i40iw_make_cm_node( + struct i40iw_cm_core 
*cm_core, + struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info, + struct i40iw_cm_listener *listener) +{ + struct i40iw_cm_node *cm_node; + int oldarpindex; + int arpindex; + struct net_device *netdev = iwdev->netdev; + + /* create an hte and cm_node for this instance */ + cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC); + if (!cm_node) + return NULL; + + /* set our node specific transport info */ + cm_node->ipv4 = cm_info->ipv4; + cm_node->vlan_id = cm_info->vlan_id; + if ((cm_node->vlan_id == I40IW_NO_VLAN) && iwdev->dcb) + cm_node->vlan_id = 0; + cm_node->tos = cm_info->tos; + cm_node->user_pri = cm_info->user_pri; + if (listener) { + if (listener->tos != cm_info->tos) + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, + "application TOS[%d] and remote client TOS[%d] mismatch\n", + listener->tos, cm_info->tos); + cm_node->tos = max(listener->tos, cm_info->tos); + cm_node->user_pri = rt_tos2priority(cm_node->tos); + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "listener: TOS:[%d] UP:[%d]\n", + cm_node->tos, cm_node->user_pri); + } + memcpy(cm_node->loc_addr, cm_info->loc_addr, sizeof(cm_node->loc_addr)); + memcpy(cm_node->rem_addr, cm_info->rem_addr, sizeof(cm_node->rem_addr)); + cm_node->loc_port = cm_info->loc_port; + cm_node->rem_port = cm_info->rem_port; + + cm_node->mpa_frame_rev = iwdev->mpa_version; + cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO; + cm_node->ird_size = I40IW_MAX_IRD_SIZE; + cm_node->ord_size = I40IW_MAX_ORD_SIZE; + + cm_node->listener = listener; + cm_node->cm_id = cm_info->cm_id; + ether_addr_copy(cm_node->loc_mac, netdev->dev_addr); + spin_lock_init(&cm_node->retrans_list_lock); + cm_node->ack_rcvd = false; + + atomic_set(&cm_node->ref_count, 1); + /* associate our parent CM core */ + cm_node->cm_core = cm_core; + cm_node->tcp_cntxt.loc_id = I40IW_CM_DEF_LOCAL_ID; + cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE; + cm_node->tcp_cntxt.rcv_wnd = + I40IW_CM_DEFAULT_RCV_WND_SCALED >> I40IW_CM_DEFAULT_RCV_WND_SCALE; + if (cm_node->ipv4) { + cm_node->tcp_cntxt.loc_seq_num = secure_tcp_seq(htonl(cm_node->loc_addr[0]), + htonl(cm_node->rem_addr[0]), + htons(cm_node->loc_port), + htons(cm_node->rem_port)); + cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV4; + } else if (IS_ENABLED(CONFIG_IPV6)) { + __be32 loc[4] = { + htonl(cm_node->loc_addr[0]), htonl(cm_node->loc_addr[1]), + htonl(cm_node->loc_addr[2]), htonl(cm_node->loc_addr[3]) + }; + __be32 rem[4] = { + htonl(cm_node->rem_addr[0]), htonl(cm_node->rem_addr[1]), + htonl(cm_node->rem_addr[2]), htonl(cm_node->rem_addr[3]) + }; + cm_node->tcp_cntxt.loc_seq_num = secure_tcpv6_seq(loc, rem, + htons(cm_node->loc_port), + htons(cm_node->rem_port)); + cm_node->tcp_cntxt.mss = iwdev->vsi.mtu - I40IW_MTU_TO_MSS_IPV6; + } + + cm_node->iwdev = iwdev; + cm_node->dev = &iwdev->sc_dev; + + if ((cm_node->ipv4 && + i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) || + (!cm_node->ipv4 && i40iw_ipv6_is_loopback(cm_node->loc_addr, + cm_node->rem_addr))) { + arpindex = i40iw_arp_table(iwdev, + cm_node->rem_addr, + false, + NULL, + I40IW_ARP_RESOLVE); + } else { + oldarpindex = i40iw_arp_table(iwdev, + cm_node->rem_addr, + false, + NULL, + I40IW_ARP_RESOLVE); + if (cm_node->ipv4) + arpindex = i40iw_addr_resolve_neigh(iwdev, + cm_info->loc_addr[0], + cm_info->rem_addr[0], + oldarpindex); + else if (IS_ENABLED(CONFIG_IPV6)) + arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev, + cm_info->loc_addr, + cm_info->rem_addr, + oldarpindex); + else + arpindex = -EINVAL; + } + if (arpindex < 0) { + 
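/* neighbour/ARP resolution failed; abandon the new cm_node */ +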
i40iw_pr_err("cm_node arpindex\n"); + kfree(cm_node); + return NULL; + } + ether_addr_copy(cm_node->rem_mac, iwdev->arp_table[arpindex].mac_addr); + i40iw_add_hte_node(cm_core, cm_node); + cm_core->stats_nodes_created++; + return cm_node; +} + +/** + * i40iw_rem_ref_cm_node - destroy an instance of a cm node + * @cm_node: connection's node + */ +static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node) +{ + struct i40iw_cm_core *cm_core = cm_node->cm_core; + struct i40iw_qp *iwqp; + struct i40iw_cm_info nfo; + unsigned long flags; + + spin_lock_irqsave(&cm_node->cm_core->ht_lock, flags); + if (atomic_dec_return(&cm_node->ref_count)) { + spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags); + return; + } + list_del(&cm_node->list); + spin_unlock_irqrestore(&cm_node->cm_core->ht_lock, flags); + + /* if the node is destroyed before connection was accelerated */ + if (!cm_node->accelerated && cm_node->accept_pend) { + pr_err("node destroyed before established\n"); + atomic_dec(&cm_node->listener->pend_accepts_cnt); + } + if (cm_node->close_entry) + i40iw_handle_close_entry(cm_node, 0); + if (cm_node->listener) { + i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); + } else { + if (cm_node->apbvt_set) { + i40iw_manage_apbvt(cm_node->iwdev, + cm_node->loc_port, + I40IW_MANAGE_APBVT_DEL); + cm_node->apbvt_set = 0; + } + i40iw_get_addr_info(cm_node, &nfo); + if (cm_node->qhash_set) { + i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + cm_node->qhash_set = 0; + } + } + + iwqp = cm_node->iwqp; + if (iwqp) { + iwqp->cm_node = NULL; + i40iw_rem_ref(&iwqp->ibqp); + cm_node->iwqp = NULL; + } else if (cm_node->qhash_set) { + i40iw_get_addr_info(cm_node, &nfo); + i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_DELETE, + NULL, + false); + cm_node->qhash_set = 0; + } + + cm_node->cm_core->stats_nodes_destroyed++; + kfree(cm_node); +} + +/** + * i40iw_handle_fin_pkt - FIN packet received + * @cm_node: connection's node + */ +static void i40iw_handle_fin_pkt(struct i40iw_cm_node *cm_node) +{ + u32 ret; + + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_MPAREJ_RCVD: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_LAST_ACK; + i40iw_send_fin(cm_node); + break; + case I40IW_CM_STATE_MPAREQ_SENT: + i40iw_create_event(cm_node, I40IW_CM_EVENT_ABORTED); + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSING; + i40iw_send_ack(cm_node); + /* + * Wait for ACK as this is simultaneous close. + * After we receive ACK, do not send anything. + * Just rm the node. 
+ */ + break; + case I40IW_CM_STATE_FIN_WAIT2: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_TIME_WAIT; + i40iw_send_ack(cm_node); + ret = + i40iw_schedule_cm_timer(cm_node, NULL, I40IW_TIMER_TYPE_CLOSE, 1, 0); + if (ret) + i40iw_pr_err("node %p state = %d\n", cm_node, cm_node->state); + break; + case I40IW_CM_STATE_TIME_WAIT: + cm_node->tcp_cntxt.rcv_nxt++; + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_OFFLOADED: + default: + i40iw_pr_err("bad state node %p state = %d\n", cm_node, cm_node->state); + break; + } +} + +/** + * i40iw_handle_rst_pkt - process received RST packet + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_handle_rst_pkt(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + i40iw_cleanup_retrans_entry(cm_node); + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_MPAREQ_SENT: + switch (cm_node->mpa_frame_rev) { + case IETF_MPA_V2: + cm_node->mpa_frame_rev = IETF_MPA_V1; + /* send a syn and goto syn sent state */ + cm_node->state = I40IW_CM_STATE_SYN_SENT; + if (i40iw_send_syn(cm_node, 0)) + i40iw_active_open_err(cm_node, false); + break; + case IETF_MPA_V1: + default: + i40iw_active_open_err(cm_node, false); + break; + } + break; + case I40IW_CM_STATE_MPAREQ_RCVD: + atomic_add_return(1, &cm_node->passive_state); + break; + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_LISTENING: + i40iw_pr_err("Bad state state = %d\n", cm_node->state); + i40iw_passive_open_err(cm_node, false); + break; + case I40IW_CM_STATE_OFFLOADED: + i40iw_active_open_err(cm_node, false); + break; + case I40IW_CM_STATE_CLOSED: + break; + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_LAST_ACK: + cm_node->cm_id->rem_ref(cm_node->cm_id); + /* fall through */ + case I40IW_CM_STATE_TIME_WAIT: + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_rem_ref_cm_node(cm_node); + break; + default: + break; + } +} + +/** + * i40iw_handle_rcv_mpa - Process a recv'd mpa buffer + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + int ret; + int datasize = rbuf->datalen; + u8 *dataloc = rbuf->data; + + enum i40iw_cm_event_type type = I40IW_CM_EVENT_UNKNOWN; + u32 res_type; + + ret = i40iw_parse_mpa(cm_node, dataloc, &res_type, datasize); + if (ret) { + if (cm_node->state == I40IW_CM_STATE_MPAREQ_SENT) + i40iw_active_open_err(cm_node, true); + else + i40iw_passive_open_err(cm_node, true); + return; + } + + switch (cm_node->state) { + case I40IW_CM_STATE_ESTABLISHED: + if (res_type == I40IW_MPA_REQUEST_REJECT) + i40iw_pr_err("state for reject\n"); + cm_node->state = I40IW_CM_STATE_MPAREQ_RCVD; + type = I40IW_CM_EVENT_MPA_REQ; + i40iw_send_ack(cm_node); /* ACK received MPA request */ + atomic_set(&cm_node->passive_state, + I40IW_PASSIVE_STATE_INDICATED); + break; + case I40IW_CM_STATE_MPAREQ_SENT: + i40iw_cleanup_retrans_entry(cm_node); + if (res_type == I40IW_MPA_REQUEST_REJECT) { + type = I40IW_CM_EVENT_MPA_REJECT; + cm_node->state = I40IW_CM_STATE_MPAREJ_RCVD; + } else { + type = I40IW_CM_EVENT_CONNECTED; + cm_node->state = I40IW_CM_STATE_OFFLOADED; + } + i40iw_send_ack(cm_node); + break; + default: + pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); + break; + } + 
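/* hand the parsed MPA result (request, reject or connected) up as a CM event */ +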
i40iw_create_event(cm_node, type); +} + +/** + * i40iw_indicate_pkt_err - Send up err event to cm + * @cm_node: connection's node + */ +static void i40iw_indicate_pkt_err(struct i40iw_cm_node *cm_node) +{ + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_MPAREQ_SENT: + i40iw_active_open_err(cm_node, true); + break; + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_SYN_RCVD: + i40iw_passive_open_err(cm_node, true); + break; + case I40IW_CM_STATE_OFFLOADED: + default: + break; + } +} + +/** + * i40iw_check_syn - Check for error on received syn ack + * @cm_node: connection's node + * @tcph: pointer tcp header + */ +static int i40iw_check_syn(struct i40iw_cm_node *cm_node, struct tcphdr *tcph) +{ + int err = 0; + + if (ntohl(tcph->ack_seq) != cm_node->tcp_cntxt.loc_seq_num) { + err = 1; + i40iw_active_open_err(cm_node, true); + } + return err; +} + +/** + * i40iw_check_seq - check seq numbers if OK + * @cm_node: connection's node + * @tcph: pointer tcp header + */ +static int i40iw_check_seq(struct i40iw_cm_node *cm_node, struct tcphdr *tcph) +{ + int err = 0; + u32 seq; + u32 ack_seq; + u32 loc_seq_num = cm_node->tcp_cntxt.loc_seq_num; + u32 rcv_nxt = cm_node->tcp_cntxt.rcv_nxt; + u32 rcv_wnd; + + seq = ntohl(tcph->seq); + ack_seq = ntohl(tcph->ack_seq); + rcv_wnd = cm_node->tcp_cntxt.rcv_wnd; + if (ack_seq != loc_seq_num) + err = -1; + else if (!between(seq, rcv_nxt, (rcv_nxt + rcv_wnd))) + err = -1; + if (err) { + i40iw_pr_err("seq number\n"); + i40iw_indicate_pkt_err(cm_node); + } + return err; +} + +/** + * i40iw_handle_syn_pkt - is for Passive node + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_handle_syn_pkt(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + int ret; + u32 inc_sequence; + int optionsize; + struct i40iw_cm_info nfo; + + optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); + inc_sequence = ntohl(tcph->seq); + + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_MPAREQ_SENT: + /* Rcvd syn on active open connection */ + i40iw_active_open_err(cm_node, 1); + break; + case I40IW_CM_STATE_LISTENING: + /* Passive OPEN */ + if (atomic_read(&cm_node->listener->pend_accepts_cnt) > + cm_node->listener->backlog) { + cm_node->cm_core->stats_backlog_drops++; + i40iw_passive_open_err(cm_node, false); + break; + } + ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1); + if (ret) { + i40iw_passive_open_err(cm_node, false); + /* drop pkt */ + break; + } + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; + cm_node->accept_pend = 1; + atomic_inc(&cm_node->listener->pend_accepts_cnt); + + cm_node->state = I40IW_CM_STATE_SYN_RCVD; + i40iw_get_addr_info(cm_node, &nfo); + ret = i40iw_manage_qhash(cm_node->iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_ADD, + (void *)cm_node, + false); + cm_node->qhash_set = true; + break; + case I40IW_CM_STATE_CLOSED: + i40iw_cleanup_retrans_entry(cm_node); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_OFFLOADED: + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_MPAREQ_RCVD: + case I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_CLOSING: + case I40IW_CM_STATE_UNKNOWN: + default: + break; + } +} + +/** + * i40iw_handle_synack_pkt - Process SYN+ACK packet (active side) + * @cm_node: connection's node + * @rbuf: receive buffer + */ 
+static void i40iw_handle_synack_pkt(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + int ret; + u32 inc_sequence; + int optionsize; + + optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); + inc_sequence = ntohl(tcph->seq); + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_SENT: + i40iw_cleanup_retrans_entry(cm_node); + /* active open */ + if (i40iw_check_syn(cm_node, tcph)) { + i40iw_pr_err("check syn fail\n"); + return; + } + cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); + /* setup options */ + ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 0); + if (ret) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "cm_node=%p tcp_options failed\n", + cm_node); + break; + } + i40iw_cleanup_retrans_entry(cm_node); + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + 1; + i40iw_send_ack(cm_node); /* ACK for the syn_ack */ + ret = i40iw_send_mpa_request(cm_node); + if (ret) { + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "cm_node=%p i40iw_send_mpa_request failed\n", + cm_node); + break; + } + cm_node->state = I40IW_CM_STATE_MPAREQ_SENT; + break; + case I40IW_CM_STATE_MPAREQ_RCVD: + i40iw_passive_open_err(cm_node, true); + break; + case I40IW_CM_STATE_LISTENING: + cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_CLOSED: + cm_node->tcp_cntxt.loc_seq_num = ntohl(tcph->ack_seq); + i40iw_cleanup_retrans_entry(cm_node); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_OFFLOADED: + case I40IW_CM_STATE_CLOSING: + case I40IW_CM_STATE_UNKNOWN: + case I40IW_CM_STATE_MPAREQ_SENT: + default: + break; + } +} + +/** + * i40iw_handle_ack_pkt - process packet with ACK + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static int i40iw_handle_ack_pkt(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + u32 inc_sequence; + int ret = 0; + int optionsize; + u32 datasize = rbuf->datalen; + + optionsize = (tcph->doff << 2) - sizeof(struct tcphdr); + + if (i40iw_check_seq(cm_node, tcph)) + return -EINVAL; + + inc_sequence = ntohl(tcph->seq); + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_RCVD: + i40iw_cleanup_retrans_entry(cm_node); + ret = i40iw_handle_tcp_options(cm_node, tcph, optionsize, 1); + if (ret) + break; + cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); + cm_node->state = I40IW_CM_STATE_ESTABLISHED; + if (datasize) { + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; + i40iw_handle_rcv_mpa(cm_node, rbuf); + } + break; + case I40IW_CM_STATE_ESTABLISHED: + i40iw_cleanup_retrans_entry(cm_node); + if (datasize) { + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; + i40iw_handle_rcv_mpa(cm_node, rbuf); + } + break; + case I40IW_CM_STATE_MPAREQ_SENT: + cm_node->tcp_cntxt.rem_ack_num = ntohl(tcph->ack_seq); + if (datasize) { + cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize; + cm_node->ack_rcvd = false; + i40iw_handle_rcv_mpa(cm_node, rbuf); + } else { + cm_node->ack_rcvd = true; + } + break; + case I40IW_CM_STATE_LISTENING: + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_CLOSED: + 
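/* stray ACK on an already-closed connection; answer with a RST */ +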
i40iw_cleanup_retrans_entry(cm_node); + atomic_inc(&cm_node->ref_count); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_CLOSING: + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_CLOSED; + if (!cm_node->accept_pend) + cm_node->cm_id->rem_ref(cm_node->cm_id); + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + i40iw_cleanup_retrans_entry(cm_node); + cm_node->state = I40IW_CM_STATE_FIN_WAIT2; + break; + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_OFFLOADED: + case I40IW_CM_STATE_MPAREQ_RCVD: + case I40IW_CM_STATE_UNKNOWN: + default: + i40iw_cleanup_retrans_entry(cm_node); + break; + } + return ret; +} + +/** + * i40iw_process_packet - process cm packet + * @cm_node: connection's node + * @rbuf: receive buffer + */ +static void i40iw_process_packet(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *rbuf) +{ + enum i40iw_tcpip_pkt_type pkt_type = I40IW_PKT_TYPE_UNKNOWN; + struct tcphdr *tcph = (struct tcphdr *)rbuf->tcph; + u32 fin_set = 0; + int ret; + + if (tcph->rst) { + pkt_type = I40IW_PKT_TYPE_RST; + } else if (tcph->syn) { + pkt_type = I40IW_PKT_TYPE_SYN; + if (tcph->ack) + pkt_type = I40IW_PKT_TYPE_SYNACK; + } else if (tcph->ack) { + pkt_type = I40IW_PKT_TYPE_ACK; + } + if (tcph->fin) + fin_set = 1; + + switch (pkt_type) { + case I40IW_PKT_TYPE_SYN: + i40iw_handle_syn_pkt(cm_node, rbuf); + break; + case I40IW_PKT_TYPE_SYNACK: + i40iw_handle_synack_pkt(cm_node, rbuf); + break; + case I40IW_PKT_TYPE_ACK: + ret = i40iw_handle_ack_pkt(cm_node, rbuf); + if (fin_set && !ret) + i40iw_handle_fin_pkt(cm_node); + break; + case I40IW_PKT_TYPE_RST: + i40iw_handle_rst_pkt(cm_node, rbuf); + break; + default: + if (fin_set && + (!i40iw_check_seq(cm_node, (struct tcphdr *)rbuf->tcph))) + i40iw_handle_fin_pkt(cm_node); + break; + } +} + +/** + * i40iw_make_listen_node - create a listen node with params + * @cm_core: cm's core + * @iwdev: iwarp device structure + * @cm_info: quad info for connection + */ +static struct i40iw_cm_listener *i40iw_make_listen_node( + struct i40iw_cm_core *cm_core, + struct i40iw_device *iwdev, + struct i40iw_cm_info *cm_info) +{ + struct i40iw_cm_listener *listener; + unsigned long flags; + + /* cannot have multiple matching listeners */ + listener = i40iw_find_listener(cm_core, cm_info->loc_addr, + cm_info->loc_port, + cm_info->vlan_id, + I40IW_CM_LISTENER_EITHER_STATE); + if (listener && + (listener->listener_state == I40IW_CM_LISTENER_ACTIVE_STATE)) { + atomic_dec(&listener->ref_count); + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "Not creating listener since it already exists\n"); + return NULL; + } + + if (!listener) { + /* create a CM listen node (1/2 node to compare incoming traffic to) */ + listener = kzalloc(sizeof(*listener), GFP_KERNEL); + if (!listener) + return NULL; + cm_core->stats_listen_nodes_created++; + memcpy(listener->loc_addr, cm_info->loc_addr, sizeof(listener->loc_addr)); + listener->loc_port = cm_info->loc_port; + + INIT_LIST_HEAD(&listener->child_listen_list); + + atomic_set(&listener->ref_count, 1); + } else { + listener->reused_node = 1; + } + + listener->cm_id = cm_info->cm_id; + listener->ipv4 = cm_info->ipv4; + listener->vlan_id = cm_info->vlan_id; + atomic_set(&listener->pend_accepts_cnt, 0); + listener->cm_core = cm_core; + listener->iwdev = iwdev; + + listener->backlog = cm_info->backlog; + listener->listener_state = I40IW_CM_LISTENER_ACTIVE_STATE; + + if (!listener->reused_node) { + 
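/* newly created listener: link it onto the cm_core listen list */ +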
spin_lock_irqsave(&cm_core->listen_list_lock, flags);
+ list_add(&listener->list, &cm_core->listen_nodes);
+ spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
+ }
+
+ return listener;
+}
+
+/**
+ * i40iw_create_cm_node - make a connection node with params
+ * @cm_core: cm's core
+ * @iwdev: iwarp device structure
+ * @conn_param: upper layer connection parameters
+ * @cm_info: quad info for connection
+ */
+static struct i40iw_cm_node *i40iw_create_cm_node(
+ struct i40iw_cm_core *cm_core,
+ struct i40iw_device *iwdev,
+ struct iw_cm_conn_param *conn_param,
+ struct i40iw_cm_info *cm_info)
+{
+ struct i40iw_cm_node *cm_node;
+ struct i40iw_cm_listener *loopback_remotelistener;
+ struct i40iw_cm_node *loopback_remotenode;
+ struct i40iw_cm_info loopback_cm_info;
+
+ u16 private_data_len = conn_param->private_data_len;
+ const void *private_data = conn_param->private_data;
+
+ /* create a CM connection node */
+ cm_node = i40iw_make_cm_node(cm_core, iwdev, cm_info, NULL);
+ if (!cm_node)
+ return ERR_PTR(-ENOMEM);
+ /* set our node side to client (active) side */
+ cm_node->tcp_cntxt.client = 1;
+ cm_node->tcp_cntxt.rcv_wscale = I40IW_CM_DEFAULT_RCV_WND_SCALE;
+
+ i40iw_record_ird_ord(cm_node, conn_param->ird, conn_param->ord);
+
+ if (!memcmp(cm_info->loc_addr, cm_info->rem_addr, sizeof(cm_info->loc_addr))) {
+ loopback_remotelistener = i40iw_find_listener(
+ cm_core,
+ cm_info->rem_addr,
+ cm_node->rem_port,
+ cm_node->vlan_id,
+ I40IW_CM_LISTENER_ACTIVE_STATE);
+ if (!loopback_remotelistener) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return ERR_PTR(-ECONNREFUSED);
+ } else {
+ loopback_cm_info = *cm_info;
+ loopback_cm_info.loc_port = cm_info->rem_port;
+ loopback_cm_info.rem_port = cm_info->loc_port;
+ loopback_cm_info.cm_id = loopback_remotelistener->cm_id;
+ loopback_cm_info.ipv4 = cm_info->ipv4;
+ loopback_remotenode = i40iw_make_cm_node(cm_core,
+ iwdev,
+ &loopback_cm_info,
+ loopback_remotelistener);
+ if (!loopback_remotenode) {
+ i40iw_rem_ref_cm_node(cm_node);
+ return ERR_PTR(-ENOMEM);
+ }
+ cm_core->stats_loopbacks++;
+ loopback_remotenode->loopbackpartner = cm_node;
+ loopback_remotenode->tcp_cntxt.rcv_wscale =
+ I40IW_CM_DEFAULT_RCV_WND_SCALE;
+ cm_node->loopbackpartner = loopback_remotenode;
+ memcpy(loopback_remotenode->pdata_buf, private_data,
+ private_data_len);
+ loopback_remotenode->pdata.size = private_data_len;
+
+ if (loopback_remotenode->ord_size > cm_node->ird_size)
+ loopback_remotenode->ord_size =
+ cm_node->ird_size;
+
+ cm_node->state = I40IW_CM_STATE_OFFLOADED;
+ cm_node->tcp_cntxt.rcv_nxt =
+ loopback_remotenode->tcp_cntxt.loc_seq_num;
+ loopback_remotenode->tcp_cntxt.rcv_nxt =
+ cm_node->tcp_cntxt.loc_seq_num;
+ cm_node->tcp_cntxt.max_snd_wnd =
+ loopback_remotenode->tcp_cntxt.rcv_wnd;
+ loopback_remotenode->tcp_cntxt.max_snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wnd = loopback_remotenode->tcp_cntxt.rcv_wnd;
+ loopback_remotenode->tcp_cntxt.snd_wnd = cm_node->tcp_cntxt.rcv_wnd;
+ cm_node->tcp_cntxt.snd_wscale = loopback_remotenode->tcp_cntxt.rcv_wscale;
+ loopback_remotenode->tcp_cntxt.snd_wscale = cm_node->tcp_cntxt.rcv_wscale;
+ }
+ return cm_node;
+ }
+
+ cm_node->pdata.size = private_data_len;
+ cm_node->pdata.addr = cm_node->pdata_buf;
+
+ memcpy(cm_node->pdata_buf, private_data, private_data_len);
+
+ cm_node->state = I40IW_CM_STATE_SYN_SENT;
+ return cm_node;
+}
+
+/**
+ * i40iw_cm_reject - reject and teardown a connection
+ * @cm_node: connection's node
+ * @pdata: ptr to private data for reject
+ * @plen: size of private
data + */ +static int i40iw_cm_reject(struct i40iw_cm_node *cm_node, const void *pdata, u8 plen) +{ + int ret = 0; + int err; + int passive_state; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct i40iw_cm_node *loopback = cm_node->loopbackpartner; + + if (cm_node->tcp_cntxt.client) + return ret; + i40iw_cleanup_retrans_entry(cm_node); + + if (!loopback) { + passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == I40IW_SEND_RESET_EVENT) { + cm_node->state = I40IW_CM_STATE_CLOSED; + i40iw_rem_ref_cm_node(cm_node); + } else { + if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) { + i40iw_rem_ref_cm_node(cm_node); + } else { + ret = i40iw_send_mpa_reject(cm_node, pdata, plen); + if (ret) { + cm_node->state = I40IW_CM_STATE_CLOSED; + err = i40iw_send_reset(cm_node); + if (err) + i40iw_pr_err("send reset failed\n"); + } else { + cm_id->add_ref(cm_id); + } + } + } + } else { + cm_node->cm_id = NULL; + if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) { + i40iw_rem_ref_cm_node(cm_node); + i40iw_rem_ref_cm_node(loopback); + } else { + ret = i40iw_send_cm_event(loopback, + loopback->cm_id, + IW_CM_EVENT_CONNECT_REPLY, + -ECONNREFUSED); + i40iw_rem_ref_cm_node(cm_node); + loopback->state = I40IW_CM_STATE_CLOSING; + + cm_id = loopback->cm_id; + i40iw_rem_ref_cm_node(loopback); + cm_id->rem_ref(cm_id); + } + } + + return ret; +} + +/** + * i40iw_cm_close - close of cm connection + * @cm_node: connection's node + */ +static int i40iw_cm_close(struct i40iw_cm_node *cm_node) +{ + int ret = 0; + + if (!cm_node) + return -EINVAL; + + switch (cm_node->state) { + case I40IW_CM_STATE_SYN_RCVD: + case I40IW_CM_STATE_SYN_SENT: + case I40IW_CM_STATE_ONE_SIDE_ESTABLISHED: + case I40IW_CM_STATE_ESTABLISHED: + case I40IW_CM_STATE_ACCEPTING: + case I40IW_CM_STATE_MPAREQ_SENT: + case I40IW_CM_STATE_MPAREQ_RCVD: + i40iw_cleanup_retrans_entry(cm_node); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_CLOSE_WAIT: + cm_node->state = I40IW_CM_STATE_LAST_ACK; + i40iw_send_fin(cm_node); + break; + case I40IW_CM_STATE_FIN_WAIT1: + case I40IW_CM_STATE_FIN_WAIT2: + case I40IW_CM_STATE_LAST_ACK: + case I40IW_CM_STATE_TIME_WAIT: + case I40IW_CM_STATE_CLOSING: + ret = -1; + break; + case I40IW_CM_STATE_LISTENING: + i40iw_cleanup_retrans_entry(cm_node); + i40iw_send_reset(cm_node); + break; + case I40IW_CM_STATE_MPAREJ_RCVD: + case I40IW_CM_STATE_UNKNOWN: + case I40IW_CM_STATE_INITED: + case I40IW_CM_STATE_CLOSED: + case I40IW_CM_STATE_LISTENER_DESTROYED: + i40iw_rem_ref_cm_node(cm_node); + break; + case I40IW_CM_STATE_OFFLOADED: + if (cm_node->send_entry) + i40iw_pr_err("send_entry\n"); + i40iw_rem_ref_cm_node(cm_node); + break; + } + return ret; +} + +/** + * i40iw_receive_ilq - recv an ETHERNET packet, and process it + * through CM + * @vsi: pointer to the vsi structure + * @rbuf: receive buffer + */ +void i40iw_receive_ilq(struct i40iw_sc_vsi *vsi, struct i40iw_puda_buf *rbuf) +{ + struct i40iw_cm_node *cm_node; + struct i40iw_cm_listener *listener; + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct tcphdr *tcph; + struct i40iw_cm_info cm_info; + struct i40iw_sc_dev *dev = vsi->dev; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + struct vlan_ethhdr *ethh; + u16 vtag; + + /* if vlan, then maclen = 18 else 14 */ + iph = (struct iphdr *)rbuf->iph; + memset(&cm_info, 0, sizeof(cm_info)); + + i40iw_debug_buf(dev, + I40IW_DEBUG_ILQ, + "RECEIVE ILQ BUFFER", + rbuf->mem.va, + rbuf->totallen); + ethh = 
(struct vlan_ethhdr *)rbuf->mem.va; + + if (ethh->h_vlan_proto == htons(ETH_P_8021Q)) { + vtag = ntohs(ethh->h_vlan_TCI); + cm_info.user_pri = (vtag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + cm_info.vlan_id = vtag & VLAN_VID_MASK; + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "%s vlan_id=%d\n", + __func__, + cm_info.vlan_id); + } else { + cm_info.vlan_id = I40IW_NO_VLAN; + } + tcph = (struct tcphdr *)rbuf->tcph; + + if (rbuf->ipv4) { + cm_info.loc_addr[0] = ntohl(iph->daddr); + cm_info.rem_addr[0] = ntohl(iph->saddr); + cm_info.ipv4 = true; + cm_info.tos = iph->tos; + } else { + ip6h = (struct ipv6hdr *)rbuf->iph; + i40iw_copy_ip_ntohl(cm_info.loc_addr, + ip6h->daddr.in6_u.u6_addr32); + i40iw_copy_ip_ntohl(cm_info.rem_addr, + ip6h->saddr.in6_u.u6_addr32); + cm_info.ipv4 = false; + cm_info.tos = (ip6h->priority << 4) | (ip6h->flow_lbl[0] >> 4); + } + cm_info.loc_port = ntohs(tcph->dest); + cm_info.rem_port = ntohs(tcph->source); + cm_node = i40iw_find_node(cm_core, + cm_info.rem_port, + cm_info.rem_addr, + cm_info.loc_port, + cm_info.loc_addr, + true, + false); + + if (!cm_node) { + /* Only type of packet accepted are for */ + /* the PASSIVE open (syn only) */ + if (!tcph->syn || tcph->ack) + return; + listener = + i40iw_find_listener(cm_core, + cm_info.loc_addr, + cm_info.loc_port, + cm_info.vlan_id, + I40IW_CM_LISTENER_ACTIVE_STATE); + if (!listener) { + cm_info.cm_id = NULL; + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "%s no listener found\n", + __func__); + return; + } + cm_info.cm_id = listener->cm_id; + cm_node = i40iw_make_cm_node(cm_core, iwdev, &cm_info, listener); + if (!cm_node) { + i40iw_debug(cm_core->dev, + I40IW_DEBUG_CM, + "%s allocate node failed\n", + __func__); + atomic_dec(&listener->ref_count); + return; + } + if (!tcph->rst && !tcph->fin) { + cm_node->state = I40IW_CM_STATE_LISTENING; + } else { + i40iw_rem_ref_cm_node(cm_node); + return; + } + atomic_inc(&cm_node->ref_count); + } else if (cm_node->state == I40IW_CM_STATE_OFFLOADED) { + i40iw_rem_ref_cm_node(cm_node); + return; + } + i40iw_process_packet(cm_node, rbuf); + i40iw_rem_ref_cm_node(cm_node); +} + +/** + * i40iw_setup_cm_core - allocate a top level instance of a cm + * core + * @iwdev: iwarp device structure + */ +void i40iw_setup_cm_core(struct i40iw_device *iwdev) +{ + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + + cm_core->iwdev = iwdev; + cm_core->dev = &iwdev->sc_dev; + + INIT_LIST_HEAD(&cm_core->accelerated_list); + INIT_LIST_HEAD(&cm_core->non_accelerated_list); + INIT_LIST_HEAD(&cm_core->listen_nodes); + + timer_setup(&cm_core->tcp_timer, i40iw_cm_timer_tick, 0); + + spin_lock_init(&cm_core->ht_lock); + spin_lock_init(&cm_core->listen_list_lock); + spin_lock_init(&cm_core->apbvt_lock); + + cm_core->event_wq = alloc_ordered_workqueue("iwewq", + WQ_MEM_RECLAIM); + + cm_core->disconn_wq = alloc_ordered_workqueue("iwdwq", + WQ_MEM_RECLAIM); +} + +/** + * i40iw_cleanup_cm_core - deallocate a top level instance of a + * cm core + * @cm_core: cm's core + */ +void i40iw_cleanup_cm_core(struct i40iw_cm_core *cm_core) +{ + unsigned long flags; + + if (!cm_core) + return; + + spin_lock_irqsave(&cm_core->ht_lock, flags); + if (timer_pending(&cm_core->tcp_timer)) + del_timer_sync(&cm_core->tcp_timer); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + destroy_workqueue(cm_core->event_wq); + destroy_workqueue(cm_core->disconn_wq); +} + +/** + * i40iw_init_tcp_ctx - setup qp context + * @cm_node: connection's node + * @tcp_info: offload info for tcp + * @iwqp: associate qp for the connection + */ 
+static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node, + struct i40iw_tcp_offload_info *tcp_info, + struct i40iw_qp *iwqp) +{ + tcp_info->ipv4 = cm_node->ipv4; + tcp_info->drop_ooo_seg = true; + tcp_info->wscale = true; + tcp_info->ignore_tcp_opt = true; + tcp_info->ignore_tcp_uns_opt = true; + tcp_info->no_nagle = false; + + tcp_info->ttl = I40IW_DEFAULT_TTL; + tcp_info->rtt_var = cpu_to_le32(I40IW_DEFAULT_RTT_VAR); + tcp_info->ss_thresh = cpu_to_le32(I40IW_DEFAULT_SS_THRESH); + tcp_info->rexmit_thresh = I40IW_DEFAULT_REXMIT_THRESH; + + tcp_info->tcp_state = I40IW_TCP_STATE_ESTABLISHED; + tcp_info->snd_wscale = cm_node->tcp_cntxt.snd_wscale; + tcp_info->rcv_wscale = cm_node->tcp_cntxt.rcv_wscale; + + tcp_info->snd_nxt = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + tcp_info->snd_wnd = cpu_to_le32(cm_node->tcp_cntxt.snd_wnd); + tcp_info->rcv_nxt = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); + tcp_info->snd_max = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + + tcp_info->snd_una = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + tcp_info->cwnd = cpu_to_le32(2 * cm_node->tcp_cntxt.mss); + tcp_info->snd_wl1 = cpu_to_le32(cm_node->tcp_cntxt.rcv_nxt); + tcp_info->snd_wl2 = cpu_to_le32(cm_node->tcp_cntxt.loc_seq_num); + tcp_info->max_snd_window = cpu_to_le32(cm_node->tcp_cntxt.max_snd_wnd); + tcp_info->rcv_wnd = cpu_to_le32(cm_node->tcp_cntxt.rcv_wnd << + cm_node->tcp_cntxt.rcv_wscale); + + tcp_info->flow_label = 0; + tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); + if (cm_node->vlan_id < VLAN_TAG_PRESENT) { + tcp_info->insert_vlan_tag = true; + tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) | + cm_node->vlan_id); + } + if (cm_node->ipv4) { + tcp_info->src_port = cpu_to_le16(cm_node->loc_port); + tcp_info->dst_port = cpu_to_le16(cm_node->rem_port); + + tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[0]); + tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[0]); + tcp_info->arp_idx = + cpu_to_le16((u16)i40iw_arp_table( + iwqp->iwdev, + &tcp_info->dest_ip_addr3, + true, + NULL, + I40IW_ARP_RESOLVE)); + } else { + tcp_info->src_port = cpu_to_le16(cm_node->loc_port); + tcp_info->dst_port = cpu_to_le16(cm_node->rem_port); + tcp_info->dest_ip_addr0 = cpu_to_le32(cm_node->rem_addr[0]); + tcp_info->dest_ip_addr1 = cpu_to_le32(cm_node->rem_addr[1]); + tcp_info->dest_ip_addr2 = cpu_to_le32(cm_node->rem_addr[2]); + tcp_info->dest_ip_addr3 = cpu_to_le32(cm_node->rem_addr[3]); + tcp_info->local_ipaddr0 = cpu_to_le32(cm_node->loc_addr[0]); + tcp_info->local_ipaddr1 = cpu_to_le32(cm_node->loc_addr[1]); + tcp_info->local_ipaddr2 = cpu_to_le32(cm_node->loc_addr[2]); + tcp_info->local_ipaddr3 = cpu_to_le32(cm_node->loc_addr[3]); + tcp_info->arp_idx = + cpu_to_le16((u16)i40iw_arp_table( + iwqp->iwdev, + &tcp_info->dest_ip_addr0, + false, + NULL, + I40IW_ARP_RESOLVE)); + } +} + +/** + * i40iw_cm_init_tsa_conn - setup qp for RTS + * @iwqp: associate qp for the connection + * @cm_node: connection's node + */ +static void i40iw_cm_init_tsa_conn(struct i40iw_qp *iwqp, + struct i40iw_cm_node *cm_node) +{ + struct i40iw_tcp_offload_info tcp_info; + struct i40iwarp_offload_info *iwarp_info; + struct i40iw_qp_host_ctx_info *ctx_info; + struct i40iw_device *iwdev = iwqp->iwdev; + struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; + + memset(&tcp_info, 0x00, sizeof(struct i40iw_tcp_offload_info)); + iwarp_info = &iwqp->iwarp_info; + ctx_info = &iwqp->ctx_info; + + ctx_info->tcp_info = &tcp_info; + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + 
ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; + + iwarp_info->ord_size = cm_node->ord_size; + iwarp_info->ird_size = i40iw_derive_hw_ird_setting(cm_node->ird_size); + + if (iwarp_info->ord_size == 1) + iwarp_info->ord_size = 2; + + iwarp_info->rd_enable = true; + iwarp_info->rdmap_ver = 1; + iwarp_info->ddp_ver = 1; + + iwarp_info->pd_id = iwqp->iwpd->sc_pd.pd_id; + + ctx_info->tcp_info_valid = true; + ctx_info->iwarp_info_valid = true; + ctx_info->add_to_qoslist = true; + ctx_info->user_pri = cm_node->user_pri; + + i40iw_init_tcp_ctx(cm_node, &tcp_info, iwqp); + if (cm_node->snd_mark_en) { + iwarp_info->snd_mark_en = true; + iwarp_info->snd_mark_offset = (tcp_info.snd_nxt & + SNDMARKER_SEQNMASK) + cm_node->lsmm_size; + } + + cm_node->state = I40IW_CM_STATE_OFFLOADED; + tcp_info.tcp_state = I40IW_TCP_STATE_ESTABLISHED; + tcp_info.src_mac_addr_idx = iwdev->mac_ip_table_idx; + tcp_info.tos = cm_node->tos; + + dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, (u64 *)(iwqp->host_ctx.va), ctx_info); + + /* once tcp_info is set, no need to do it again */ + ctx_info->tcp_info_valid = false; + ctx_info->iwarp_info_valid = false; + ctx_info->add_to_qoslist = false; +} + +/** + * i40iw_cm_disconn - when a connection is being closed + * @iwqp: associate qp for the connection + */ +void i40iw_cm_disconn(struct i40iw_qp *iwqp) +{ + struct disconn_work *work; + struct i40iw_device *iwdev = iwqp->iwdev; + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + unsigned long flags; + + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) + return; /* Timer will clean up */ + + spin_lock_irqsave(&iwdev->qptable_lock, flags); + if (!iwdev->qp_table[iwqp->ibqp.qp_num]) { + spin_unlock_irqrestore(&iwdev->qptable_lock, flags); + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, + "%s qp_id %d is already freed\n", + __func__, iwqp->ibqp.qp_num); + kfree(work); + return; + } + i40iw_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->qptable_lock, flags); + + work->iwqp = iwqp; + INIT_WORK(&work->work, i40iw_disconnect_worker); + queue_work(cm_core->disconn_wq, &work->work); + return; +} + +/** + * i40iw_qp_disconnect - free qp and close cm + * @iwqp: associate qp for the connection + */ +static void i40iw_qp_disconnect(struct i40iw_qp *iwqp) +{ + struct i40iw_device *iwdev; + struct i40iw_ib_device *iwibdev; + + iwdev = to_iwdev(iwqp->ibqp.device); + if (!iwdev) { + i40iw_pr_err("iwdev == NULL\n"); + return; + } + + iwibdev = iwdev->iwibdev; + + if (iwqp->active_conn) { + /* indicate this connection is NOT active */ + iwqp->active_conn = 0; + } else { + /* Need to free the Last Streaming Mode Message */ + if (iwqp->ietf_mem.va) { + if (iwqp->lsmm_mr) + iwibdev->ibdev.dereg_mr(iwqp->lsmm_mr); + i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->ietf_mem); + } + } + + /* close the CM node down if it is still active */ + if (iwqp->cm_node) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, "%s Call close API\n", __func__); + i40iw_cm_close(iwqp->cm_node); + } +} + +/** + * i40iw_cm_disconn_true - called by worker thread to disconnect qp + * @iwqp: associate qp for the connection + */ +static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp) +{ + struct iw_cm_id *cm_id; + struct i40iw_device *iwdev; + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + u16 last_ae; + u8 original_hw_tcp_state; + u8 original_ibqp_state; + int disconn_status = 0; + int issue_disconn = 0; + int issue_close = 0; + int issue_flush = 0; + struct ib_event ibevent; + unsigned long flags; + int ret; + + if (!iwqp) { + i40iw_pr_err("iwqp == NULL\n"); + return; + } 
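+ /* evaluate TCP/QP state under the QP lock to decide which disconnect, close and flush events to issue */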
+ + spin_lock_irqsave(&iwqp->lock, flags); + cm_id = iwqp->cm_id; + /* make sure we havent already closed this connection */ + if (!cm_id) { + spin_unlock_irqrestore(&iwqp->lock, flags); + return; + } + + iwdev = to_iwdev(iwqp->ibqp.device); + + original_hw_tcp_state = iwqp->hw_tcp_state; + original_ibqp_state = iwqp->ibqp_state; + last_ae = iwqp->last_aeq; + + if (qp->term_flags) { + issue_disconn = 1; + issue_close = 1; + iwqp->cm_id = NULL; + /*When term timer expires after cm_timer, don't want + *terminate-handler to issue cm_disconn which can re-free + *a QP even after its refcnt=0. + */ + i40iw_terminate_del_timer(qp); + if (!iwqp->flush_issued) { + iwqp->flush_issued = 1; + issue_flush = 1; + } + } else if ((original_hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) || + ((original_ibqp_state == IB_QPS_RTS) && + (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) { + issue_disconn = 1; + if (last_ae == I40IW_AE_LLP_CONNECTION_RESET) + disconn_status = -ECONNRESET; + } + + if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) || + (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) || + (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) || + (last_ae == I40IW_AE_LLP_CONNECTION_RESET) || + iwdev->reset)) { + issue_close = 1; + iwqp->cm_id = NULL; + if (!iwqp->flush_issued) { + iwqp->flush_issued = 1; + issue_flush = 1; + } + } + + spin_unlock_irqrestore(&iwqp->lock, flags); + if (issue_flush && !iwqp->destroyed) { + /* Flush the queues */ + i40iw_flush_wqes(iwdev, iwqp); + + if (qp->term_flags && iwqp->ibqp.event_handler) { + ibevent.device = iwqp->ibqp.device; + ibevent.event = (qp->eventtype == TERM_EVENT_QP_FATAL) ? + IB_EVENT_QP_FATAL : IB_EVENT_QP_ACCESS_ERR; + ibevent.element.qp = &iwqp->ibqp; + iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context); + } + } + + if (cm_id && cm_id->event_handler) { + if (issue_disconn) { + ret = i40iw_send_cm_event(NULL, + cm_id, + IW_CM_EVENT_DISCONNECT, + disconn_status); + + if (ret) + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "disconnect event failed %s: - cm_id = %p\n", + __func__, cm_id); + } + if (issue_close) { + i40iw_qp_disconnect(iwqp); + cm_id->provider_data = iwqp; + ret = i40iw_send_cm_event(NULL, cm_id, IW_CM_EVENT_CLOSE, 0); + if (ret) + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "close event failed %s: - cm_id = %p\n", + __func__, cm_id); + cm_id->rem_ref(cm_id); + } + } +} + +/** + * i40iw_disconnect_worker - worker for connection close + * @work: points or disconn structure + */ +static void i40iw_disconnect_worker(struct work_struct *work) +{ + struct disconn_work *dwork = container_of(work, struct disconn_work, work); + struct i40iw_qp *iwqp = dwork->iwqp; + + kfree(dwork); + i40iw_cm_disconn_true(iwqp); + i40iw_rem_ref(&iwqp->ibqp); +} + +/** + * i40iw_accept - registered call for connection to be accepted + * @cm_id: cm information for passive connection + * @conn_param: accpet parameters + */ +int i40iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) +{ + struct ib_qp *ibqp; + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + struct i40iw_cm_core *cm_core; + struct i40iw_cm_node *cm_node; + struct ib_qp_attr attr; + int passive_state; + struct ib_mr *ibmr; + struct i40iw_pd *iwpd; + u16 buf_len = 0; + struct i40iw_kmem_info accept; + enum i40iw_status_code status; + u64 tagged_offset; + unsigned long flags; + + memset(&attr, 0, sizeof(attr)); + ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); + if (!ibqp) + return -EINVAL; + + iwqp = to_iwqp(ibqp); + iwdev = 
iwqp->iwdev; + dev = &iwdev->sc_dev; + cm_core = &iwdev->cm_core; + cm_node = (struct i40iw_cm_node *)cm_id->provider_data; + + if (((struct sockaddr_in *)&cm_id->local_addr)->sin_family == AF_INET) { + cm_node->ipv4 = true; + cm_node->vlan_id = i40iw_get_vlan_ipv4(cm_node->loc_addr); + } else { + cm_node->ipv4 = false; + i40iw_netdev_vlan_ipv6(cm_node->loc_addr, &cm_node->vlan_id); + } + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "Accept vlan_id=%d\n", + cm_node->vlan_id); + if (cm_node->state == I40IW_CM_STATE_LISTENER_DESTROYED) { + if (cm_node->loopbackpartner) + i40iw_rem_ref_cm_node(cm_node->loopbackpartner); + i40iw_rem_ref_cm_node(cm_node); + return -EINVAL; + } + + passive_state = atomic_add_return(1, &cm_node->passive_state); + if (passive_state == I40IW_SEND_RESET_EVENT) { + i40iw_rem_ref_cm_node(cm_node); + return -ECONNRESET; + } + + cm_node->cm_core->stats_accepts++; + iwqp->cm_node = (void *)cm_node; + cm_node->iwqp = iwqp; + + buf_len = conn_param->private_data_len + I40IW_MAX_IETF_SIZE; + + status = i40iw_allocate_dma_mem(dev->hw, &iwqp->ietf_mem, buf_len, 1); + + if (status) + return -ENOMEM; + cm_node->pdata.size = conn_param->private_data_len; + accept.addr = iwqp->ietf_mem.va; + accept.size = i40iw_cm_build_mpa_frame(cm_node, &accept, MPA_KEY_REPLY); + memcpy(accept.addr + accept.size, conn_param->private_data, + conn_param->private_data_len); + + /* setup our first outgoing iWarp send WQE (the IETF frame response) */ + if ((cm_node->ipv4 && + !i40iw_ipv4_is_loopback(cm_node->loc_addr[0], cm_node->rem_addr[0])) || + (!cm_node->ipv4 && + !i40iw_ipv6_is_loopback(cm_node->loc_addr, cm_node->rem_addr))) { + iwpd = iwqp->iwpd; + tagged_offset = (uintptr_t)iwqp->ietf_mem.va; + ibmr = i40iw_reg_phys_mr(&iwpd->ibpd, + iwqp->ietf_mem.pa, + buf_len, + IB_ACCESS_LOCAL_WRITE, + &tagged_offset); + if (IS_ERR(ibmr)) { + i40iw_free_dma_mem(dev->hw, &iwqp->ietf_mem); + return -ENOMEM; + } + + ibmr->pd = &iwpd->ibpd; + ibmr->device = iwpd->ibpd.device; + iwqp->lsmm_mr = ibmr; + if (iwqp->page) + iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); + dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, + iwqp->ietf_mem.va, + (accept.size + conn_param->private_data_len), + ibmr->lkey); + + } else { + if (iwqp->page) + iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); + dev->iw_priv_qp_ops->qp_send_lsmm(&iwqp->sc_qp, NULL, 0, 0); + } + + if (iwqp->page) + kunmap(iwqp->page); + + iwqp->cm_id = cm_id; + cm_node->cm_id = cm_id; + + cm_id->provider_data = (void *)iwqp; + iwqp->active_conn = 0; + + cm_node->lsmm_size = accept.size + conn_param->private_data_len; + i40iw_cm_init_tsa_conn(iwqp, cm_node); + cm_id->add_ref(cm_id); + i40iw_add_ref(&iwqp->ibqp); + + attr.qp_state = IB_QPS_RTS; + cm_node->qhash_set = false; + i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); + + cm_node->accelerated = true; + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_move_tail(&cm_node->list, &cm_core->accelerated_list); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + status = + i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_ESTABLISHED, 0); + if (status) + i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - ESTABLISHED\n"); + + if (cm_node->loopbackpartner) { + cm_node->loopbackpartner->pdata.size = conn_param->private_data_len; + + /* copy entire MPA frame to our cm_node's frame */ + memcpy(cm_node->loopbackpartner->pdata_buf, + conn_param->private_data, + conn_param->private_data_len); + i40iw_create_event(cm_node->loopbackpartner, I40IW_CM_EVENT_CONNECTED); + } + + if (cm_node->accept_pend) { + 
atomic_dec(&cm_node->listener->pend_accepts_cnt); + cm_node->accept_pend = 0; + } + return 0; +} + +/** + * i40iw_reject - registered call for connection to be rejected + * @cm_id: cm information for passive connection + * @pdata: private data to be sent + * @pdata_len: private data length + */ +int i40iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) +{ + struct i40iw_device *iwdev; + struct i40iw_cm_node *cm_node; + struct i40iw_cm_node *loopback; + + cm_node = (struct i40iw_cm_node *)cm_id->provider_data; + loopback = cm_node->loopbackpartner; + cm_node->cm_id = cm_id; + cm_node->pdata.size = pdata_len; + + iwdev = to_iwdev(cm_id->device); + if (!iwdev) + return -EINVAL; + cm_node->cm_core->stats_rejects++; + + if (pdata_len + sizeof(struct ietf_mpa_v2) > MAX_CM_BUFFER) + return -EINVAL; + + if (loopback) { + memcpy(&loopback->pdata_buf, pdata, pdata_len); + loopback->pdata.size = pdata_len; + } + + return i40iw_cm_reject(cm_node, pdata, pdata_len); +} + +/** + * i40iw_connect - registered call for connection to be established + * @cm_id: cm information for passive connection + * @conn_param: Information about the connection + */ +int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) +{ + struct ib_qp *ibqp; + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_cm_node *cm_node; + struct i40iw_cm_info cm_info; + struct sockaddr_in *laddr; + struct sockaddr_in *raddr; + struct sockaddr_in6 *laddr6; + struct sockaddr_in6 *raddr6; + int ret = 0; + + ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); + if (!ibqp) + return -EINVAL; + iwqp = to_iwqp(ibqp); + if (!iwqp) + return -EINVAL; + iwdev = to_iwdev(iwqp->ibqp.device); + if (!iwdev) + return -EINVAL; + + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; + raddr6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; + + if (!(laddr->sin_port) || !(raddr->sin_port)) + return -EINVAL; + + iwqp->active_conn = 1; + iwqp->cm_id = NULL; + cm_id->provider_data = iwqp; + + /* set up the connection params for the node */ + if (cm_id->remote_addr.ss_family == AF_INET) { + cm_info.ipv4 = true; + memset(cm_info.loc_addr, 0, sizeof(cm_info.loc_addr)); + memset(cm_info.rem_addr, 0, sizeof(cm_info.rem_addr)); + cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr); + cm_info.rem_addr[0] = ntohl(raddr->sin_addr.s_addr); + cm_info.loc_port = ntohs(laddr->sin_port); + cm_info.rem_port = ntohs(raddr->sin_port); + cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr); + } else { + cm_info.ipv4 = false; + i40iw_copy_ip_ntohl(cm_info.loc_addr, + laddr6->sin6_addr.in6_u.u6_addr32); + i40iw_copy_ip_ntohl(cm_info.rem_addr, + raddr6->sin6_addr.in6_u.u6_addr32); + cm_info.loc_port = ntohs(laddr6->sin6_port); + cm_info.rem_port = ntohs(raddr6->sin6_port); + i40iw_netdev_vlan_ipv6(cm_info.loc_addr, &cm_info.vlan_id); + } + cm_info.cm_id = cm_id; + cm_info.tos = cm_id->tos; + cm_info.user_pri = rt_tos2priority(cm_id->tos); + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", + __func__, cm_id->tos, cm_info.user_pri); + cm_id->add_ref(cm_id); + cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, + conn_param, &cm_info); + + if (IS_ERR(cm_node)) { + ret = PTR_ERR(cm_node); + cm_id->rem_ref(cm_id); + return ret; + } + + if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) || + (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32, + 
raddr6->sin6_addr.in6_u.u6_addr32, + sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) { + if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED, + I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) { + ret = -EINVAL; + goto err; + } + cm_node->qhash_set = true; + } + + if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, + I40IW_MANAGE_APBVT_ADD)) { + ret = -EINVAL; + goto err; + } + + cm_node->apbvt_set = true; + iwqp->cm_node = cm_node; + cm_node->iwqp = iwqp; + iwqp->cm_id = cm_id; + i40iw_add_ref(&iwqp->ibqp); + + if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { + cm_node->state = I40IW_CM_STATE_SYN_SENT; + ret = i40iw_send_syn(cm_node, 0); + if (ret) + goto err; + } + + if (cm_node->loopbackpartner) { + cm_node->loopbackpartner->state = I40IW_CM_STATE_MPAREQ_RCVD; + i40iw_create_event(cm_node->loopbackpartner, + I40IW_CM_EVENT_MPA_REQ); + } + + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "Api - connect(): port=0x%04x, cm_node=%p, cm_id = %p.\n", + cm_node->rem_port, + cm_node, + cm_node->cm_id); + + return 0; + +err: + if (cm_info.ipv4) + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "Api - connect() FAILED: dest addr=%pI4", + cm_info.rem_addr); + else + i40iw_debug(&iwdev->sc_dev, + I40IW_DEBUG_CM, + "Api - connect() FAILED: dest addr=%pI6", + cm_info.rem_addr); + + i40iw_rem_ref_cm_node(cm_node); + cm_id->rem_ref(cm_id); + iwdev->cm_core.stats_connect_errs++; + return ret; +} + +/** + * i40iw_create_listen - registered call creating listener + * @cm_id: cm information for passive connection + * @backlog: to max accept pending count + */ +int i40iw_create_listen(struct iw_cm_id *cm_id, int backlog) +{ + struct i40iw_device *iwdev; + struct i40iw_cm_listener *cm_listen_node; + struct i40iw_cm_info cm_info; + enum i40iw_status_code ret; + struct sockaddr_in *laddr; + struct sockaddr_in6 *laddr6; + bool wildcard = false; + + iwdev = to_iwdev(cm_id->device); + if (!iwdev) + return -EINVAL; + + laddr = (struct sockaddr_in *)&cm_id->m_local_addr; + laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; + memset(&cm_info, 0, sizeof(cm_info)); + if (laddr->sin_family == AF_INET) { + cm_info.ipv4 = true; + cm_info.loc_addr[0] = ntohl(laddr->sin_addr.s_addr); + cm_info.loc_port = ntohs(laddr->sin_port); + + if (laddr->sin_addr.s_addr != INADDR_ANY) + cm_info.vlan_id = i40iw_get_vlan_ipv4(cm_info.loc_addr); + else + wildcard = true; + + } else { + cm_info.ipv4 = false; + i40iw_copy_ip_ntohl(cm_info.loc_addr, + laddr6->sin6_addr.in6_u.u6_addr32); + cm_info.loc_port = ntohs(laddr6->sin6_port); + if (ipv6_addr_type(&laddr6->sin6_addr) != IPV6_ADDR_ANY) + i40iw_netdev_vlan_ipv6(cm_info.loc_addr, + &cm_info.vlan_id); + else + wildcard = true; + } + cm_info.backlog = backlog; + cm_info.cm_id = cm_id; + + cm_listen_node = i40iw_make_listen_node(&iwdev->cm_core, iwdev, &cm_info); + if (!cm_listen_node) { + i40iw_pr_err("cm_listen_node == NULL\n"); + return -ENOMEM; + } + + cm_id->provider_data = cm_listen_node; + + cm_listen_node->tos = cm_id->tos; + cm_listen_node->user_pri = rt_tos2priority(cm_id->tos); + cm_info.user_pri = cm_listen_node->user_pri; + + if (!cm_listen_node->reused_node) { + if (wildcard) { + if (cm_info.ipv4) + ret = i40iw_add_mqh_4(iwdev, + &cm_info, + cm_listen_node); + else + ret = i40iw_add_mqh_6(iwdev, + &cm_info, + cm_listen_node); + if (ret) + goto error; + + ret = i40iw_manage_apbvt(iwdev, + cm_info.loc_port, + I40IW_MANAGE_APBVT_ADD); + + if (ret) + goto error; + } else { + ret = i40iw_manage_qhash(iwdev, + &cm_info, + I40IW_QHASH_TYPE_TCP_SYN, + 
I40IW_QHASH_MANAGE_TYPE_ADD, + NULL, + true); + if (ret) + goto error; + cm_listen_node->qhash_set = true; + ret = i40iw_manage_apbvt(iwdev, + cm_info.loc_port, + I40IW_MANAGE_APBVT_ADD); + if (ret) + goto error; + } + } + cm_id->add_ref(cm_id); + cm_listen_node->cm_core->stats_listen_created++; + return 0; + error: + i40iw_cm_del_listen(&iwdev->cm_core, (void *)cm_listen_node, false); + return -EINVAL; +} + +/** + * i40iw_destroy_listen - registered call to destroy listener + * @cm_id: cm information for passive connection + */ +int i40iw_destroy_listen(struct iw_cm_id *cm_id) +{ + struct i40iw_device *iwdev; + + iwdev = to_iwdev(cm_id->device); + if (cm_id->provider_data) + i40iw_cm_del_listen(&iwdev->cm_core, cm_id->provider_data, true); + else + i40iw_pr_err("cm_id->provider_data was NULL\n"); + + cm_id->rem_ref(cm_id); + + return 0; +} + +/** + * i40iw_cm_event_connected - handle connected active node + * @event: the info for cm_node of connection + */ +static void i40iw_cm_event_connected(struct i40iw_cm_event *event) +{ + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_cm_core *cm_core; + struct i40iw_cm_node *cm_node; + struct i40iw_sc_dev *dev; + struct ib_qp_attr attr; + struct iw_cm_id *cm_id; + unsigned long flags; + int status; + bool read0; + + cm_node = event->cm_node; + cm_id = cm_node->cm_id; + iwqp = (struct i40iw_qp *)cm_id->provider_data; + iwdev = to_iwdev(iwqp->ibqp.device); + dev = &iwdev->sc_dev; + cm_core = &iwdev->cm_core; + + if (iwqp->destroyed) { + status = -ETIMEDOUT; + goto error; + } + i40iw_cm_init_tsa_conn(iwqp, cm_node); + read0 = (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO); + if (iwqp->page) + iwqp->sc_qp.qp_uk.sq_base = kmap(iwqp->page); + dev->iw_priv_qp_ops->qp_send_rtt(&iwqp->sc_qp, read0); + if (iwqp->page) + kunmap(iwqp->page); + + memset(&attr, 0, sizeof(attr)); + attr.qp_state = IB_QPS_RTS; + cm_node->qhash_set = false; + i40iw_modify_qp(&iwqp->ibqp, &attr, IB_QP_STATE, NULL); + + cm_node->accelerated = true; + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_move_tail(&cm_node->list, &cm_core->accelerated_list); + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + status = i40iw_send_cm_event(cm_node, cm_id, IW_CM_EVENT_CONNECT_REPLY, + 0); + if (status) + i40iw_debug(dev, I40IW_DEBUG_CM, "error sending cm event - CONNECT_REPLY\n"); + + return; + +error: + iwqp->cm_id = NULL; + cm_id->provider_data = NULL; + i40iw_send_cm_event(event->cm_node, + cm_id, + IW_CM_EVENT_CONNECT_REPLY, + status); + cm_id->rem_ref(cm_id); + i40iw_rem_ref_cm_node(event->cm_node); +} + +/** + * i40iw_cm_event_reset - handle reset + * @event: the info for cm_node of connection + */ +static void i40iw_cm_event_reset(struct i40iw_cm_event *event) +{ + struct i40iw_cm_node *cm_node = event->cm_node; + struct iw_cm_id *cm_id = cm_node->cm_id; + struct i40iw_qp *iwqp; + + if (!cm_id) + return; + + iwqp = cm_id->provider_data; + if (!iwqp) + return; + + i40iw_debug(cm_node->dev, + I40IW_DEBUG_CM, + "reset event %p - cm_id = %p\n", + event->cm_node, cm_id); + iwqp->cm_id = NULL; + + i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_DISCONNECT, -ECONNRESET); + i40iw_send_cm_event(cm_node, cm_node->cm_id, IW_CM_EVENT_CLOSE, 0); +} + +/** + * i40iw_cm_event_handler - worker thread callback to send event to cm upper layer + * @work: pointer of cm event info. 
+ */ +static void i40iw_cm_event_handler(struct work_struct *work) +{ + struct i40iw_cm_event *event = container_of(work, + struct i40iw_cm_event, + event_work); + struct i40iw_cm_node *cm_node; + + if (!event || !event->cm_node || !event->cm_node->cm_core) + return; + + cm_node = event->cm_node; + + switch (event->type) { + case I40IW_CM_EVENT_MPA_REQ: + i40iw_send_cm_event(cm_node, + cm_node->cm_id, + IW_CM_EVENT_CONNECT_REQUEST, + 0); + break; + case I40IW_CM_EVENT_RESET: + i40iw_cm_event_reset(event); + break; + case I40IW_CM_EVENT_CONNECTED: + if (!event->cm_node->cm_id || + (event->cm_node->state != I40IW_CM_STATE_OFFLOADED)) + break; + i40iw_cm_event_connected(event); + break; + case I40IW_CM_EVENT_MPA_REJECT: + if (!event->cm_node->cm_id || + (cm_node->state == I40IW_CM_STATE_OFFLOADED)) + break; + i40iw_send_cm_event(cm_node, + cm_node->cm_id, + IW_CM_EVENT_CONNECT_REPLY, + -ECONNREFUSED); + break; + case I40IW_CM_EVENT_ABORTED: + if (!event->cm_node->cm_id || + (event->cm_node->state == I40IW_CM_STATE_OFFLOADED)) + break; + i40iw_event_connect_error(event); + break; + default: + i40iw_pr_err("event type = %d\n", event->type); + break; + } + + event->cm_info.cm_id->rem_ref(event->cm_info.cm_id); + i40iw_rem_ref_cm_node(event->cm_node); + kfree(event); +} + +/** + * i40iw_cm_post_event - queue event request for worker thread + * @event: cm node's info for up event call + */ +static void i40iw_cm_post_event(struct i40iw_cm_event *event) +{ + atomic_inc(&event->cm_node->ref_count); + event->cm_info.cm_id->add_ref(event->cm_info.cm_id); + INIT_WORK(&event->event_work, i40iw_cm_event_handler); + + queue_work(event->cm_node->cm_core->event_wq, &event->event_work); +} + +/** + * i40iw_qhash_ctrl - enable/disable qhash for list + * @iwdev: device pointer + * @parent_listen_node: parent listen node + * @nfo: cm info node + * @ipaddr: Pointer to IPv4 or IPv6 address + * @ipv4: flag indicating IPv4 when true + * @ifup: flag indicating interface up when true + * + * Enables or disables the qhash for the node in the child + * listen list that matches ipaddr. If no matching IP was found + * it will allocate and add a new child listen node to the + * parent listen node. The listen_list_lock is assumed to be + * held when called. + */ +static void i40iw_qhash_ctrl(struct i40iw_device *iwdev, + struct i40iw_cm_listener *parent_listen_node, + struct i40iw_cm_info *nfo, + u32 *ipaddr, bool ipv4, bool ifup) +{ + struct list_head *child_listen_list = &parent_listen_node->child_listen_list; + struct i40iw_cm_listener *child_listen_node; + struct list_head *pos, *tpos; + enum i40iw_status_code ret; + bool node_allocated = false; + enum i40iw_quad_hash_manage_type op = + ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE; + + list_for_each_safe(pos, tpos, child_listen_list) { + child_listen_node = + list_entry(pos, + struct i40iw_cm_listener, + child_listen_list); + if (!memcmp(child_listen_node->loc_addr, ipaddr, ipv4 ? 4 : 16)) + goto set_qhash; + } + + /* if not found then add a child listener if interface is going up */ + if (!ifup) + return; + child_listen_node = kzalloc(sizeof(*child_listen_node), GFP_ATOMIC); + if (!child_listen_node) + return; + node_allocated = true; + memcpy(child_listen_node, parent_listen_node, sizeof(*child_listen_node)); + + memcpy(child_listen_node->loc_addr, ipaddr, ipv4 ? 
4 : 16); + +set_qhash: + memcpy(nfo->loc_addr, + child_listen_node->loc_addr, + sizeof(nfo->loc_addr)); + nfo->vlan_id = child_listen_node->vlan_id; + ret = i40iw_manage_qhash(iwdev, nfo, + I40IW_QHASH_TYPE_TCP_SYN, + op, + NULL, false); + if (!ret) { + child_listen_node->qhash_set = ifup; + if (node_allocated) + list_add(&child_listen_node->child_listen_list, + &parent_listen_node->child_listen_list); + } else if (node_allocated) { + kfree(child_listen_node); + } +} + +/** + * i40iw_cm_teardown_connections - teardown QPs + * @iwdev: device pointer + * @ipaddr: Pointer to IPv4 or IPv6 address + * @ipv4: flag indicating IPv4 when true + * @disconnect_all: flag indicating disconnect all QPs + * teardown QPs where source or destination addr matches ip addr + */ +void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr, + struct i40iw_cm_info *nfo, + bool disconnect_all) +{ + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + struct list_head *list_core_temp; + struct list_head *list_node; + struct i40iw_cm_node *cm_node; + unsigned long flags; + struct list_head teardown_list; + struct ib_qp_attr attr; + + INIT_LIST_HEAD(&teardown_list); + spin_lock_irqsave(&cm_core->ht_lock, flags); + list_for_each_safe(list_node, list_core_temp, + &cm_core->accelerated_list) { + cm_node = container_of(list_node, struct i40iw_cm_node, list); + if (disconnect_all || + (nfo->vlan_id == cm_node->vlan_id && + (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) || + !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) { + atomic_inc(&cm_node->ref_count); + list_add(&cm_node->teardown_entry, &teardown_list); + } + } + list_for_each_safe(list_node, list_core_temp, + &cm_core->non_accelerated_list) { + cm_node = container_of(list_node, struct i40iw_cm_node, list); + if (disconnect_all || + (nfo->vlan_id == cm_node->vlan_id && + (!memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16) || + !memcmp(cm_node->rem_addr, ipaddr, nfo->ipv4 ? 4 : 16)))) { + atomic_inc(&cm_node->ref_count); + list_add(&cm_node->teardown_entry, &teardown_list); + } + } + spin_unlock_irqrestore(&cm_core->ht_lock, flags); + + list_for_each_safe(list_node, list_core_temp, &teardown_list) { + cm_node = container_of(list_node, struct i40iw_cm_node, + teardown_entry); + attr.qp_state = IB_QPS_ERR; + i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL); + if (iwdev->reset) + i40iw_cm_disconn(cm_node->iwqp); + i40iw_rem_ref_cm_node(cm_node); + } +} + +/** + * i40iw_ifdown_notify - process an ifdown on an interface + * @iwdev: device pointer + * @ipaddr: Pointer to IPv4 or IPv6 address + * @ipv4: flag indicating IPv4 when true + * @ifup: flag indicating interface up when true + */ +void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev, + u32 *ipaddr, bool ipv4, bool ifup) +{ + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + unsigned long flags; + struct i40iw_cm_listener *listen_node; + static const u32 ip_zero[4] = { 0, 0, 0, 0 }; + struct i40iw_cm_info nfo; + u16 vlan_id = rdma_vlan_dev_vlan_id(netdev); + enum i40iw_status_code ret; + enum i40iw_quad_hash_manage_type op = + ifup ? I40IW_QHASH_MANAGE_TYPE_ADD : I40IW_QHASH_MANAGE_TYPE_DELETE; + + nfo.vlan_id = vlan_id; + nfo.ipv4 = ipv4; + + /* Disable or enable qhash for listeners */ + spin_lock_irqsave(&cm_core->listen_list_lock, flags); + list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { + if (vlan_id == listen_node->vlan_id && + (!memcmp(listen_node->loc_addr, ipaddr, ipv4 ? 
4 : 16) || + !memcmp(listen_node->loc_addr, ip_zero, ipv4 ? 4 : 16))) { + memcpy(nfo.loc_addr, listen_node->loc_addr, + sizeof(nfo.loc_addr)); + nfo.loc_port = listen_node->loc_port; + nfo.user_pri = listen_node->user_pri; + if (!list_empty(&listen_node->child_listen_list)) { + i40iw_qhash_ctrl(iwdev, + listen_node, + &nfo, + ipaddr, ipv4, ifup); + } else if (memcmp(listen_node->loc_addr, ip_zero, + ipv4 ? 4 : 16)) { + ret = i40iw_manage_qhash(iwdev, + &nfo, + I40IW_QHASH_TYPE_TCP_SYN, + op, + NULL, + false); + if (!ret) + listen_node->qhash_set = ifup; + } + } + } + spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); + + /* teardown connected qp's on ifdown */ + if (!ifup) + i40iw_cm_teardown_connections(iwdev, ipaddr, &nfo, false); +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h new file mode 100644 index 000000000..66dc1ba03 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h @@ -0,0 +1,462 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_CM_H +#define I40IW_CM_H + +#define QUEUE_EVENTS + +#define I40IW_MANAGE_APBVT_DEL 0 +#define I40IW_MANAGE_APBVT_ADD 1 + +#define I40IW_MPA_REQUEST_ACCEPT 1 +#define I40IW_MPA_REQUEST_REJECT 2 + +/* IETF MPA -- defines, enums, structs */ +#define IEFT_MPA_KEY_REQ "MPA ID Req Frame" +#define IEFT_MPA_KEY_REP "MPA ID Rep Frame" +#define IETF_MPA_KEY_SIZE 16 +#define IETF_MPA_VERSION 1 +#define IETF_MAX_PRIV_DATA_LEN 512 +#define IETF_MPA_FRAME_SIZE 20 +#define IETF_RTR_MSG_SIZE 4 +#define IETF_MPA_V2_FLAG 0x10 +#define SNDMARKER_SEQNMASK 0x000001FF + +#define I40IW_MAX_IETF_SIZE 32 + +/* IETF RTR MSG Fields */ +#define IETF_PEER_TO_PEER 0x8000 +#define IETF_FLPDU_ZERO_LEN 0x4000 +#define IETF_RDMA0_WRITE 0x8000 +#define IETF_RDMA0_READ 0x4000 +#define IETF_NO_IRD_ORD 0x3FFF + +/* HW-supported IRD sizes*/ +#define I40IW_HW_IRD_SETTING_2 2 +#define I40IW_HW_IRD_SETTING_4 4 +#define I40IW_HW_IRD_SETTING_8 8 +#define I40IW_HW_IRD_SETTING_16 16 +#define I40IW_HW_IRD_SETTING_32 32 +#define I40IW_HW_IRD_SETTING_64 64 + +#define MAX_PORTS 65536 +#define I40IW_VLAN_PRIO_SHIFT 13 + +enum ietf_mpa_flags { + IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ + IETF_MPA_FLAGS_CRC = 0x40, /* receive Markers */ + IETF_MPA_FLAGS_REJECT = 0x20, /* Reject */ +}; + +struct ietf_mpa_v1 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + u8 priv_data[0]; +}; + +#define ietf_mpa_req_resp_frame ietf_mpa_frame + +struct ietf_rtr_msg { + __be16 ctrl_ird; + __be16 ctrl_ord; +}; + +struct ietf_mpa_v2 { + u8 key[IETF_MPA_KEY_SIZE]; + u8 flags; + u8 rev; + __be16 priv_data_len; + struct ietf_rtr_msg rtr_msg; + u8 priv_data[0]; +}; + +struct i40iw_cm_node; +enum i40iw_timer_type { + I40IW_TIMER_TYPE_SEND, + I40IW_TIMER_TYPE_RECV, + I40IW_TIMER_NODE_CLEANUP, + I40IW_TIMER_TYPE_CLOSE, +}; + +#define I40IW_PASSIVE_STATE_INDICATED 0 +#define I40IW_DO_NOT_SEND_RESET_EVENT 1 +#define I40IW_SEND_RESET_EVENT 2 + +#define MAX_I40IW_IFS 4 + +#define SET_ACK 0x1 +#define SET_SYN 0x2 +#define SET_FIN 0x4 +#define SET_RST 0x8 + +#define TCP_OPTIONS_PADDING 3 + +struct option_base { + u8 optionnum; + u8 length; +}; + +enum option_numbers { + OPTION_NUMBER_END, + OPTION_NUMBER_NONE, + OPTION_NUMBER_MSS, + OPTION_NUMBER_WINDOW_SCALE, + OPTION_NUMBER_SACK_PERM, + OPTION_NUMBER_SACK, + OPTION_NUMBER_WRITE0 = 0xbc +}; + +struct option_mss { + u8 optionnum; + u8 length; + __be16 mss; +}; + +struct option_windowscale { + u8 optionnum; + u8 length; + u8 shiftcount; +}; + +union all_known_options { + char as_end; + struct option_base as_base; + struct option_mss as_mss; + struct option_windowscale as_windowscale; +}; + +struct i40iw_timer_entry { + struct list_head list; + unsigned long timetosend; /* jiffies */ + struct i40iw_puda_buf *sqbuf; + u32 type; + u32 retrycount; + u32 retranscount; + u32 context; + u32 send_retrans; + int close_when_complete; +}; + +#define I40IW_DEFAULT_RETRYS 64 +#define I40IW_DEFAULT_RETRANS 8 +#define I40IW_DEFAULT_TTL 0x40 +#define I40IW_DEFAULT_RTT_VAR 0x6 +#define I40IW_DEFAULT_SS_THRESH 0x3FFFFFFF +#define I40IW_DEFAULT_REXMIT_THRESH 8 + +#define I40IW_RETRY_TIMEOUT HZ +#define I40IW_SHORT_TIME 10 +#define I40IW_LONG_TIME (2 * HZ) +#define I40IW_MAX_TIMEOUT ((unsigned long)(12 * HZ)) + +#define I40IW_CM_HASHTABLE_SIZE 1024 +#define I40IW_CM_TCP_TIMER_INTERVAL 3000 +#define I40IW_CM_DEFAULT_MTU 1540 +#define I40IW_CM_DEFAULT_FRAME_CNT 10 +#define I40IW_CM_THREAD_STACK_SIZE 256 
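+/* default TCP parameters used by the CM when bringing up a connection */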
+#define I40IW_CM_DEFAULT_RCV_WND 64240 +#define I40IW_CM_DEFAULT_RCV_WND_SCALED 0x3fffc +#define I40IW_CM_DEFAULT_RCV_WND_SCALE 2 +#define I40IW_CM_DEFAULT_FREE_PKTS 0x000A +#define I40IW_CM_FREE_PKT_LO_WATERMARK 2 + +#define I40IW_CM_DEFAULT_MSS 536 + +#define I40IW_CM_DEF_SEQ 0x159bf75f +#define I40IW_CM_DEF_LOCAL_ID 0x3b47 + +#define I40IW_CM_DEF_SEQ2 0x18ed5740 +#define I40IW_CM_DEF_LOCAL_ID2 0xb807 +#define MAX_CM_BUFFER (I40IW_MAX_IETF_SIZE + IETF_MAX_PRIV_DATA_LEN) + +typedef u32 i40iw_addr_t; + +#define i40iw_cm_tsa_context i40iw_qp_context + +struct i40iw_qp; + +/* cm node transition states */ +enum i40iw_cm_node_state { + I40IW_CM_STATE_UNKNOWN, + I40IW_CM_STATE_INITED, + I40IW_CM_STATE_LISTENING, + I40IW_CM_STATE_SYN_RCVD, + I40IW_CM_STATE_SYN_SENT, + I40IW_CM_STATE_ONE_SIDE_ESTABLISHED, + I40IW_CM_STATE_ESTABLISHED, + I40IW_CM_STATE_ACCEPTING, + I40IW_CM_STATE_MPAREQ_SENT, + I40IW_CM_STATE_MPAREQ_RCVD, + I40IW_CM_STATE_MPAREJ_RCVD, + I40IW_CM_STATE_OFFLOADED, + I40IW_CM_STATE_FIN_WAIT1, + I40IW_CM_STATE_FIN_WAIT2, + I40IW_CM_STATE_CLOSE_WAIT, + I40IW_CM_STATE_TIME_WAIT, + I40IW_CM_STATE_LAST_ACK, + I40IW_CM_STATE_CLOSING, + I40IW_CM_STATE_LISTENER_DESTROYED, + I40IW_CM_STATE_CLOSED +}; + +enum mpa_frame_version { + IETF_MPA_V1 = 1, + IETF_MPA_V2 = 2 +}; + +enum mpa_frame_key { + MPA_KEY_REQUEST, + MPA_KEY_REPLY +}; + +enum send_rdma0 { + SEND_RDMA_READ_ZERO = 1, + SEND_RDMA_WRITE_ZERO = 2 +}; + +enum i40iw_tcpip_pkt_type { + I40IW_PKT_TYPE_UNKNOWN, + I40IW_PKT_TYPE_SYN, + I40IW_PKT_TYPE_SYNACK, + I40IW_PKT_TYPE_ACK, + I40IW_PKT_TYPE_FIN, + I40IW_PKT_TYPE_RST +}; + +/* CM context params */ +struct i40iw_cm_tcp_context { + u8 client; + + u32 loc_seq_num; + u32 loc_ack_num; + u32 rem_ack_num; + u32 rcv_nxt; + + u32 loc_id; + u32 rem_id; + + u32 snd_wnd; + u32 max_snd_wnd; + + u32 rcv_wnd; + u32 mss; + u8 snd_wscale; + u8 rcv_wscale; +}; + +enum i40iw_cm_listener_state { + I40IW_CM_LISTENER_PASSIVE_STATE = 1, + I40IW_CM_LISTENER_ACTIVE_STATE = 2, + I40IW_CM_LISTENER_EITHER_STATE = 3 +}; + +struct i40iw_cm_listener { + struct list_head list; + struct i40iw_cm_core *cm_core; + u8 loc_mac[ETH_ALEN]; + u32 loc_addr[4]; + u16 loc_port; + struct iw_cm_id *cm_id; + atomic_t ref_count; + struct i40iw_device *iwdev; + atomic_t pend_accepts_cnt; + int backlog; + enum i40iw_cm_listener_state listener_state; + u32 reused_node; + u8 user_pri; + u8 tos; + u16 vlan_id; + bool qhash_set; + bool ipv4; + struct list_head child_listen_list; + +}; + +struct i40iw_kmem_info { + void *addr; + u32 size; +}; + +/* per connection node and node state information */ +struct i40iw_cm_node { + u32 loc_addr[4], rem_addr[4]; + u16 loc_port, rem_port; + u16 vlan_id; + enum i40iw_cm_node_state state; + u8 loc_mac[ETH_ALEN]; + u8 rem_mac[ETH_ALEN]; + atomic_t ref_count; + struct i40iw_qp *iwqp; + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + struct i40iw_cm_tcp_context tcp_cntxt; + struct i40iw_cm_core *cm_core; + struct i40iw_cm_node *loopbackpartner; + struct i40iw_timer_entry *send_entry; + struct i40iw_timer_entry *close_entry; + spinlock_t retrans_list_lock; /* cm transmit packet */ + enum send_rdma0 send_rdma0_op; + u16 ird_size; + u16 ord_size; + u16 mpav2_ird_ord; + struct iw_cm_id *cm_id; + struct list_head list; + bool accelerated; + struct i40iw_cm_listener *listener; + int apbvt_set; + int accept_pend; + struct list_head timer_entry; + struct list_head reset_entry; + struct list_head teardown_entry; + atomic_t passive_state; + bool qhash_set; + u8 user_pri; + u8 tos; + bool ipv4; + bool 
snd_mark_en; + u16 lsmm_size; + enum mpa_frame_version mpa_frame_rev; + struct i40iw_kmem_info pdata; + union { + struct ietf_mpa_v1 mpa_frame; + struct ietf_mpa_v2 mpa_v2_frame; + }; + + u8 pdata_buf[IETF_MAX_PRIV_DATA_LEN]; + struct i40iw_kmem_info mpa_hdr; + bool ack_rcvd; +}; + +/* structure for client or CM to fill when making CM api calls. */ +/* - only need to set relevant data, based on op. */ +struct i40iw_cm_info { + struct iw_cm_id *cm_id; + u16 loc_port; + u16 rem_port; + u32 loc_addr[4]; + u32 rem_addr[4]; + u16 vlan_id; + int backlog; + u8 user_pri; + u8 tos; + bool ipv4; +}; + +/* CM event codes */ +enum i40iw_cm_event_type { + I40IW_CM_EVENT_UNKNOWN, + I40IW_CM_EVENT_ESTABLISHED, + I40IW_CM_EVENT_MPA_REQ, + I40IW_CM_EVENT_MPA_CONNECT, + I40IW_CM_EVENT_MPA_ACCEPT, + I40IW_CM_EVENT_MPA_REJECT, + I40IW_CM_EVENT_MPA_ESTABLISHED, + I40IW_CM_EVENT_CONNECTED, + I40IW_CM_EVENT_RESET, + I40IW_CM_EVENT_ABORTED +}; + +/* event to post to CM event handler */ +struct i40iw_cm_event { + enum i40iw_cm_event_type type; + struct i40iw_cm_info cm_info; + struct work_struct event_work; + struct i40iw_cm_node *cm_node; +}; + +struct i40iw_cm_core { + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + + struct list_head listen_nodes; + struct list_head accelerated_list; + struct list_head non_accelerated_list; + + struct timer_list tcp_timer; + + struct workqueue_struct *event_wq; + struct workqueue_struct *disconn_wq; + + spinlock_t ht_lock; /* manage hash table */ + spinlock_t listen_list_lock; /* listen list */ + spinlock_t apbvt_lock; /*manage apbvt entries*/ + + unsigned long ports_in_use[BITS_TO_LONGS(MAX_PORTS)]; + + u64 stats_nodes_created; + u64 stats_nodes_destroyed; + u64 stats_listen_created; + u64 stats_listen_destroyed; + u64 stats_listen_nodes_created; + u64 stats_listen_nodes_destroyed; + u64 stats_loopbacks; + u64 stats_accepts; + u64 stats_rejects; + u64 stats_connect_errs; + u64 stats_passive_errs; + u64 stats_pkt_retrans; + u64 stats_backlog_drops; +}; + +int i40iw_schedule_cm_timer(struct i40iw_cm_node *cm_node, + struct i40iw_puda_buf *sqbuf, + enum i40iw_timer_type type, + int send_retrans, + int close_when_complete); + +int i40iw_accept(struct iw_cm_id *, struct iw_cm_conn_param *); +int i40iw_reject(struct iw_cm_id *, const void *, u8); +int i40iw_connect(struct iw_cm_id *, struct iw_cm_conn_param *); +int i40iw_create_listen(struct iw_cm_id *, int); +int i40iw_destroy_listen(struct iw_cm_id *); + +int i40iw_cm_start(struct i40iw_device *); +int i40iw_cm_stop(struct i40iw_device *); + +int i40iw_arp_table(struct i40iw_device *iwdev, + u32 *ip_addr, + bool ipv4, + u8 *mac_addr, + u32 action); + +void i40iw_if_notify(struct i40iw_device *iwdev, struct net_device *netdev, + u32 *ipaddr, bool ipv4, bool ifup); +void i40iw_cm_teardown_connections(struct i40iw_device *iwdev, u32 *ipaddr, + struct i40iw_cm_info *nfo, + bool disconnect_all); +bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port); +#endif /* I40IW_CM_H */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c new file mode 100644 index 000000000..4d841a3c6 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -0,0 +1,5198 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" + +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_vf.h" +#include "i40iw_virtchnl.h" + +/** + * i40iw_insert_wqe_hdr - write wqe header + * @wqe: cqp wqe for header + * @header: header for the cqp wqe + */ +void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) +{ + wmb(); /* make sure WQE is populated before polarity is set */ + set_64bit_val(wqe, 24, header); +} + +void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev) +{ + if (cqp_timeout->compl_cqp_cmds != dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]) { + cqp_timeout->compl_cqp_cmds = dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]; + cqp_timeout->count = 0; + } else { + if (dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] != cqp_timeout->compl_cqp_cmds) + cqp_timeout->count++; + } +} + +/** + * i40iw_get_cqp_reg_info - get head and tail for cqp using registers + * @cqp: struct for cqp hw + * @val: cqp tail register value + * @tail:wqtail register value + * @error: cqp processing err + */ +static inline void i40iw_get_cqp_reg_info(struct i40iw_sc_cqp *cqp, + u32 *val, + u32 *tail, + u32 *error) +{ + if (cqp->dev->is_pf) { + *val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPTAIL); + *tail = RS_32(*val, I40E_PFPE_CQPTAIL_WQTAIL); + *error = RS_32(*val, I40E_PFPE_CQPTAIL_CQP_OP_ERR); + } else { + *val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPTAIL1); + *tail = RS_32(*val, I40E_VFPE_CQPTAIL_WQTAIL); + *error = RS_32(*val, I40E_VFPE_CQPTAIL_CQP_OP_ERR); + } +} + +/** + * i40iw_cqp_poll_registers - poll cqp registers + * @cqp: struct for cqp hw + * @tail:wqtail register value + * @count: how many times to try for completion + */ +static enum i40iw_status_code i40iw_cqp_poll_registers( + struct i40iw_sc_cqp *cqp, + u32 tail, + u32 count) +{ + u32 i = 0; + u32 newtail, error, val; + + while (i < count) { + i++; + i40iw_get_cqp_reg_info(cqp, &val, &newtail, &error); + if (error) { + error = (cqp->dev->is_pf) ? 
+ i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES) : + i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1); + return I40IW_ERR_CQP_COMPL_ERROR; + } + if (newtail != tail) { + /* SUCCESS */ + I40IW_RING_MOVE_TAIL(cqp->sq_ring); + cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++; + return 0; + } + udelay(I40IW_SLEEP_COUNT); + } + return I40IW_ERR_TIMEOUT; +} + +/** + * i40iw_sc_parse_fpm_commit_buf - parse fpm commit buffer + * @buf: ptr to fpm commit buffer + * @info: ptr to i40iw_hmc_obj_info struct + * @sd: number of SDs for HMC objects + * + * parses fpm commit info and copy base value + * of hmc objects in hmc_info + */ +static enum i40iw_status_code i40iw_sc_parse_fpm_commit_buf( + u64 *buf, + struct i40iw_hmc_obj_info *info, + u32 *sd) +{ + u64 temp; + u64 size; + u64 base = 0; + u32 i, j; + u32 k = 0; + + /* copy base values in obj_info */ + for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; i++, j += 8) { + if ((i == I40IW_HMC_IW_SRQ) || + (i == I40IW_HMC_IW_FSIMC) || + (i == I40IW_HMC_IW_FSIAV)) { + info[i].base = 0; + info[i].cnt = 0; + continue; + } + get_64bit_val(buf, j, &temp); + info[i].base = RS_64_1(temp, 32) * 512; + if (info[i].base > base) { + base = info[i].base; + k = i; + } + if (i == I40IW_HMC_IW_APBVT_ENTRY) { + info[i].cnt = 1; + continue; + } + if (i == I40IW_HMC_IW_QP) + info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); + else if (i == I40IW_HMC_IW_CQ) + info[i].cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); + else + info[i].cnt = (u32)(temp); + } + size = info[k].cnt * info[k].size + info[k].base; + if (size & 0x1FFFFF) + *sd = (u32)((size >> 21) + 1); /* add 1 for remainder */ + else + *sd = (u32)(size >> 21); + + return 0; +} + +/** + * i40iw_sc_decode_fpm_query() - Decode a 64 bit value into max count and size + * @buf: ptr to fpm query buffer + * @buf_idx: index into buf + * @info: ptr to i40iw_hmc_obj_info struct + * @rsrc_idx: resource index into info + * + * Decode a 64 bit value from fpm query buffer into max count and size + */ +static u64 i40iw_sc_decode_fpm_query(u64 *buf, + u32 buf_idx, + struct i40iw_hmc_obj_info *obj_info, + u32 rsrc_idx) +{ + u64 temp; + u32 size; + + get_64bit_val(buf, buf_idx, &temp); + obj_info[rsrc_idx].max_cnt = (u32)temp; + size = (u32)RS_64_1(temp, 32); + obj_info[rsrc_idx].size = LS_64_1(1, size); + + return temp; +} + +/** + * i40iw_sc_parse_fpm_query_buf() - parses fpm query buffer + * @buf: ptr to fpm query buffer + * @info: ptr to i40iw_hmc_obj_info struct + * @hmc_fpm_misc: ptr to fpm data + * + * parses fpm query buffer and copy max_cnt and + * size value of hmc objects in hmc_info + */ +static enum i40iw_status_code i40iw_sc_parse_fpm_query_buf( + u64 *buf, + struct i40iw_hmc_info *hmc_info, + struct i40iw_hmc_fpm_misc *hmc_fpm_misc) +{ + struct i40iw_hmc_obj_info *obj_info; + u64 temp; + u32 size; + u16 max_pe_sds; + + obj_info = hmc_info->hmc_obj; + + get_64bit_val(buf, 0, &temp); + hmc_info->first_sd_index = (u16)RS_64(temp, I40IW_QUERY_FPM_FIRST_PE_SD_INDEX); + max_pe_sds = (u16)RS_64(temp, I40IW_QUERY_FPM_MAX_PE_SDS); + + /* Reduce SD count for VFs by 1 to account for PBLE backing page rounding */ + if (hmc_info->hmc_fn_id >= I40IW_FIRST_VF_FPM_ID) + max_pe_sds--; + hmc_fpm_misc->max_sds = max_pe_sds; + hmc_info->sd_table.sd_cnt = max_pe_sds + hmc_info->first_sd_index; + + get_64bit_val(buf, 8, &temp); + obj_info[I40IW_HMC_IW_QP].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_QPS); + size = (u32)RS_64_1(temp, 32); + obj_info[I40IW_HMC_IW_QP].size = LS_64_1(1, size); + + get_64bit_val(buf, 16, &temp); 
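+	/*
+	 * The quadword at offset 16 describes the CQ object: max count in
+	 * the lower dword, log2 of the per-object size in the upper dword.
+	 */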
+ obj_info[I40IW_HMC_IW_CQ].max_cnt = (u32)RS_64(temp, I40IW_QUERY_FPM_MAX_CQS); + size = (u32)RS_64_1(temp, 32); + obj_info[I40IW_HMC_IW_CQ].size = LS_64_1(1, size); + + i40iw_sc_decode_fpm_query(buf, 32, obj_info, I40IW_HMC_IW_HTE); + i40iw_sc_decode_fpm_query(buf, 40, obj_info, I40IW_HMC_IW_ARP); + + obj_info[I40IW_HMC_IW_APBVT_ENTRY].size = 8192; + obj_info[I40IW_HMC_IW_APBVT_ENTRY].max_cnt = 1; + + i40iw_sc_decode_fpm_query(buf, 48, obj_info, I40IW_HMC_IW_MR); + i40iw_sc_decode_fpm_query(buf, 56, obj_info, I40IW_HMC_IW_XF); + + get_64bit_val(buf, 64, &temp); + obj_info[I40IW_HMC_IW_XFFL].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_XFFL].size = 4; + hmc_fpm_misc->xf_block_size = RS_64(temp, I40IW_QUERY_FPM_XFBLOCKSIZE); + if (!hmc_fpm_misc->xf_block_size) + return I40IW_ERR_INVALID_SIZE; + + i40iw_sc_decode_fpm_query(buf, 72, obj_info, I40IW_HMC_IW_Q1); + + get_64bit_val(buf, 80, &temp); + obj_info[I40IW_HMC_IW_Q1FL].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_Q1FL].size = 4; + hmc_fpm_misc->q1_block_size = RS_64(temp, I40IW_QUERY_FPM_Q1BLOCKSIZE); + if (!hmc_fpm_misc->q1_block_size) + return I40IW_ERR_INVALID_SIZE; + + i40iw_sc_decode_fpm_query(buf, 88, obj_info, I40IW_HMC_IW_TIMER); + + get_64bit_val(buf, 112, &temp); + obj_info[I40IW_HMC_IW_PBLE].max_cnt = (u32)temp; + obj_info[I40IW_HMC_IW_PBLE].size = 8; + + get_64bit_val(buf, 120, &temp); + hmc_fpm_misc->max_ceqs = (u8)RS_64(temp, I40IW_QUERY_FPM_MAX_CEQS); + hmc_fpm_misc->ht_multiplier = RS_64(temp, I40IW_QUERY_FPM_HTMULTIPLIER); + hmc_fpm_misc->timer_bucket = RS_64(temp, I40IW_QUERY_FPM_TIMERBUCKET); + + return 0; +} + +/** + * i40iw_fill_qos_list - Change all unknown qs handles to available ones + * @qs_list: list of qs_handles to be fixed with valid qs_handles + */ +static void i40iw_fill_qos_list(u16 *qs_list) +{ + u16 qshandle = qs_list[0]; + int i; + + for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { + if (qs_list[i] == QS_HANDLE_UNKNOWN) + qs_list[i] = qshandle; + else + qshandle = qs_list[i]; + } +} + +/** + * i40iw_qp_from_entry - Given entry, get to the qp structure + * @entry: Points to list of qp structure + */ +static struct i40iw_sc_qp *i40iw_qp_from_entry(struct list_head *entry) +{ + if (!entry) + return NULL; + + return (struct i40iw_sc_qp *)((char *)entry - offsetof(struct i40iw_sc_qp, list)); +} + +/** + * i40iw_get_qp - get the next qp from the list given current qp + * @head: Listhead of qp's + * @qp: current qp + */ +static struct i40iw_sc_qp *i40iw_get_qp(struct list_head *head, struct i40iw_sc_qp *qp) +{ + struct list_head *entry = NULL; + struct list_head *lastentry; + + if (list_empty(head)) + return NULL; + + if (!qp) { + entry = head->next; + } else { + lastentry = &qp->list; + entry = (lastentry != head) ? 
lastentry->next : NULL; + } + + return i40iw_qp_from_entry(entry); +} + +/** + * i40iw_change_l2params - given the new l2 parameters, change all qp + * @vsi: pointer to the vsi structure + * @l2params: New paramaters from l2 + */ +void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params) +{ + struct i40iw_sc_dev *dev = vsi->dev; + struct i40iw_sc_qp *qp = NULL; + bool qs_handle_change = false; + unsigned long flags; + u16 qs_handle; + int i; + + if (vsi->mtu != l2params->mtu) { + vsi->mtu = l2params->mtu; + i40iw_reinitialize_ieq(dev); + } + + i40iw_fill_qos_list(l2params->qs_handle_list); + for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { + qs_handle = l2params->qs_handle_list[i]; + if (vsi->qos[i].qs_handle != qs_handle) + qs_handle_change = true; + spin_lock_irqsave(&vsi->qos[i].lock, flags); + qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); + while (qp) { + if (qs_handle_change) { + qp->qs_handle = qs_handle; + /* issue cqp suspend command */ + i40iw_qp_suspend_resume(dev, qp, true); + } + qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); + } + spin_unlock_irqrestore(&vsi->qos[i].lock, flags); + vsi->qos[i].qs_handle = qs_handle; + } +} + +/** + * i40iw_qp_rem_qos - remove qp from qos lists during destroy qp + * @qp: qp to be removed from qos + */ +void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp) +{ + struct i40iw_sc_vsi *vsi = qp->vsi; + unsigned long flags; + + if (!qp->on_qoslist) + return; + spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags); + list_del(&qp->list); + spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags); +} + +/** + * i40iw_qp_add_qos - called during setctx fot qp to be added to qos + * @qp: qp to be added to qos + */ +void i40iw_qp_add_qos(struct i40iw_sc_qp *qp) +{ + struct i40iw_sc_vsi *vsi = qp->vsi; + unsigned long flags; + + if (qp->on_qoslist) + return; + spin_lock_irqsave(&vsi->qos[qp->user_pri].lock, flags); + qp->qs_handle = vsi->qos[qp->user_pri].qs_handle; + list_add(&qp->list, &vsi->qos[qp->user_pri].qplist); + qp->on_qoslist = true; + spin_unlock_irqrestore(&vsi->qos[qp->user_pri].lock, flags); +} + +/** + * i40iw_sc_pd_init - initialize sc pd struct + * @dev: sc device struct + * @pd: sc pd ptr + * @pd_id: pd_id for allocated pd + * @abi_ver: ABI version from user context, -1 if not valid + */ +static void i40iw_sc_pd_init(struct i40iw_sc_dev *dev, + struct i40iw_sc_pd *pd, + u16 pd_id, + int abi_ver) +{ + pd->size = sizeof(*pd); + pd->pd_id = pd_id; + pd->abi_ver = abi_ver; + pd->dev = dev; +} + +/** + * i40iw_get_encoded_wqe_size - given wq size, returns hardware encoded size + * @wqsize: size of the wq (sq, rq, srq) to encoded_size + * @cqpsq: encoded size for sq for cqp as its encoded size is 1+ other wq's + */ +u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq) +{ + u8 encoded_size = 0; + + /* cqp sq's hw coded value starts from 1 for size of 4 + * while it starts from 0 for qp' wq's. + */ + if (cqpsq) + encoded_size = 1; + wqsize >>= 2; + while (wqsize >>= 1) + encoded_size++; + return encoded_size; +} + +/** + * i40iw_sc_cqp_init - Initialize buffers for a control Queue Pair + * @cqp: IWARP control queue pair pointer + * @info: IWARP control queue pair init info pointer + * + * Initializes the object and context buffers for a control Queue Pair. 
+ */ +static enum i40iw_status_code i40iw_sc_cqp_init(struct i40iw_sc_cqp *cqp, + struct i40iw_cqp_init_info *info) +{ + u8 hw_sq_size; + + if ((info->sq_size > I40IW_CQP_SW_SQSIZE_2048) || + (info->sq_size < I40IW_CQP_SW_SQSIZE_4) || + ((info->sq_size & (info->sq_size - 1)))) + return I40IW_ERR_INVALID_SIZE; + + hw_sq_size = i40iw_get_encoded_wqe_size(info->sq_size, true); + cqp->size = sizeof(*cqp); + cqp->sq_size = info->sq_size; + cqp->hw_sq_size = hw_sq_size; + cqp->sq_base = info->sq; + cqp->host_ctx = info->host_ctx; + cqp->sq_pa = info->sq_pa; + cqp->host_ctx_pa = info->host_ctx_pa; + cqp->dev = info->dev; + cqp->struct_ver = info->struct_ver; + cqp->scratch_array = info->scratch_array; + cqp->polarity = 0; + cqp->en_datacenter_tcp = info->en_datacenter_tcp; + cqp->enabled_vf_count = info->enabled_vf_count; + cqp->hmc_profile = info->hmc_profile; + info->dev->cqp = cqp; + + I40IW_RING_INIT(cqp->sq_ring, cqp->sq_size); + cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS] = 0; + cqp->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS] = 0; + INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); /* for the cqp commands backlog. */ + + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPTAIL, 0); + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, 0); + + i40iw_debug(cqp->dev, I40IW_DEBUG_WQE, + "%s: sq_size[%04d] hw_sq_size[%04d] sq_base[%p] sq_pa[%llxh] cqp[%p] polarity[x%04X]\n", + __func__, cqp->sq_size, cqp->hw_sq_size, + cqp->sq_base, cqp->sq_pa, cqp, cqp->polarity); + return 0; +} + +/** + * i40iw_sc_cqp_create - create cqp during bringup + * @cqp: struct for cqp hw + * @maj_err: If error, major err number + * @min_err: If error, minor err number + */ +static enum i40iw_status_code i40iw_sc_cqp_create(struct i40iw_sc_cqp *cqp, + u16 *maj_err, + u16 *min_err) +{ + u64 temp; + u32 cnt = 0, p1, p2, val = 0, err_code; + enum i40iw_status_code ret_code; + + *maj_err = 0; + *min_err = 0; + + ret_code = i40iw_allocate_dma_mem(cqp->dev->hw, + &cqp->sdbuf, + I40IW_UPDATE_SD_BUF_SIZE * cqp->sq_size, + I40IW_SD_BUF_ALIGNMENT); + + if (ret_code) + goto exit; + + temp = LS_64(cqp->hw_sq_size, I40IW_CQPHC_SQSIZE) | + LS_64(cqp->struct_ver, I40IW_CQPHC_SVER); + + set_64bit_val(cqp->host_ctx, 0, temp); + set_64bit_val(cqp->host_ctx, 8, cqp->sq_pa); + temp = LS_64(cqp->enabled_vf_count, I40IW_CQPHC_ENABLED_VFS) | + LS_64(cqp->hmc_profile, I40IW_CQPHC_HMC_PROFILE); + set_64bit_val(cqp->host_ctx, 16, temp); + set_64bit_val(cqp->host_ctx, 24, (uintptr_t)cqp); + set_64bit_val(cqp->host_ctx, 32, 0); + set_64bit_val(cqp->host_ctx, 40, 0); + set_64bit_val(cqp->host_ctx, 48, 0); + set_64bit_val(cqp->host_ctx, 56, 0); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQP_HOST_CTX", + cqp->host_ctx, I40IW_CQP_CTX_SIZE * 8); + + p1 = RS_32_1(cqp->host_ctx_pa, 32); + p2 = (u32)cqp->host_ctx_pa; + + if (cqp->dev->is_pf) { + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, p1); + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, p2); + } else { + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, p1); + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, p2); + } + do { + if (cnt++ > I40IW_DONE_COUNT) { + i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); + ret_code = I40IW_ERR_TIMEOUT; + /* + * read PFPE_CQPERRORCODES register to get the minor + * and major error code + */ + if (cqp->dev->is_pf) + err_code = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CQPERRCODES); + else + err_code = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CQPERRCODES1); + *min_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE); + *maj_err = RS_32(err_code, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE); + goto exit; + } + 
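+		/* CQP not ready yet; wait, then re-read CCQPSTATUS until non-zero */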
udelay(I40IW_SLEEP_COUNT); + if (cqp->dev->is_pf) + val = i40iw_rd32(cqp->dev->hw, I40E_PFPE_CCQPSTATUS); + else + val = i40iw_rd32(cqp->dev->hw, I40E_VFPE_CCQPSTATUS1); + } while (!val); + +exit: + if (!ret_code) + cqp->process_cqp_sds = i40iw_update_sds_noccq; + return ret_code; +} + +/** + * i40iw_sc_cqp_post_sq - post of cqp's sq + * @cqp: struct for cqp hw + */ +void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp) +{ + if (cqp->dev->is_pf) + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CQPDB, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring)); + else + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CQPDB1, I40IW_RING_GETCURRENT_HEAD(cqp->sq_ring)); + + i40iw_debug(cqp->dev, + I40IW_DEBUG_WQE, + "%s: HEAD_TAIL[%04d,%04d,%04d]\n", + __func__, + cqp->sq_ring.head, + cqp->sq_ring.tail, + cqp->sq_ring.size); +} + +/** + * i40iw_sc_cqp_get_next_send_wqe_idx - get next WQE on CQP SQ and pass back the index + * @cqp: pointer to CQP structure + * @scratch: private data for CQP WQE + * @wqe_idx: WQE index for next WQE on CQP SQ + */ +static u64 *i40iw_sc_cqp_get_next_send_wqe_idx(struct i40iw_sc_cqp *cqp, + u64 scratch, u32 *wqe_idx) +{ + u64 *wqe = NULL; + enum i40iw_status_code ret_code; + + if (I40IW_RING_FULL_ERR(cqp->sq_ring)) { + i40iw_debug(cqp->dev, + I40IW_DEBUG_WQE, + "%s: ring is full head %x tail %x size %x\n", + __func__, + cqp->sq_ring.head, + cqp->sq_ring.tail, + cqp->sq_ring.size); + return NULL; + } + I40IW_ATOMIC_RING_MOVE_HEAD(cqp->sq_ring, *wqe_idx, ret_code); + cqp->dev->cqp_cmd_stats[OP_REQUESTED_COMMANDS]++; + if (ret_code) + return NULL; + if (!*wqe_idx) + cqp->polarity = !cqp->polarity; + + wqe = cqp->sq_base[*wqe_idx].elem; + cqp->scratch_array[*wqe_idx] = scratch; + I40IW_CQP_INIT_WQE(wqe); + + return wqe; +} + +/** + * i40iw_sc_cqp_get_next_send_wqe - get next wqe on cqp sq + * @cqp: struct for cqp hw + * @scratch: private data for CQP WQE + */ +u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch) +{ + u32 wqe_idx; + + return i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); +} + +/** + * i40iw_sc_cqp_destroy - destroy cqp during close + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_cqp_destroy(struct i40iw_sc_cqp *cqp) +{ + u32 cnt = 0, val = 1; + enum i40iw_status_code ret_code = 0; + u32 cqpstat_addr; + + if (cqp->dev->is_pf) { + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPHIGH, 0); + i40iw_wr32(cqp->dev->hw, I40E_PFPE_CCQPLOW, 0); + cqpstat_addr = I40E_PFPE_CCQPSTATUS; + } else { + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPHIGH1, 0); + i40iw_wr32(cqp->dev->hw, I40E_VFPE_CCQPLOW1, 0); + cqpstat_addr = I40E_VFPE_CCQPSTATUS1; + } + do { + if (cnt++ > I40IW_DONE_COUNT) { + ret_code = I40IW_ERR_TIMEOUT; + break; + } + udelay(I40IW_SLEEP_COUNT); + val = i40iw_rd32(cqp->dev->hw, cqpstat_addr); + } while (val); + + i40iw_free_dma_mem(cqp->dev->hw, &cqp->sdbuf); + return ret_code; +} + +/** + * i40iw_sc_ccq_arm - enable intr for control cq + * @ccq: ccq sc struct + */ +static void i40iw_sc_ccq_arm(struct i40iw_sc_cq *ccq) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next_se; + u8 arm_seq_num; + + /* write to cq doorbell shadow area */ + /* arm next se should always be zero */ + get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val); + + sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT); + arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE); + + arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM); + arm_seq_num++; + + temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) | + LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) | + 
LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) | + LS_64(1, I40IW_CQ_DBSA_ARM_NEXT); + + set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val); + + wmb(); /* make sure shadow area is updated before arming */ + + if (ccq->dev->is_pf) + i40iw_wr32(ccq->dev->hw, I40E_PFPE_CQARM, ccq->cq_uk.cq_id); + else + i40iw_wr32(ccq->dev->hw, I40E_VFPE_CQARM1, ccq->cq_uk.cq_id); +} + +/** + * i40iw_sc_ccq_get_cqe_info - get ccq's cq entry + * @ccq: ccq sc struct + * @info: completion q entry to return + */ +static enum i40iw_status_code i40iw_sc_ccq_get_cqe_info( + struct i40iw_sc_cq *ccq, + struct i40iw_ccq_cqe_info *info) +{ + u64 qp_ctx, temp, temp1; + u64 *cqe; + struct i40iw_sc_cqp *cqp; + u32 wqe_idx; + u8 polarity; + enum i40iw_status_code ret_code = 0; + + if (ccq->cq_uk.avoid_mem_cflct) + cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk); + else + cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk); + + get_64bit_val(cqe, 24, &temp); + polarity = (u8)RS_64(temp, I40IW_CQ_VALID); + if (polarity != ccq->cq_uk.polarity) + return I40IW_ERR_QUEUE_EMPTY; + + get_64bit_val(cqe, 8, &qp_ctx); + cqp = (struct i40iw_sc_cqp *)(unsigned long)qp_ctx; + info->error = (bool)RS_64(temp, I40IW_CQ_ERROR); + info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR); + if (info->error) { + info->maj_err_code = (u16)RS_64(temp, I40IW_CQ_MAJERR); + info->min_err_code = (u16)RS_64(temp, I40IW_CQ_MINERR); + } + wqe_idx = (u32)RS_64(temp, I40IW_CQ_WQEIDX); + info->scratch = cqp->scratch_array[wqe_idx]; + + get_64bit_val(cqe, 16, &temp1); + info->op_ret_val = (u32)RS_64(temp1, I40IW_CCQ_OPRETVAL); + get_64bit_val(cqp->sq_base[wqe_idx].elem, 24, &temp1); + info->op_code = (u8)RS_64(temp1, I40IW_CQPSQ_OPCODE); + info->cqp = cqp; + + /* move the head for cq */ + I40IW_RING_MOVE_HEAD(ccq->cq_uk.cq_ring, ret_code); + if (I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring) == 0) + ccq->cq_uk.polarity ^= 1; + + /* update cq tail in cq shadow memory also */ + I40IW_RING_MOVE_TAIL(ccq->cq_uk.cq_ring); + set_64bit_val(ccq->cq_uk.shadow_area, + 0, + I40IW_RING_GETCURRENT_HEAD(ccq->cq_uk.cq_ring)); + wmb(); /* write shadow area before tail */ + I40IW_RING_MOVE_TAIL(cqp->sq_ring); + ccq->dev->cqp_cmd_stats[OP_COMPLETED_COMMANDS]++; + + return ret_code; +} + +/** + * i40iw_sc_poll_for_cqp_op_done - Waits for last write to complete in CQP SQ + * @cqp: struct for cqp hw + * @op_code: cqp opcode for completion + * @info: completion q entry to return + */ +static enum i40iw_status_code i40iw_sc_poll_for_cqp_op_done( + struct i40iw_sc_cqp *cqp, + u8 op_code, + struct i40iw_ccq_cqe_info *compl_info) +{ + struct i40iw_ccq_cqe_info info; + struct i40iw_sc_cq *ccq; + enum i40iw_status_code ret_code = 0; + u32 cnt = 0; + + memset(&info, 0, sizeof(info)); + ccq = cqp->dev->ccq; + while (1) { + if (cnt++ > I40IW_DONE_COUNT) + return I40IW_ERR_TIMEOUT; + + if (i40iw_sc_ccq_get_cqe_info(ccq, &info)) { + udelay(I40IW_SLEEP_COUNT); + continue; + } + + if (info.error) { + ret_code = I40IW_ERR_CQP_COMPL_ERROR; + break; + } + /* check if opcode is cq create */ + if (op_code != info.op_code) { + i40iw_debug(cqp->dev, I40IW_DEBUG_WQE, + "%s: opcode mismatch for my op code 0x%x, returned opcode %x\n", + __func__, op_code, info.op_code); + } + /* success, exit out of the loop */ + if (op_code == info.op_code) + break; + } + + if (compl_info) + memcpy(compl_info, &info, sizeof(*compl_info)); + + return ret_code; +} + +/** + * i40iw_sc_manage_push_page - Handle push page + * @cqp: struct for cqp hw + * @info: push page info + * @scratch: u64 saved to be 
used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_manage_push_page( + struct i40iw_sc_cqp *cqp, + struct i40iw_cqp_manage_push_page_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 header; + + if (info->push_idx >= I40IW_MAX_PUSH_PAGE_COUNT) + return I40IW_ERR_INVALID_PUSH_PAGE_INDEX; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, info->qs_handle); + + header = LS_64(info->push_idx, I40IW_CQPSQ_MPP_PPIDX) | + LS_64(I40IW_CQP_OP_MANAGE_PUSH_PAGES, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | + LS_64(info->free_page, I40IW_CQPSQ_MPP_FREE_PAGE); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_PUSH_PAGES WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_manage_hmc_pm_func_table - manage of function table + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @vf_index: vf index for cqp + * @free_pm_fcn: function number + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 vf_index, + bool free_pm_fcn, + bool post_sq) +{ + u64 *wqe; + u64 header; + + if (vf_index >= I40IW_MAX_VF_PER_PF) + return I40IW_ERR_INVALID_VF_ID; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + header = LS_64(vf_index, I40IW_CQPSQ_MHMC_VFIDX) | + LS_64(I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, I40IW_CQPSQ_OPCODE) | + LS_64(free_pm_fcn, I40IW_CQPSQ_MHMC_FREEPMFN) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_set_hmc_resource_profile - cqp wqe for hmc profile + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_profile_type: type of profile to set + * @vf_num: vf number for profile + * @post_sq: flag for cqp db to ring + * @poll_registers: flag to poll register for cqp completion + */ +static enum i40iw_status_code i40iw_sc_set_hmc_resource_profile( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_profile_type, + u8 vf_num, bool post_sq, + bool poll_registers) +{ + u64 *wqe; + u64 header; + u32 val, tail, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, + (LS_64(hmc_profile_type, I40IW_CQPSQ_SHMCRP_HMC_PROFILE) | + LS_64(vf_num, I40IW_CQPSQ_SHMCRP_VFNUM))); + + header = LS_64(I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_HMC_PM_FUNC_TABLE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + if (poll_registers) + ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000000); + else + ret_code = i40iw_sc_poll_for_cqp_op_done(cqp, + I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, + NULL); + } + + return ret_code; +} + +/** + * i40iw_sc_manage_hmc_pm_func_table_done - wait for cqp wqe completion for 
function table + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_manage_hmc_pm_func_table_done(struct i40iw_sc_cqp *cqp) +{ + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE, NULL); +} + +/** + * i40iw_sc_commit_fpm_values_done - wait for cqp eqe completion for fpm commit + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_commit_fpm_values_done(struct i40iw_sc_cqp *cqp) +{ + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_COMMIT_FPM_VALUES, NULL); +} + +/** + * i40iw_sc_commit_fpm_values - cqp wqe for commit fpm values + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_fn_id: hmc function id + * @commit_fpm_mem; Memory for fpm values + * @post_sq: flag for cqp db to ring + * @wait_type: poll ccq or cqp registers for cqp completion + */ +static enum i40iw_status_code i40iw_sc_commit_fpm_values( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_fn_id, + struct i40iw_dma_mem *commit_fpm_mem, + bool post_sq, + u8 wait_type) +{ + u64 *wqe; + u64 header; + u32 tail, val, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, hmc_fn_id); + set_64bit_val(wqe, 32, commit_fpm_mem->pa); + + header = LS_64(I40IW_CQP_OP_COMMIT_FPM_VALUES, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "COMMIT_FPM_VALUES WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + + if (wait_type == I40IW_CQP_WAIT_POLL_REGS) + ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT); + else if (wait_type == I40IW_CQP_WAIT_POLL_CQ) + ret_code = i40iw_sc_commit_fpm_values_done(cqp); + } + + return ret_code; +} + +/** + * i40iw_sc_query_fpm_values_done - poll for cqp wqe completion for query fpm + * @cqp: struct for cqp hw + */ +static enum i40iw_status_code i40iw_sc_query_fpm_values_done(struct i40iw_sc_cqp *cqp) +{ + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_QUERY_FPM_VALUES, NULL); +} + +/** + * i40iw_sc_query_fpm_values - cqp wqe query fpm values + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_fn_id: hmc function id + * @query_fpm_mem: memory for return fpm values + * @post_sq: flag for cqp db to ring + * @wait_type: poll ccq or cqp registers for cqp completion + */ +static enum i40iw_status_code i40iw_sc_query_fpm_values( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_fn_id, + struct i40iw_dma_mem *query_fpm_mem, + bool post_sq, + u8 wait_type) +{ + u64 *wqe; + u64 header; + u32 tail, val, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, hmc_fn_id); + set_64bit_val(wqe, 32, query_fpm_mem->pa); + + header = LS_64(I40IW_CQP_OP_QUERY_FPM_VALUES, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_FPM WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + /* read the tail from CQP_TAIL register */ + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + if (wait_type == 
I40IW_CQP_WAIT_POLL_REGS) + ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT); + else if (wait_type == I40IW_CQP_WAIT_POLL_CQ) + ret_code = i40iw_sc_query_fpm_values_done(cqp); + } + + return ret_code; +} + +/** + * i40iw_sc_add_arp_cache_entry - cqp wqe add arp cache entry + * @cqp: struct for cqp hw + * @info: arp entry information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_add_arp_cache_entry( + struct i40iw_sc_cqp *cqp, + struct i40iw_add_arp_cache_entry_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp, header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 8, info->reach_max); + + temp = info->mac_addr[5] | + LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | + LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | + LS_64_1(info->mac_addr[0], 40); + + set_64bit_val(wqe, 16, temp); + + header = info->arp_index | + LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | + LS_64((info->permanent ? 1 : 0), I40IW_CQPSQ_MAT_PERMANENT) | + LS_64(1, I40IW_CQPSQ_MAT_ENTRYVALID) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_ENTRY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_del_arp_cache_entry - dele arp cache entry + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @arp_index: arp index to delete arp entry + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_del_arp_cache_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u16 arp_index, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + header = arp_index | + LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ARP_CACHE_DEL_ENTRY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_query_arp_cache_entry - cqp wqe to query arp and arp index + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @arp_index: arp index to delete arp entry + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_query_arp_cache_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u16 arp_index, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + header = arp_index | + LS_64(I40IW_CQP_OP_MANAGE_ARP, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_MAT_QUERY) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QUERY_ARP_CACHE_ENTRY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_manage_apbvt_entry - for adding and deleting apbvt entries + * @cqp: struct for cqp hw + * @info: info for apbvt entry to add or delete + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_manage_apbvt_entry( + struct i40iw_sc_cqp *cqp, + struct 
i40iw_apbvt_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, info->port); + + header = LS_64(I40IW_CQP_OP_MANAGE_APBVT, I40IW_CQPSQ_OPCODE) | + LS_64(info->add, I40IW_CQPSQ_MAPT_ADDPORT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_APBVT WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_manage_qhash_table_entry - manage quad hash entries + * @cqp: struct for cqp hw + * @info: info for quad hash to manage + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + * + * This is called before connection establishment is started. For passive connections, when + * listener is created, it will call with entry type of I40IW_QHASH_TYPE_TCP_SYN with local + * ip address and tcp port. When SYN is received (passive connections) or + * sent (active connections), this routine is called with entry type of + * I40IW_QHASH_TYPE_TCP_ESTABLISHED and quad is passed in info. + * + * When iwarp connection is done and its state moves to RTS, the quad hash entry in + * the hardware will point to iwarp's qp number and requires no calls from the driver. + */ +static enum i40iw_status_code i40iw_sc_manage_qhash_table_entry( + struct i40iw_sc_cqp *cqp, + struct i40iw_qhash_table_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 qw1 = 0; + u64 qw2 = 0; + u64 temp; + struct i40iw_sc_vsi *vsi = info->vsi; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + temp = info->mac_addr[5] | + LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | + LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | + LS_64_1(info->mac_addr[0], 40); + + set_64bit_val(wqe, 0, temp); + + qw1 = LS_64(info->qp_num, I40IW_CQPSQ_QHASH_QPN) | + LS_64(info->dest_port, I40IW_CQPSQ_QHASH_DEST_PORT); + if (info->ipv4_valid) { + set_64bit_val(wqe, + 48, + LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR3)); + } else { + set_64bit_val(wqe, + 56, + LS_64(info->dest_ip[0], I40IW_CQPSQ_QHASH_ADDR0) | + LS_64(info->dest_ip[1], I40IW_CQPSQ_QHASH_ADDR1)); + + set_64bit_val(wqe, + 48, + LS_64(info->dest_ip[2], I40IW_CQPSQ_QHASH_ADDR2) | + LS_64(info->dest_ip[3], I40IW_CQPSQ_QHASH_ADDR3)); + } + qw2 = LS_64(vsi->qos[info->user_pri].qs_handle, I40IW_CQPSQ_QHASH_QS_HANDLE); + if (info->vlan_valid) + qw2 |= LS_64(info->vlan_id, I40IW_CQPSQ_QHASH_VLANID); + set_64bit_val(wqe, 16, qw2); + if (info->entry_type == I40IW_QHASH_TYPE_TCP_ESTABLISHED) { + qw1 |= LS_64(info->src_port, I40IW_CQPSQ_QHASH_SRC_PORT); + if (!info->ipv4_valid) { + set_64bit_val(wqe, + 40, + LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR0) | + LS_64(info->src_ip[1], I40IW_CQPSQ_QHASH_ADDR1)); + set_64bit_val(wqe, + 32, + LS_64(info->src_ip[2], I40IW_CQPSQ_QHASH_ADDR2) | + LS_64(info->src_ip[3], I40IW_CQPSQ_QHASH_ADDR3)); + } else { + set_64bit_val(wqe, + 32, + LS_64(info->src_ip[0], I40IW_CQPSQ_QHASH_ADDR3)); + } + } + + set_64bit_val(wqe, 8, qw1); + temp = LS_64(cqp->polarity, I40IW_CQPSQ_QHASH_WQEVALID) | + LS_64(I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY, I40IW_CQPSQ_QHASH_OPCODE) | + LS_64(info->manage, I40IW_CQPSQ_QHASH_MANAGE) | + LS_64(info->ipv4_valid, I40IW_CQPSQ_QHASH_IPV4VALID) | + LS_64(info->vlan_valid, I40IW_CQPSQ_QHASH_VLANVALID) | + 
LS_64(info->entry_type, I40IW_CQPSQ_QHASH_ENTRYTYPE); + + i40iw_insert_wqe_hdr(wqe, temp); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE_QHASH WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_alloc_local_mac_ipaddr_entry - cqp wqe for loc mac entry + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_alloc_local_mac_ipaddr_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ALLOCATE_LOCAL_MAC_IPADDR WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_add_local_mac_ipaddr_entry - add mac enry + * @cqp: struct for cqp hw + * @info:mac addr info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_add_local_mac_ipaddr_entry( + struct i40iw_sc_cqp *cqp, + struct i40iw_local_mac_ipaddr_entry_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp, header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + temp = info->mac_addr[5] | + LS_64_1(info->mac_addr[4], 8) | + LS_64_1(info->mac_addr[3], 16) | + LS_64_1(info->mac_addr[2], 24) | + LS_64_1(info->mac_addr[1], 32) | + LS_64_1(info->mac_addr[0], 40); + + set_64bit_val(wqe, 32, temp); + + header = LS_64(info->entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) | + LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "ADD_LOCAL_MAC_IPADDR WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_del_local_mac_ipaddr_entry - cqp wqe to dele local mac + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @entry_idx: index of mac entry + * @ ignore_ref_count: to force mac adde delete + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_del_local_mac_ipaddr_entry( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 entry_idx, + u8 ignore_ref_count, + bool post_sq) +{ + u64 *wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(entry_idx, I40IW_CQPSQ_MLIPA_IPTABLEIDX) | + LS_64(I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_MLIPA_FREEENTRY) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | + LS_64(ignore_ref_count, I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "DEL_LOCAL_MAC_IPADDR WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cqp_nop - send a nop wqe + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_cqp_nop(struct i40iw_sc_cqp *cqp, + u64 scratch, + bool post_sq) +{ + u64 
*wqe; + u64 header; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(I40IW_CQP_OP_NOP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "NOP WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_ceq_init - initialize ceq + * @ceq: ceq sc structure + * @info: ceq initialization info + */ +static enum i40iw_status_code i40iw_sc_ceq_init(struct i40iw_sc_ceq *ceq, + struct i40iw_ceq_init_info *info) +{ + u32 pble_obj_cnt; + + if ((info->elem_cnt < I40IW_MIN_CEQ_ENTRIES) || + (info->elem_cnt > I40IW_MAX_CEQ_ENTRIES)) + return I40IW_ERR_INVALID_SIZE; + + if (info->ceq_id >= I40IW_MAX_CEQID) + return I40IW_ERR_INVALID_CEQ_ID; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + ceq->size = sizeof(*ceq); + ceq->ceqe_base = (struct i40iw_ceqe *)info->ceqe_base; + ceq->ceq_id = info->ceq_id; + ceq->dev = info->dev; + ceq->elem_cnt = info->elem_cnt; + ceq->ceq_elem_pa = info->ceqe_pa; + ceq->virtual_map = info->virtual_map; + + ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0); + ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0); + ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL); + + ceq->tph_en = info->tph_en; + ceq->tph_val = info->tph_val; + ceq->polarity = 1; + I40IW_RING_INIT(ceq->ceq_ring, ceq->elem_cnt); + ceq->dev->ceq[info->ceq_id] = ceq; + + return 0; +} + +/** + * i40iw_sc_ceq_create - create ceq wqe + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ceq_create(struct i40iw_sc_ceq *ceq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + cqp = ceq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, ceq->elem_cnt); + set_64bit_val(wqe, 32, (ceq->virtual_map ? 0 : ceq->ceq_elem_pa)); + set_64bit_val(wqe, 48, (ceq->virtual_map ? 
ceq->first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, LS_64(ceq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = ceq->ceq_id | + LS_64(I40IW_CQP_OP_CREATE_CEQ, I40IW_CQPSQ_OPCODE) | + LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) | + LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) | + LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cceq_create_done - poll for control ceq wqe to complete + * @ceq: ceq sc structure + */ +static enum i40iw_status_code i40iw_sc_cceq_create_done(struct i40iw_sc_ceq *ceq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = ceq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CEQ, NULL); +} + +/** + * i40iw_sc_cceq_destroy_done - poll for destroy cceq to complete + * @ceq: ceq sc structure + */ +static enum i40iw_status_code i40iw_sc_cceq_destroy_done(struct i40iw_sc_ceq *ceq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = ceq->dev->cqp; + cqp->process_cqp_sds = i40iw_update_sds_noccq; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_CEQ, NULL); +} + +/** + * i40iw_sc_cceq_create - create cceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + */ +static enum i40iw_status_code i40iw_sc_cceq_create(struct i40iw_sc_ceq *ceq, u64 scratch) +{ + enum i40iw_status_code ret_code; + + ret_code = i40iw_sc_ceq_create(ceq, scratch, true); + if (!ret_code) + ret_code = i40iw_sc_cceq_create_done(ceq); + return ret_code; +} + +/** + * i40iw_sc_ceq_destroy - destroy ceq + * @ceq: ceq sc structure + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ceq_destroy(struct i40iw_sc_ceq *ceq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + cqp = ceq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, ceq->elem_cnt); + set_64bit_val(wqe, 48, ceq->first_pm_pbl_idx); + header = ceq->ceq_id | + LS_64(I40IW_CQP_OP_DESTROY_CEQ, I40IW_CQPSQ_OPCODE) | + LS_64(ceq->pbl_chunk_size, I40IW_CQPSQ_CEQ_LPBLSIZE) | + LS_64(ceq->virtual_map, I40IW_CQPSQ_CEQ_VMAP) | + LS_64(ceq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CEQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_process_ceq - process ceq + * @dev: sc device struct + * @ceq: ceq sc structure + */ +static void *i40iw_sc_process_ceq(struct i40iw_sc_dev *dev, struct i40iw_sc_ceq *ceq) +{ + u64 temp; + u64 *ceqe; + struct i40iw_sc_cq *cq = NULL; + u8 polarity; + + ceqe = (u64 *)I40IW_GET_CURRENT_CEQ_ELEMENT(ceq); + get_64bit_val(ceqe, 0, &temp); + polarity = (u8)RS_64(temp, I40IW_CEQE_VALID); + if (polarity != ceq->polarity) + return cq; + + cq = (struct i40iw_sc_cq *)(unsigned long)LS_64_1(temp, 1); + + I40IW_RING_MOVE_TAIL(ceq->ceq_ring); + if (I40IW_RING_GETCURRENT_TAIL(ceq->ceq_ring) == 0) + ceq->polarity ^= 1; + + if (dev->is_pf) + i40iw_wr32(dev->hw, I40E_PFPE_CQACK, cq->cq_uk.cq_id); + else + i40iw_wr32(dev->hw, I40E_VFPE_CQACK1, cq->cq_uk.cq_id); + + return cq; +} + +/** + * i40iw_sc_aeq_init - initialize aeq + * @aeq: aeq structure 
ptr + * @info: aeq initialization info + */ +static enum i40iw_status_code i40iw_sc_aeq_init(struct i40iw_sc_aeq *aeq, + struct i40iw_aeq_init_info *info) +{ + u32 pble_obj_cnt; + + if ((info->elem_cnt < I40IW_MIN_AEQ_ENTRIES) || + (info->elem_cnt > I40IW_MAX_AEQ_ENTRIES)) + return I40IW_ERR_INVALID_SIZE; + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + aeq->size = sizeof(*aeq); + aeq->polarity = 1; + aeq->aeqe_base = (struct i40iw_sc_aeqe *)info->aeqe_base; + aeq->dev = info->dev; + aeq->elem_cnt = info->elem_cnt; + + aeq->aeq_elem_pa = info->aeq_elem_pa; + I40IW_RING_INIT(aeq->aeq_ring, aeq->elem_cnt); + info->dev->aeq = aeq; + + aeq->virtual_map = info->virtual_map; + aeq->pbl_list = (aeq->virtual_map ? info->pbl_list : NULL); + aeq->pbl_chunk_size = (aeq->virtual_map ? info->pbl_chunk_size : 0); + aeq->first_pm_pbl_idx = (aeq->virtual_map ? info->first_pm_pbl_idx : 0); + info->dev->aeq = aeq; + return 0; +} + +/** + * i40iw_sc_aeq_create - create aeq + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_aeq_create(struct i40iw_sc_aeq *aeq, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = aeq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, aeq->elem_cnt); + set_64bit_val(wqe, 32, + (aeq->virtual_map ? 0 : aeq->aeq_elem_pa)); + set_64bit_val(wqe, 48, + (aeq->virtual_map ? aeq->first_pm_pbl_idx : 0)); + + header = LS_64(I40IW_CQP_OP_CREATE_AEQ, I40IW_CQPSQ_OPCODE) | + LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) | + LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_aeq_destroy - destroy aeq during close + * @aeq: aeq structure ptr + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_aeq_destroy(struct i40iw_sc_aeq *aeq, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = aeq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, aeq->elem_cnt); + set_64bit_val(wqe, 48, aeq->first_pm_pbl_idx); + header = LS_64(I40IW_CQP_OP_DESTROY_AEQ, I40IW_CQPSQ_OPCODE) | + LS_64(aeq->pbl_chunk_size, I40IW_CQPSQ_AEQ_LPBLSIZE) | + LS_64(aeq->virtual_map, I40IW_CQPSQ_AEQ_VMAP) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "AEQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_get_next_aeqe - get next aeq entry + * @aeq: aeq structure ptr + * @info: aeqe info to be returned + */ +static enum i40iw_status_code i40iw_sc_get_next_aeqe(struct i40iw_sc_aeq *aeq, + struct i40iw_aeqe_info *info) +{ + u64 temp, compl_ctx; + u64 *aeqe; + u16 wqe_idx; + u8 ae_src; + u8 polarity; + + aeqe = (u64 *)I40IW_GET_CURRENT_AEQ_ELEMENT(aeq); + get_64bit_val(aeqe, 0, &compl_ctx); + get_64bit_val(aeqe, 8, &temp); + polarity = 
(u8)RS_64(temp, I40IW_AEQE_VALID); + + if (aeq->polarity != polarity) + return I40IW_ERR_QUEUE_EMPTY; + + i40iw_debug_buf(aeq->dev, I40IW_DEBUG_WQE, "AEQ_ENTRY", aeqe, 16); + + ae_src = (u8)RS_64(temp, I40IW_AEQE_AESRC); + wqe_idx = (u16)RS_64(temp, I40IW_AEQE_WQDESCIDX); + info->qp_cq_id = (u32)RS_64(temp, I40IW_AEQE_QPCQID); + info->ae_id = (u16)RS_64(temp, I40IW_AEQE_AECODE); + info->tcp_state = (u8)RS_64(temp, I40IW_AEQE_TCPSTATE); + info->iwarp_state = (u8)RS_64(temp, I40IW_AEQE_IWSTATE); + info->q2_data_written = (u8)RS_64(temp, I40IW_AEQE_Q2DATA); + info->aeqe_overflow = (bool)RS_64(temp, I40IW_AEQE_OVERFLOW); + + switch (info->ae_id) { + case I40IW_AE_PRIV_OPERATION_DENIED: + case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG: + case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT: + case I40IW_AE_BAD_CLOSE: + case I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE: + case I40IW_AE_RDMA_READ_WHILE_ORD_ZERO: + case I40IW_AE_STAG_ZERO_INVALID: + case I40IW_AE_IB_RREQ_AND_Q1_FULL: + case I40IW_AE_WQE_UNEXPECTED_OPCODE: + case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION: + case I40IW_AE_DDP_UBE_INVALID_MO: + case I40IW_AE_DDP_UBE_INVALID_QN: + case I40IW_AE_DDP_NO_L_BIT: + case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: + case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE: + case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST: + case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: + case I40IW_AE_INVALID_ARP_ENTRY: + case I40IW_AE_INVALID_TCP_OPTION_RCVD: + case I40IW_AE_STALE_ARP_ENTRY: + case I40IW_AE_LLP_CLOSE_COMPLETE: + case I40IW_AE_LLP_CONNECTION_RESET: + case I40IW_AE_LLP_FIN_RECEIVED: + case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR: + case I40IW_AE_LLP_SEGMENT_TOO_SMALL: + case I40IW_AE_LLP_SYN_RECEIVED: + case I40IW_AE_LLP_TERMINATE_RECEIVED: + case I40IW_AE_LLP_TOO_MANY_RETRIES: + case I40IW_AE_LLP_DOUBT_REACHABILITY: + case I40IW_AE_RESET_SENT: + case I40IW_AE_TERMINATE_SENT: + case I40IW_AE_RESET_NOT_SENT: + case I40IW_AE_LCE_QP_CATASTROPHIC: + case I40IW_AE_QP_SUSPEND_COMPLETE: + info->qp = true; + info->compl_ctx = compl_ctx; + ae_src = I40IW_AE_SOURCE_RSVD; + break; + case I40IW_AE_LCE_CQ_CATASTROPHIC: + info->cq = true; + info->compl_ctx = LS_64_1(compl_ctx, 1); + ae_src = I40IW_AE_SOURCE_RSVD; + break; + } + + switch (ae_src) { + case I40IW_AE_SOURCE_RQ: + case I40IW_AE_SOURCE_RQ_0011: + info->qp = true; + info->wqe_idx = wqe_idx; + info->compl_ctx = compl_ctx; + break; + case I40IW_AE_SOURCE_CQ: + case I40IW_AE_SOURCE_CQ_0110: + case I40IW_AE_SOURCE_CQ_1010: + case I40IW_AE_SOURCE_CQ_1110: + info->cq = true; + info->compl_ctx = LS_64_1(compl_ctx, 1); + break; + case I40IW_AE_SOURCE_SQ: + case I40IW_AE_SOURCE_SQ_0111: + info->qp = true; + info->sq = true; + info->wqe_idx = wqe_idx; + info->compl_ctx = compl_ctx; + break; + case I40IW_AE_SOURCE_IN_RR_WR: + case I40IW_AE_SOURCE_IN_RR_WR_1011: + info->qp = true; + info->compl_ctx = compl_ctx; + info->in_rdrsp_wr = true; + break; + case I40IW_AE_SOURCE_OUT_RR: + case I40IW_AE_SOURCE_OUT_RR_1111: + info->qp = true; + info->compl_ctx = compl_ctx; + info->out_rdrsp = true; + break; + case I40IW_AE_SOURCE_RSVD: + /* fallthrough */ + default: + break; + } + I40IW_RING_MOVE_TAIL(aeq->aeq_ring); + if (I40IW_RING_GETCURRENT_TAIL(aeq->aeq_ring) == 0) + aeq->polarity ^= 1; + return 0; +} + +/** + * i40iw_sc_repost_aeq_entries - repost completed aeq entries + * @dev: sc device struct + * @count: allocate count + */ +static enum i40iw_status_code i40iw_sc_repost_aeq_entries(struct i40iw_sc_dev *dev, + u32 count) +{ + + if (dev->is_pf) + i40iw_wr32(dev->hw, I40E_PFPE_AEQALLOC, count); + else + 
i40iw_wr32(dev->hw, I40E_VFPE_AEQALLOC1, count); + + return 0; +} + +/** + * i40iw_sc_aeq_create_done - create aeq + * @aeq: aeq structure ptr + */ +static enum i40iw_status_code i40iw_sc_aeq_create_done(struct i40iw_sc_aeq *aeq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = aeq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_AEQ, NULL); +} + +/** + * i40iw_sc_aeq_destroy_done - destroy of aeq during close + * @aeq: aeq structure ptr + */ +static enum i40iw_status_code i40iw_sc_aeq_destroy_done(struct i40iw_sc_aeq *aeq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = aeq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_DESTROY_AEQ, NULL); +} + +/** + * i40iw_sc_ccq_init - initialize control cq + * @cq: sc's cq ctruct + * @info: info for control cq initialization + */ +static enum i40iw_status_code i40iw_sc_ccq_init(struct i40iw_sc_cq *cq, + struct i40iw_ccq_init_info *info) +{ + u32 pble_obj_cnt; + + if (info->num_elem < I40IW_MIN_CQ_SIZE || info->num_elem > I40IW_MAX_CQ_SIZE) + return I40IW_ERR_INVALID_SIZE; + + if (info->ceq_id > I40IW_MAX_CEQID) + return I40IW_ERR_INVALID_CEQ_ID; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cq->cq_pa = info->cq_pa; + cq->cq_uk.cq_base = info->cq_base; + cq->shadow_area_pa = info->shadow_area_pa; + cq->cq_uk.shadow_area = info->shadow_area; + cq->shadow_read_threshold = info->shadow_read_threshold; + cq->dev = info->dev; + cq->ceq_id = info->ceq_id; + cq->cq_uk.cq_size = info->num_elem; + cq->cq_type = I40IW_CQ_TYPE_CQP; + cq->ceqe_mask = info->ceqe_mask; + I40IW_RING_INIT(cq->cq_uk.cq_ring, info->num_elem); + + cq->cq_uk.cq_id = 0; /* control cq is id 0 always */ + cq->ceq_id_valid = info->ceq_id_valid; + cq->tph_en = info->tph_en; + cq->tph_val = info->tph_val; + cq->cq_uk.avoid_mem_cflct = info->avoid_mem_cflct; + + cq->pbl_list = info->pbl_list; + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + cq->cq_uk.polarity = true; + + /* following are only for iw cqs so initialize them to zero */ + cq->cq_uk.cqe_alloc_reg = NULL; + info->dev->ccq = cq; + return 0; +} + +/** + * i40iw_sc_ccq_create_done - poll cqp for ccq create + * @ccq: ccq sc struct + */ +static enum i40iw_status_code i40iw_sc_ccq_create_done(struct i40iw_sc_cq *ccq) +{ + struct i40iw_sc_cqp *cqp; + + cqp = ccq->dev->cqp; + return i40iw_sc_poll_for_cqp_op_done(cqp, I40IW_CQP_OP_CREATE_CQ, NULL); +} + +/** + * i40iw_sc_ccq_create - create control cq + * @ccq: ccq sc struct + * @scratch: u64 saved to be used during cqp completion + * @check_overflow: overlow flag for ccq + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ccq_create(struct i40iw_sc_cq *ccq, + u64 scratch, + bool check_overflow, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + enum i40iw_status_code ret_code; + + cqp = ccq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 0, ccq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(ccq, 1)); + set_64bit_val(wqe, 16, + LS_64(ccq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + set_64bit_val(wqe, 32, (ccq->virtual_map ? 0 : ccq->cq_pa)); + set_64bit_val(wqe, 40, ccq->shadow_area_pa); + set_64bit_val(wqe, 48, + (ccq->virtual_map ? 
ccq->first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, + LS_64(ccq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = ccq->cq_uk.cq_id | + LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(ccq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(ccq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + ret_code = i40iw_sc_ccq_create_done(ccq); + if (ret_code) + return ret_code; + } + cqp->process_cqp_sds = i40iw_cqp_sds_cmd; + + return 0; +} + +/** + * i40iw_sc_ccq_destroy - destroy ccq during close + * @ccq: ccq sc struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + enum i40iw_status_code ret_code = 0; + u32 tail, val, error; + + cqp = ccq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 0, ccq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(ccq, 1)); + set_64bit_val(wqe, 40, ccq->shadow_area_pa); + + header = ccq->cq_uk.cq_id | + LS_64((ccq->ceq_id_valid ? ccq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(ccq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(ccq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(ccq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(ccq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CCQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); + } + + cqp->process_cqp_sds = i40iw_update_sds_noccq; + + return ret_code; +} + +/** + * i40iw_sc_cq_init - initialize completion q + * @cq: cq struct + * @info: cq initialization info + */ +static enum i40iw_status_code i40iw_sc_cq_init(struct i40iw_sc_cq *cq, + struct i40iw_cq_init_info *info) +{ + u32 __iomem *cqe_alloc_reg = NULL; + enum i40iw_status_code ret_code; + u32 pble_obj_cnt; + u32 arm_offset; + + pble_obj_cnt = info->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->virtual_map && (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cq->cq_pa = info->cq_base_pa; + cq->dev = info->dev; + cq->ceq_id = info->ceq_id; + arm_offset = (info->dev->is_pf) ? 
I40E_PFPE_CQARM : I40E_VFPE_CQARM1; + if (i40iw_get_hw_addr(cq->dev)) + cqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(cq->dev) + + arm_offset); + info->cq_uk_init_info.cqe_alloc_reg = cqe_alloc_reg; + ret_code = i40iw_cq_uk_init(&cq->cq_uk, &info->cq_uk_init_info); + if (ret_code) + return ret_code; + cq->virtual_map = info->virtual_map; + cq->pbl_chunk_size = info->pbl_chunk_size; + cq->ceqe_mask = info->ceqe_mask; + cq->cq_type = (info->type) ? info->type : I40IW_CQ_TYPE_IWARP; + + cq->shadow_area_pa = info->shadow_area_pa; + cq->shadow_read_threshold = info->shadow_read_threshold; + + cq->ceq_id_valid = info->ceq_id_valid; + cq->tph_en = info->tph_en; + cq->tph_val = info->tph_val; + + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + + return 0; +} + +/** + * i40iw_sc_cq_create - create completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @check_overflow: flag for overflow check + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_cq_create(struct i40iw_sc_cq *cq, + u64 scratch, + bool check_overflow, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + if (cq->cq_uk.cq_id > I40IW_MAX_CQID) + return I40IW_ERR_INVALID_CQ_ID; + + if (cq->ceq_id > I40IW_MAX_CEQID) + return I40IW_ERR_INVALID_CEQ_ID; + + cqp = cq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 0, cq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, + 16, + LS_64(cq->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + + set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); + + set_64bit_val(wqe, 40, cq->shadow_area_pa); + set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = cq->cq_uk.cq_id | + LS_64((cq->ceq_id_valid ? cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cq_destroy - destroy completion q + * @cq: cq struct + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_cq_destroy(struct i40iw_sc_cq *cq, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + cqp = cq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 0, cq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, 40, cq->shadow_area_pa); + set_64bit_val(wqe, 48, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); + + header = cq->cq_uk.cq_id | + LS_64((cq->ceq_id_valid ? 
cq->ceq_id : 0), I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_DESTROY_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(cq->pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(cq->virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(cq->ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_cq_modify - modify a Completion Queue + * @cq: cq struct + * @info: modification info struct + * @scratch: + * @post_sq: flag to post to sq + */ +static enum i40iw_status_code i40iw_sc_cq_modify(struct i40iw_sc_cq *cq, + struct i40iw_modify_cq_info *info, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + u32 cq_size, ceq_id, first_pm_pbl_idx; + u8 pbl_chunk_size; + bool virtual_map, ceq_id_valid, check_overflow; + u32 pble_obj_cnt; + + if (info->ceq_valid && (info->ceq_id > I40IW_MAX_CEQID)) + return I40IW_ERR_INVALID_CEQ_ID; + + pble_obj_cnt = cq->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->cq_resize && info->virtual_map && + (info->first_pm_pbl_idx >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cqp = cq->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + cq->pbl_list = info->pbl_list; + cq->cq_pa = info->cq_pa; + cq->first_pm_pbl_idx = info->first_pm_pbl_idx; + + cq_size = info->cq_resize ? info->cq_size : cq->cq_uk.cq_size; + if (info->ceq_change) { + ceq_id_valid = true; + ceq_id = info->ceq_id; + } else { + ceq_id_valid = cq->ceq_id_valid; + ceq_id = ceq_id_valid ? cq->ceq_id : 0; + } + virtual_map = info->cq_resize ? info->virtual_map : cq->virtual_map; + first_pm_pbl_idx = (info->cq_resize ? + (info->virtual_map ? info->first_pm_pbl_idx : 0) : + (cq->virtual_map ? cq->first_pm_pbl_idx : 0)); + pbl_chunk_size = (info->cq_resize ? + (info->virtual_map ? info->pbl_chunk_size : 0) : + (cq->virtual_map ? cq->pbl_chunk_size : 0)); + check_overflow = info->check_overflow_change ? info->check_overflow : + cq->check_overflow; + cq->cq_uk.cq_size = cq_size; + cq->ceq_id_valid = ceq_id_valid; + cq->ceq_id = ceq_id; + cq->virtual_map = virtual_map; + cq->first_pm_pbl_idx = first_pm_pbl_idx; + cq->pbl_chunk_size = pbl_chunk_size; + cq->check_overflow = check_overflow; + + set_64bit_val(wqe, 0, cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, 16, + LS_64(info->shadow_read_threshold, I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + set_64bit_val(wqe, 32, (cq->virtual_map ? 0 : cq->cq_pa)); + set_64bit_val(wqe, 40, cq->shadow_area_pa); + set_64bit_val(wqe, 48, (cq->virtual_map ? 
first_pm_pbl_idx : 0)); + set_64bit_val(wqe, 56, LS_64(cq->tph_val, I40IW_CQPSQ_TPHVAL)); + + header = cq->cq_uk.cq_id | + LS_64(ceq_id, I40IW_CQPSQ_CQ_CEQID) | + LS_64(I40IW_CQP_OP_MODIFY_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(info->cq_resize, I40IW_CQPSQ_CQ_CQRESIZE) | + LS_64(pbl_chunk_size, I40IW_CQPSQ_CQ_LPBLSIZE) | + LS_64(check_overflow, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(virtual_map, I40IW_CQPSQ_CQ_VIRTMAP) | + LS_64(cq->ceqe_mask, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(ceq_id_valid, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cq->tph_en, I40IW_CQPSQ_TPHEN) | + LS_64(cq->cq_uk.avoid_mem_cflct, I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "CQ_MODIFY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_init - initialize qp + * @qp: sc qp + * @info: initialization qp info + */ +static enum i40iw_status_code i40iw_sc_qp_init(struct i40iw_sc_qp *qp, + struct i40iw_qp_init_info *info) +{ + u32 __iomem *wqe_alloc_reg = NULL; + enum i40iw_status_code ret_code; + u32 pble_obj_cnt; + u8 wqe_size; + u32 offset; + + qp->dev = info->pd->dev; + qp->vsi = info->vsi; + qp->sq_pa = info->sq_pa; + qp->rq_pa = info->rq_pa; + qp->hw_host_ctx_pa = info->host_ctx_pa; + qp->q2_pa = info->q2_pa; + qp->shadow_area_pa = info->shadow_area_pa; + + qp->q2_buf = info->q2; + qp->pd = info->pd; + qp->hw_host_ctx = info->host_ctx; + offset = (qp->pd->dev->is_pf) ? I40E_PFPE_WQEALLOC : I40E_VFPE_WQEALLOC1; + if (i40iw_get_hw_addr(qp->pd->dev)) + wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) + + offset); + + info->qp_uk_init_info.wqe_alloc_reg = wqe_alloc_reg; + info->qp_uk_init_info.abi_ver = qp->pd->abi_ver; + ret_code = i40iw_qp_uk_init(&qp->qp_uk, &info->qp_uk_init_info); + if (ret_code) + return ret_code; + qp->virtual_map = info->virtual_map; + + pble_obj_cnt = info->pd->dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if ((info->virtual_map && (info->sq_pa >= pble_obj_cnt)) || + (info->virtual_map && (info->rq_pa >= pble_obj_cnt))) + return I40IW_ERR_INVALID_PBLE_INDEX; + + qp->llp_stream_handle = (void *)(-1); + qp->qp_type = (info->type) ? 
info->type : I40IW_QP_TYPE_IWARP; + + qp->hw_sq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.sq_ring.size, + false); + i40iw_debug(qp->dev, I40IW_DEBUG_WQE, "%s: hw_sq_size[%04d] sq_ring.size[%04d]\n", + __func__, qp->hw_sq_size, qp->qp_uk.sq_ring.size); + + switch (qp->pd->abi_ver) { + case 4: + ret_code = i40iw_fragcnt_to_wqesize_rq(qp->qp_uk.max_rq_frag_cnt, + &wqe_size); + if (ret_code) + return ret_code; + break; + case 5: /* fallthrough until next ABI version */ + default: + if (qp->qp_uk.max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT) + return I40IW_ERR_INVALID_FRAG_COUNT; + wqe_size = I40IW_MAX_WQE_SIZE_RQ; + break; + } + qp->hw_rq_size = i40iw_get_encoded_wqe_size(qp->qp_uk.rq_size * + (wqe_size / I40IW_QP_WQE_MIN_SIZE), false); + i40iw_debug(qp->dev, I40IW_DEBUG_WQE, + "%s: hw_rq_size[%04d] qp_uk.rq_size[%04d] wqe_size[%04d]\n", + __func__, qp->hw_rq_size, qp->qp_uk.rq_size, wqe_size); + qp->sq_tph_val = info->sq_tph_val; + qp->rq_tph_val = info->rq_tph_val; + qp->sq_tph_en = info->sq_tph_en; + qp->rq_tph_en = info->rq_tph_en; + qp->rcv_tph_en = info->rcv_tph_en; + qp->xmit_tph_en = info->xmit_tph_en; + qp->qs_handle = qp->vsi->qos[qp->user_pri].qs_handle; + + return 0; +} + +/** + * i40iw_sc_qp_create - create qp + * @qp: sc qp + * @info: qp create info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_create( + struct i40iw_sc_qp *qp, + struct i40iw_create_qp_info *info, + u64 scratch, + bool post_sq) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + + if ((qp->qp_uk.qp_id < I40IW_MIN_IW_QP_ID) || + (qp->qp_uk.qp_id > I40IW_MAX_IW_QP_ID)) + return I40IW_ERR_INVALID_QP_ID; + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + + set_64bit_val(wqe, 40, qp->shadow_area_pa); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) | + LS_64((info->ord_valid ? 
1 : 0), I40IW_CQPSQ_QP_ORDVALID) | + LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) | + LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | + LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) | + LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | + LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) | + LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_CREATE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_modify - modify qp cqp wqe + * @qp: sc qp + * @info: modify qp info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_modify( + struct i40iw_sc_qp *qp, + struct i40iw_modify_qp_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + u8 term_actions = 0; + u8 term_len = 0; + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + if (info->next_iwarp_state == I40IW_QP_STATE_TERMINATE) { + if (info->dont_send_fin) + term_actions += I40IWQP_TERM_SEND_TERM_ONLY; + if (info->dont_send_term) + term_actions += I40IWQP_TERM_SEND_FIN_ONLY; + if ((term_actions == I40IWQP_TERM_SEND_TERM_AND_FIN) || + (term_actions == I40IWQP_TERM_SEND_TERM_ONLY)) + term_len = info->termlen; + } + + set_64bit_val(wqe, + 8, + LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); + + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 40, qp->shadow_area_pa); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_MODIFY_QP, I40IW_CQPSQ_OPCODE) | + LS_64(info->ord_valid, I40IW_CQPSQ_QP_ORDVALID) | + LS_64(info->tcp_ctx_valid, I40IW_CQPSQ_QP_TOECTXVALID) | + LS_64(info->cached_var_valid, I40IW_CQPSQ_QP_CACHEDVARVALID) | + LS_64(qp->virtual_map, I40IW_CQPSQ_QP_VQ) | + LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | + LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | + LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | + LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | + LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | + LS_64(info->reset_tcp_conn, I40IW_CQPSQ_QP_RESETCON) | + LS_64(info->arp_cache_idx_valid, I40IW_CQPSQ_QP_ARPTABIDXVALID) | + LS_64(info->next_iwarp_state, I40IW_CQPSQ_QP_NEXTIWSTATE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_MODIFY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_destroy - cqp destroy qp + * @qp: sc qp + * @scratch: u64 saved to be used during cqp completion + * @remove_hash_idx: flag if to remove hash idx + * @ignore_mw_bnd: memory window bind flag + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_destroy( + struct i40iw_sc_qp *qp, + u64 scratch, + bool remove_hash_idx, + bool ignore_mw_bnd, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + i40iw_qp_rem_qos(qp); + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 40, qp->shadow_area_pa); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_DESTROY_QP, I40IW_CQPSQ_OPCODE) | + LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | + 
LS_64(ignore_mw_bnd, I40IW_CQPSQ_QP_IGNOREMWBOUND) | + LS_64(remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_DESTROY WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_flush_wqes - flush qp's wqe + * @qp: sc qp + * @info: dlush information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_flush_wqes( + struct i40iw_sc_qp *qp, + struct i40iw_qp_flush_info *info, + u64 scratch, + bool post_sq) +{ + u64 temp = 0; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + bool flush_sq = false, flush_rq = false; + + if (info->rq && !qp->flush_rq) + flush_rq = true; + + if (info->sq && !qp->flush_sq) + flush_sq = true; + + qp->flush_sq |= flush_sq; + qp->flush_rq |= flush_rq; + if (!flush_sq && !flush_rq) + return 0; + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + if (info->userflushcode) { + if (flush_rq) { + temp |= LS_64(info->rq_minor_code, I40IW_CQPSQ_FWQE_RQMNERR) | + LS_64(info->rq_major_code, I40IW_CQPSQ_FWQE_RQMJERR); + } + if (flush_sq) { + temp |= LS_64(info->sq_minor_code, I40IW_CQPSQ_FWQE_SQMNERR) | + LS_64(info->sq_major_code, I40IW_CQPSQ_FWQE_SQMJERR); + } + } + set_64bit_val(wqe, 16, temp); + + temp = (info->generate_ae) ? + info->ae_code | LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE) : 0; + + set_64bit_val(wqe, 8, temp); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_FLUSH_WQES, I40IW_CQPSQ_OPCODE) | + LS_64(info->generate_ae, I40IW_CQPSQ_FWQE_GENERATE_AE) | + LS_64(info->userflushcode, I40IW_CQPSQ_FWQE_USERFLCODE) | + LS_64(flush_sq, I40IW_CQPSQ_FWQE_FLUSHSQ) | + LS_64(flush_rq, I40IW_CQPSQ_FWQE_FLUSHRQ) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "QP_FLUSH WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_gen_ae - generate AE, currently uses flush WQE CQP OP + * @qp: sc qp + * @info: gen ae information + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_gen_ae( + struct i40iw_sc_qp *qp, + struct i40iw_gen_ae_info *info, + u64 scratch, + bool post_sq) +{ + u64 temp; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = qp->pd->dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + temp = info->ae_code | + LS_64(info->ae_source, I40IW_CQPSQ_FWQE_AESOURCE); + + set_64bit_val(wqe, 8, temp); + + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_GEN_AE, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_FWQE_GENERATE_AE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "GEN_AE WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_upload_context - upload qp's context + * @dev: sc device struct + * @info: upload context info ptr for return + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_qp_upload_context( + struct i40iw_sc_dev *dev, + struct i40iw_upload_context_info 
*info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 16, info->buf_pa); + + header = LS_64(info->qp_id, I40IW_CQPSQ_UCTX_QPID) | + LS_64(I40IW_CQP_OP_UPLOAD_CONTEXT, I40IW_CQPSQ_OPCODE) | + LS_64(info->qp_type, I40IW_CQPSQ_UCTX_QPTYPE) | + LS_64(info->raw_format, I40IW_CQPSQ_UCTX_RAWFORMAT) | + LS_64(info->freeze_qp, I40IW_CQPSQ_UCTX_FREEZEQP) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QP_UPLOAD_CTX WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_qp_setctx - set qp's context + * @qp: sc qp + * @qp_ctx: context ptr + * @info: ctx info + */ +static enum i40iw_status_code i40iw_sc_qp_setctx( + struct i40iw_sc_qp *qp, + u64 *qp_ctx, + struct i40iw_qp_host_ctx_info *info) +{ + struct i40iwarp_offload_info *iw; + struct i40iw_tcp_offload_info *tcp; + struct i40iw_sc_vsi *vsi; + struct i40iw_sc_dev *dev; + u64 qw0, qw3, qw7 = 0; + + iw = info->iwarp_info; + tcp = info->tcp_info; + vsi = qp->vsi; + dev = qp->dev; + if (info->add_to_qoslist) { + qp->user_pri = info->user_pri; + i40iw_qp_add_qos(qp); + i40iw_debug(qp->dev, I40IW_DEBUG_DCB, "%s qp[%d] UP[%d] qset[%d]\n", + __func__, qp->qp_uk.qp_id, qp->user_pri, qp->qs_handle); + } + qw0 = LS_64(qp->qp_uk.rq_wqe_size, I40IWQPC_RQWQESIZE) | + LS_64(info->err_rq_idx_valid, I40IWQPC_ERR_RQ_IDX_VALID) | + LS_64(qp->rcv_tph_en, I40IWQPC_RCVTPHEN) | + LS_64(qp->xmit_tph_en, I40IWQPC_XMITTPHEN) | + LS_64(qp->rq_tph_en, I40IWQPC_RQTPHEN) | + LS_64(qp->sq_tph_en, I40IWQPC_SQTPHEN) | + LS_64(info->push_idx, I40IWQPC_PPIDX) | + LS_64(info->push_mode_en, I40IWQPC_PMENA); + + set_64bit_val(qp_ctx, 8, qp->sq_pa); + set_64bit_val(qp_ctx, 16, qp->rq_pa); + + qw3 = LS_64(qp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) | + LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) | + LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE); + + set_64bit_val(qp_ctx, + 128, + LS_64(info->err_rq_idx, I40IWQPC_ERR_RQ_IDX)); + + set_64bit_val(qp_ctx, + 136, + LS_64(info->send_cq_num, I40IWQPC_TXCQNUM) | + LS_64(info->rcv_cq_num, I40IWQPC_RXCQNUM)); + + set_64bit_val(qp_ctx, + 168, + LS_64(info->qp_compl_ctx, I40IWQPC_QPCOMPCTX)); + set_64bit_val(qp_ctx, + 176, + LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) | + LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) | + LS_64(qp->qs_handle, I40IWQPC_QSHANDLE) | + LS_64(vsi->exception_lan_queue, I40IWQPC_EXCEPTION_LAN_QUEUE)); + + if (info->iwarp_info_valid) { + qw0 |= LS_64(iw->ddp_ver, I40IWQPC_DDP_VER) | + LS_64(iw->rdmap_ver, I40IWQPC_RDMAP_VER); + + qw7 |= LS_64(iw->pd_id, I40IWQPC_PDIDX); + set_64bit_val(qp_ctx, + 144, + LS_64(qp->q2_pa, I40IWQPC_Q2ADDR) | + LS_64(vsi->fcn_id, I40IWQPC_STAT_INDEX)); + set_64bit_val(qp_ctx, + 152, + LS_64(iw->last_byte_sent, I40IWQPC_LASTBYTESENT)); + + set_64bit_val(qp_ctx, + 160, + LS_64(iw->ord_size, I40IWQPC_ORDSIZE) | + LS_64(iw->ird_size, I40IWQPC_IRDSIZE) | + LS_64(iw->wr_rdresp_en, I40IWQPC_WRRDRSPOK) | + LS_64(iw->rd_enable, I40IWQPC_RDOK) | + LS_64(iw->snd_mark_en, I40IWQPC_SNDMARKERS) | + LS_64(iw->bind_en, I40IWQPC_BINDEN) | + LS_64(iw->fast_reg_en, I40IWQPC_FASTREGEN) | + LS_64(iw->priv_mode_en, I40IWQPC_PRIVEN) | + LS_64((((vsi->stats_fcn_id_alloc) && + (dev->is_pf) && (vsi->fcn_id >= I40IW_FIRST_NON_PF_STAT)) ? 
1 : 0), + I40IWQPC_USESTATSINSTANCE) | + LS_64(1, I40IWQPC_IWARPMODE) | + LS_64(iw->rcv_mark_en, I40IWQPC_RCVMARKERS) | + LS_64(iw->align_hdrs, I40IWQPC_ALIGNHDRS) | + LS_64(iw->rcv_no_mpa_crc, I40IWQPC_RCVNOMPACRC) | + LS_64(iw->rcv_mark_offset, I40IWQPC_RCVMARKOFFSET) | + LS_64(iw->snd_mark_offset, I40IWQPC_SNDMARKOFFSET)); + } + if (info->tcp_info_valid) { + qw0 |= LS_64(tcp->ipv4, I40IWQPC_IPV4) | + LS_64(tcp->no_nagle, I40IWQPC_NONAGLE) | + LS_64(tcp->insert_vlan_tag, I40IWQPC_INSERTVLANTAG) | + LS_64(tcp->time_stamp, I40IWQPC_TIMESTAMP) | + LS_64(tcp->cwnd_inc_limit, I40IWQPC_LIMIT) | + LS_64(tcp->drop_ooo_seg, I40IWQPC_DROPOOOSEG) | + LS_64(tcp->dup_ack_thresh, I40IWQPC_DUPACK_THRESH); + + qw3 |= LS_64(tcp->ttl, I40IWQPC_TTL) | + LS_64(tcp->src_mac_addr_idx, I40IWQPC_SRCMACADDRIDX) | + LS_64(tcp->avoid_stretch_ack, I40IWQPC_AVOIDSTRETCHACK) | + LS_64(tcp->tos, I40IWQPC_TOS) | + LS_64(tcp->src_port, I40IWQPC_SRCPORTNUM) | + LS_64(tcp->dst_port, I40IWQPC_DESTPORTNUM); + + qp->src_mac_addr_idx = tcp->src_mac_addr_idx; + set_64bit_val(qp_ctx, + 32, + LS_64(tcp->dest_ip_addr2, I40IWQPC_DESTIPADDR2) | + LS_64(tcp->dest_ip_addr3, I40IWQPC_DESTIPADDR3)); + + set_64bit_val(qp_ctx, + 40, + LS_64(tcp->dest_ip_addr0, I40IWQPC_DESTIPADDR0) | + LS_64(tcp->dest_ip_addr1, I40IWQPC_DESTIPADDR1)); + + set_64bit_val(qp_ctx, + 48, + LS_64(tcp->snd_mss, I40IWQPC_SNDMSS) | + LS_64(tcp->vlan_tag, I40IWQPC_VLANTAG) | + LS_64(tcp->arp_idx, I40IWQPC_ARPIDX)); + + qw7 |= LS_64(tcp->flow_label, I40IWQPC_FLOWLABEL) | + LS_64(tcp->wscale, I40IWQPC_WSCALE) | + LS_64(tcp->ignore_tcp_opt, I40IWQPC_IGNORE_TCP_OPT) | + LS_64(tcp->ignore_tcp_uns_opt, I40IWQPC_IGNORE_TCP_UNS_OPT) | + LS_64(tcp->tcp_state, I40IWQPC_TCPSTATE) | + LS_64(tcp->rcv_wscale, I40IWQPC_RCVSCALE) | + LS_64(tcp->snd_wscale, I40IWQPC_SNDSCALE); + + set_64bit_val(qp_ctx, + 72, + LS_64(tcp->time_stamp_recent, I40IWQPC_TIMESTAMP_RECENT) | + LS_64(tcp->time_stamp_age, I40IWQPC_TIMESTAMP_AGE)); + set_64bit_val(qp_ctx, + 80, + LS_64(tcp->snd_nxt, I40IWQPC_SNDNXT) | + LS_64(tcp->snd_wnd, I40IWQPC_SNDWND)); + + set_64bit_val(qp_ctx, + 88, + LS_64(tcp->rcv_nxt, I40IWQPC_RCVNXT) | + LS_64(tcp->rcv_wnd, I40IWQPC_RCVWND)); + set_64bit_val(qp_ctx, + 96, + LS_64(tcp->snd_max, I40IWQPC_SNDMAX) | + LS_64(tcp->snd_una, I40IWQPC_SNDUNA)); + set_64bit_val(qp_ctx, + 104, + LS_64(tcp->srtt, I40IWQPC_SRTT) | + LS_64(tcp->rtt_var, I40IWQPC_RTTVAR)); + set_64bit_val(qp_ctx, + 112, + LS_64(tcp->ss_thresh, I40IWQPC_SSTHRESH) | + LS_64(tcp->cwnd, I40IWQPC_CWND)); + set_64bit_val(qp_ctx, + 120, + LS_64(tcp->snd_wl1, I40IWQPC_SNDWL1) | + LS_64(tcp->snd_wl2, I40IWQPC_SNDWL2)); + set_64bit_val(qp_ctx, + 128, + LS_64(tcp->max_snd_window, I40IWQPC_MAXSNDWND) | + LS_64(tcp->rexmit_thresh, I40IWQPC_REXMIT_THRESH)); + set_64bit_val(qp_ctx, + 184, + LS_64(tcp->local_ipaddr3, I40IWQPC_LOCAL_IPADDR3) | + LS_64(tcp->local_ipaddr2, I40IWQPC_LOCAL_IPADDR2)); + set_64bit_val(qp_ctx, + 192, + LS_64(tcp->local_ipaddr1, I40IWQPC_LOCAL_IPADDR1) | + LS_64(tcp->local_ipaddr0, I40IWQPC_LOCAL_IPADDR0)); + } + + set_64bit_val(qp_ctx, 0, qw0); + set_64bit_val(qp_ctx, 24, qw3); + set_64bit_val(qp_ctx, 56, qw7); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "QP_HOST)CTX WQE", + qp_ctx, I40IW_QP_CTX_SIZE); + return 0; +} + +/** + * i40iw_sc_alloc_stag - mr stag alloc + * @dev: sc device struct + * @info: stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_alloc_stag( + struct i40iw_sc_dev *dev, + struct 
i40iw_allocate_stag_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + enum i40iw_page_size page_size; + + page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 8, + LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID) | + LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN)); + set_64bit_val(wqe, + 16, + LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); + set_64bit_val(wqe, + 40, + LS_64(info->hmc_fcn_index, I40IW_CQPSQ_STAG_HMCFNIDX)); + + header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_STAG_MR) | + LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) | + LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) | + LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) | + LS_64(info->remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) | + LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) | + LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "ALLOC_STAG WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_mr_reg_non_shared - non-shared mr registration + * @dev: sc device struct + * @info: mr info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_mr_reg_non_shared( + struct i40iw_sc_dev *dev, + struct i40iw_reg_ns_stag_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp; + struct i40iw_sc_cqp *cqp; + u64 header; + u32 pble_obj_cnt; + bool remote_access; + u8 addr_type; + enum i40iw_page_size page_size; + + page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; + if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY | + I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY)) + remote_access = true; + else + remote_access = false; + + pble_obj_cnt = dev->hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt; + + if (info->chunk_size && (info->first_pm_pbl_index >= pble_obj_cnt)) + return I40IW_ERR_INVALID_PBLE_INDEX; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo; + set_64bit_val(wqe, 0, temp); + + set_64bit_val(wqe, + 8, + LS_64(info->total_len, I40IW_CQPSQ_STAG_STAGLEN) | + LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID)); + + set_64bit_val(wqe, + 16, + LS_64(info->stag_key, I40IW_CQPSQ_STAG_KEY) | + LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); + if (!info->chunk_size) { + set_64bit_val(wqe, 32, info->reg_addr_pa); + set_64bit_val(wqe, 48, 0); + } else { + set_64bit_val(wqe, 32, 0); + set_64bit_val(wqe, 48, info->first_pm_pbl_index); + } + set_64bit_val(wqe, 40, info->hmc_fcn_index); + set_64bit_val(wqe, 56, 0); + + addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? 
1 : 0;
+ header = LS_64(I40IW_CQP_OP_REG_MR, I40IW_CQPSQ_OPCODE) |
+ LS_64(1, I40IW_CQPSQ_STAG_MR) |
+ LS_64(info->chunk_size, I40IW_CQPSQ_STAG_LPBLSIZE) |
+ LS_64(page_size, I40IW_CQPSQ_STAG_HPAGESIZE) |
+ LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) |
+ LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) |
+ LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) |
+ LS_64(info->use_hmc_fcn_index, I40IW_CQPSQ_STAG_USEHMCFNIDX) |
+ LS_64(info->use_pf_rid, I40IW_CQPSQ_STAG_USEPFRID) |
+ LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
+
+ i40iw_insert_wqe_hdr(wqe, header);
+
+ i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_NS WQE",
+ wqe, I40IW_CQP_WQE_SIZE * 8);
+
+ if (post_sq)
+ i40iw_sc_cqp_post_sq(cqp);
+ return 0;
+}
+
+/**
+ * i40iw_sc_mr_reg_shared - register a shared memory region
+ * @dev: sc device struct
+ * @info: info for shared memory registration
+ * @scratch: u64 saved to be used during cqp completion
+ * @post_sq: flag for cqp db to ring
+ */
+static enum i40iw_status_code i40iw_sc_mr_reg_shared(
+ struct i40iw_sc_dev *dev,
+ struct i40iw_register_shared_stag *info,
+ u64 scratch,
+ bool post_sq)
+{
+ u64 *wqe;
+ struct i40iw_sc_cqp *cqp;
+ u64 temp, va64, fbo, header;
+ u32 va32;
+ bool remote_access;
+ u8 addr_type;
+
+ if (info->access_rights & (I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY |
+ I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY))
+ remote_access = true;
+ else
+ remote_access = false;
+ cqp = dev->cqp;
+ wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch);
+ if (!wqe)
+ return I40IW_ERR_RING_FULL;
+ va64 = (uintptr_t)(info->va);
+ va32 = (u32)(va64 & 0x00000000FFFFFFFF);
+ fbo = (u64)(va32 & (4096 - 1));
+
+ set_64bit_val(wqe,
+ 0,
+ (info->addr_type == I40IW_ADDR_TYPE_VA_BASED ? (uintptr_t)info->va : fbo));
+
+ set_64bit_val(wqe,
+ 8,
+ LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID));
+ temp = LS_64(info->new_stag_key, I40IW_CQPSQ_STAG_KEY) |
+ LS_64(info->new_stag_idx, I40IW_CQPSQ_STAG_IDX) |
+ LS_64(info->parent_stag_idx, I40IW_CQPSQ_STAG_PARENTSTAGIDX);
+ set_64bit_val(wqe, 16, temp);
+
+ addr_type = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ?
1 : 0; + header = LS_64(I40IW_CQP_OP_REG_SMR, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_STAG_MR) | + LS_64(info->access_rights, I40IW_CQPSQ_STAG_ARIGHTS) | + LS_64(remote_access, I40IW_CQPSQ_STAG_REMACCENABLED) | + LS_64(addr_type, I40IW_CQPSQ_STAG_VABASEDTO) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MR_REG_SHARED WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_dealloc_stag - deallocate stag + * @dev: sc device struct + * @info: dealloc stag info + * @scratch: u64 saved to be used during cqp completion + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_dealloc_stag( + struct i40iw_sc_dev *dev, + struct i40iw_dealloc_stag_info *info, + u64 scratch, + bool post_sq) +{ + u64 header; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 8, + LS_64(info->pd_id, I40IW_CQPSQ_STAG_PDID)); + set_64bit_val(wqe, + 16, + LS_64(info->stag_idx, I40IW_CQPSQ_STAG_IDX)); + + header = LS_64(I40IW_CQP_OP_DEALLOC_STAG, I40IW_CQPSQ_OPCODE) | + LS_64(info->mr, I40IW_CQPSQ_STAG_MR) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "DEALLOC_STAG WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_query_stag - query hardware for stag + * @dev: sc device struct + * @scratch: u64 saved to be used during cqp completion + * @stag_index: stag index for query + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_query_stag(struct i40iw_sc_dev *dev, + u64 scratch, + u32 stag_index, + bool post_sq) +{ + u64 header; + u64 *wqe; + struct i40iw_sc_cqp *cqp; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 16, + LS_64(stag_index, I40IW_CQPSQ_QUERYSTAG_IDX)); + + header = LS_64(I40IW_CQP_OP_QUERY_STAG, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "QUERY_STAG WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_mw_alloc - mw allocate + * @dev: sc device struct + * @scratch: u64 saved to be used during cqp completion + * @mw_stag_index:stag index + * @pd_id: pd is for this mw + * @post_sq: flag for cqp db to ring + */ +static enum i40iw_status_code i40iw_sc_mw_alloc( + struct i40iw_sc_dev *dev, + u64 scratch, + u32 mw_stag_index, + u16 pd_id, + bool post_sq) +{ + u64 header; + struct i40iw_sc_cqp *cqp; + u64 *wqe; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, 8, LS_64(pd_id, I40IW_CQPSQ_STAG_PDID)); + set_64bit_val(wqe, + 16, + LS_64(mw_stag_index, I40IW_CQPSQ_STAG_IDX)); + + header = LS_64(I40IW_CQP_OP_ALLOC_STAG, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_WQE, "MW_ALLOC WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_mr_fast_register - Posts RDMA fast register mr WR to iwarp qp + * @qp: sc qp struct + * @info: fast mr info + * 
@post_sq: flag for cqp db to ring + */ +enum i40iw_status_code i40iw_sc_mr_fast_register( + struct i40iw_sc_qp *qp, + struct i40iw_fast_reg_stag_info *info, + bool post_sq) +{ + u64 temp, header; + u64 *wqe; + u32 wqe_idx; + enum i40iw_page_size page_size; + + page_size = (info->page_size == 0x200000) ? I40IW_PAGE_SIZE_2M : I40IW_PAGE_SIZE_4K; + wqe = i40iw_qp_get_next_send_wqe(&qp->qp_uk, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, + 0, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + i40iw_debug(qp->dev, I40IW_DEBUG_MR, "%s: wr_id[%llxh] wqe_idx[%04d] location[%p]\n", + __func__, info->wr_id, wqe_idx, + &qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid); + temp = (info->addr_type == I40IW_ADDR_TYPE_VA_BASED) ? (uintptr_t)info->va : info->fbo; + set_64bit_val(wqe, 0, temp); + + temp = RS_64(info->first_pm_pbl_index >> 16, I40IWQPSQ_FIRSTPMPBLIDXHI); + set_64bit_val(wqe, + 8, + LS_64(temp, I40IWQPSQ_FIRSTPMPBLIDXHI) | + LS_64(info->reg_addr_pa >> I40IWQPSQ_PBLADDR_SHIFT, I40IWQPSQ_PBLADDR)); + + set_64bit_val(wqe, + 16, + info->total_len | + LS_64(info->first_pm_pbl_index, I40IWQPSQ_FIRSTPMPBLIDXLO)); + + header = LS_64(info->stag_key, I40IWQPSQ_STAGKEY) | + LS_64(info->stag_idx, I40IWQPSQ_STAGINDEX) | + LS_64(I40IWQP_OP_FAST_REGISTER, I40IWQPSQ_OPCODE) | + LS_64(info->chunk_size, I40IWQPSQ_LPBLSIZE) | + LS_64(page_size, I40IWQPSQ_HPAGESIZE) | + LS_64(info->access_rights, I40IWQPSQ_STAGRIGHTS) | + LS_64(info->addr_type, I40IWQPSQ_VABASEDTO) | + LS_64(info->read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "FAST_REG WQE", + wqe, I40IW_QP_WQE_MIN_SIZE); + + if (post_sq) + i40iw_qp_post_wr(&qp->qp_uk); + return 0; +} + +/** + * i40iw_sc_send_lsmm - send last streaming mode message + * @qp: sc qp struct + * @lsmm_buf: buffer with lsmm message + * @size: size of lsmm buffer + * @stag: stag of lsmm buffer + */ +static void i40iw_sc_send_lsmm(struct i40iw_sc_qp *qp, + void *lsmm_buf, + u32 size, + i40iw_stag stag) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf); + + set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG))); + + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | + LS_64(1, I40IWQPSQ_STREAMMODE) | + LS_64(1, I40IWQPSQ_WAITFORRCVPDU) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_QP, "SEND_LSMM WQE", + wqe, I40IW_QP_WQE_MIN_SIZE); +} + +/** + * i40iw_sc_send_lsmm_nostag - for privilege qp + * @qp: sc qp struct + * @lsmm_buf: buffer with lsmm message + * @size: size of lsmm buffer + */ +static void i40iw_sc_send_lsmm_nostag(struct i40iw_sc_qp *qp, + void *lsmm_buf, + u32 size) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf); + + set_64bit_val(wqe, 8, size); + + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | + LS_64(1, I40IWQPSQ_STREAMMODE) | + LS_64(1, I40IWQPSQ_WAITFORRCVPDU) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "SEND_LSMM_NOSTAG WQE", + wqe, I40IW_QP_WQE_MIN_SIZE); +} + +/** + * 
i40iw_sc_send_rtt - send last read0 or write0 + * @qp: sc qp struct + * @read: Do read0 or write0 + */ +static void i40iw_sc_send_rtt(struct i40iw_sc_qp *qp, bool read) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + if (read) { + header = LS_64(0x1234, I40IWQPSQ_REMSTAG) | + LS_64(I40IWQP_OP_RDMA_READ, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + set_64bit_val(wqe, 8, ((u64)0xabcd << 32)); + } else { + header = LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + } + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_WQE, "RTR WQE", + wqe, I40IW_QP_WQE_MIN_SIZE); +} + +/** + * i40iw_sc_post_wqe0 - send wqe with opcode + * @qp: sc qp struct + * @opcode: opcode to use for wqe0 + */ +static enum i40iw_status_code i40iw_sc_post_wqe0(struct i40iw_sc_qp *qp, u8 opcode) +{ + u64 *wqe; + u64 header; + struct i40iw_qp_uk *qp_uk; + + qp_uk = &qp->qp_uk; + wqe = qp_uk->sq_base->elem; + + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + switch (opcode) { + case I40IWQP_OP_NOP: + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID); + + i40iw_insert_wqe_hdr(wqe, header); + break; + case I40IWQP_OP_RDMA_SEND: + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + header = LS_64(I40IWQP_OP_RDMA_SEND, I40IWQPSQ_OPCODE) | + LS_64(qp->qp_uk.swqe_polarity, I40IWQPSQ_VALID) | + LS_64(1, I40IWQPSQ_STREAMMODE) | + LS_64(1, I40IWQPSQ_WAITFORRCVPDU); + + i40iw_insert_wqe_hdr(wqe, header); + break; + default: + i40iw_debug(qp->dev, I40IW_DEBUG_QP, "%s: Invalid WQE zero opcode\n", + __func__); + break; + } + return 0; +} + +/** + * i40iw_sc_init_iw_hmc() - queries fpm values using cqp and populates hmc_info + * @dev : ptr to i40iw_dev struct + * @hmc_fn_id: hmc function id + */ +enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, u8 hmc_fn_id) +{ + struct i40iw_hmc_info *hmc_info; + struct i40iw_dma_mem query_fpm_mem; + struct i40iw_virt_mem virt_mem; + struct i40iw_vfdev *vf_dev = NULL; + u32 mem_size; + enum i40iw_status_code ret_code = 0; + bool poll_registers = true; + u16 iw_vf_idx; + u8 wait_type; + + if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID || + (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID)) + return I40IW_ERR_INVALID_HMCFN_ID; + + i40iw_debug(dev, I40IW_DEBUG_HMC, "hmc_fn_id %u, dev->hmc_fn_id %u\n", hmc_fn_id, + dev->hmc_fn_id); + if (hmc_fn_id == dev->hmc_fn_id) { + hmc_info = dev->hmc_info; + query_fpm_mem.pa = dev->fpm_query_buf_pa; + query_fpm_mem.va = dev->fpm_query_buf; + } else { + vf_dev = i40iw_vfdev_from_fpm(dev, hmc_fn_id); + if (!vf_dev) + return I40IW_ERR_INVALID_VF_ID; + + hmc_info = &vf_dev->hmc_info; + iw_vf_idx = vf_dev->iw_vf_idx; + i40iw_debug(dev, I40IW_DEBUG_HMC, "vf_dev %p, hmc_info %p, hmc_obj %p\n", vf_dev, + hmc_info, hmc_info->hmc_obj); + if (!vf_dev->fpm_query_buf) { + if (!dev->vf_fpm_query_buf[iw_vf_idx].va) { + ret_code = i40iw_alloc_query_fpm_buf(dev, + &dev->vf_fpm_query_buf[iw_vf_idx]); + if (ret_code) + return ret_code; + } + vf_dev->fpm_query_buf = dev->vf_fpm_query_buf[iw_vf_idx].va; + vf_dev->fpm_query_buf_pa = dev->vf_fpm_query_buf[iw_vf_idx].pa; + } + query_fpm_mem.pa = vf_dev->fpm_query_buf_pa; + query_fpm_mem.va 
= vf_dev->fpm_query_buf; + /** + * It is HARDWARE specific: + * this call is done by PF for VF and + * i40iw_sc_query_fpm_values needs ccq poll + * because PF ccq is already created. + */ + poll_registers = false; + } + + hmc_info->hmc_fn_id = hmc_fn_id; + + if (hmc_fn_id != dev->hmc_fn_id) { + ret_code = + i40iw_cqp_query_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id); + } else { + wait_type = poll_registers ? (u8)I40IW_CQP_WAIT_POLL_REGS : + (u8)I40IW_CQP_WAIT_POLL_CQ; + + ret_code = i40iw_sc_query_fpm_values( + dev->cqp, + 0, + hmc_info->hmc_fn_id, + &query_fpm_mem, + true, + wait_type); + } + if (ret_code) + return ret_code; + + /* parse the fpm_query_buf and fill hmc obj info */ + ret_code = + i40iw_sc_parse_fpm_query_buf((u64 *)query_fpm_mem.va, + hmc_info, + &dev->hmc_fpm_misc); + if (ret_code) + return ret_code; + i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "QUERY FPM BUFFER", + query_fpm_mem.va, I40IW_QUERY_FPM_BUF_SIZE); + + if (hmc_fn_id != dev->hmc_fn_id) { + i40iw_cqp_commit_fpm_values_cmd(dev, &query_fpm_mem, hmc_fn_id); + + /* parse the fpm_commit_buf and fill hmc obj info */ + i40iw_sc_parse_fpm_commit_buf((u64 *)query_fpm_mem.va, hmc_info->hmc_obj, &hmc_info->sd_table.sd_cnt); + mem_size = sizeof(struct i40iw_hmc_sd_entry) * + (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index); + ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size); + if (ret_code) + return ret_code; + hmc_info->sd_table.sd_entry = virt_mem.va; + } + + return ret_code; +} + +/** + * i40iw_sc_configure_iw_fpm() - commits hmc obj cnt values using cqp command and + * populates fpm base address in hmc_info + * @dev : ptr to i40iw_dev struct + * @hmc_fn_id: hmc function id + */ +static enum i40iw_status_code i40iw_sc_configure_iw_fpm(struct i40iw_sc_dev *dev, + u8 hmc_fn_id) +{ + struct i40iw_hmc_info *hmc_info; + struct i40iw_hmc_obj_info *obj_info; + u64 *buf; + struct i40iw_dma_mem commit_fpm_mem; + u32 i, j; + enum i40iw_status_code ret_code = 0; + bool poll_registers = true; + u8 wait_type; + + if (hmc_fn_id >= I40IW_MAX_VF_FPM_ID || + (dev->hmc_fn_id != hmc_fn_id && hmc_fn_id < I40IW_FIRST_VF_FPM_ID)) + return I40IW_ERR_INVALID_HMCFN_ID; + + if (hmc_fn_id == dev->hmc_fn_id) { + hmc_info = dev->hmc_info; + } else { + hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, hmc_fn_id); + poll_registers = false; + } + if (!hmc_info) + return I40IW_ERR_BAD_PTR; + + obj_info = hmc_info->hmc_obj; + buf = dev->fpm_commit_buf; + + /* copy cnt values in commit buf */ + for (i = I40IW_HMC_IW_QP, j = 0; i <= I40IW_HMC_IW_PBLE; + i++, j += 8) + set_64bit_val(buf, j, (u64)obj_info[i].cnt); + + set_64bit_val(buf, 40, 0); /* APBVT rsvd */ + + commit_fpm_mem.pa = dev->fpm_commit_buf_pa; + commit_fpm_mem.va = dev->fpm_commit_buf; + wait_type = poll_registers ? 
(u8)I40IW_CQP_WAIT_POLL_REGS : + (u8)I40IW_CQP_WAIT_POLL_CQ; + ret_code = i40iw_sc_commit_fpm_values( + dev->cqp, + 0, + hmc_info->hmc_fn_id, + &commit_fpm_mem, + true, + wait_type); + + /* parse the fpm_commit_buf and fill hmc obj info */ + if (!ret_code) + ret_code = i40iw_sc_parse_fpm_commit_buf(dev->fpm_commit_buf, + hmc_info->hmc_obj, + &hmc_info->sd_table.sd_cnt); + + i40iw_debug_buf(dev, I40IW_DEBUG_HMC, "COMMIT FPM BUFFER", + commit_fpm_mem.va, I40IW_COMMIT_FPM_BUF_SIZE); + + return ret_code; +} + +/** + * cqp_sds_wqe_fill - fill cqp wqe doe sd + * @cqp: struct for cqp hw + * @info; sd info for wqe + * @scratch: u64 saved to be used during cqp completion + */ +static enum i40iw_status_code cqp_sds_wqe_fill(struct i40iw_sc_cqp *cqp, + struct i40iw_update_sds_info *info, + u64 scratch) +{ + u64 data; + u64 header; + u64 *wqe; + int mem_entries, wqe_entries; + struct i40iw_dma_mem *sdbuf = &cqp->sdbuf; + u64 offset; + u32 wqe_idx; + + wqe = i40iw_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx); + if (!wqe) + return I40IW_ERR_RING_FULL; + + I40IW_CQP_INIT_WQE(wqe); + wqe_entries = (info->cnt > 3) ? 3 : info->cnt; + mem_entries = info->cnt - wqe_entries; + + header = LS_64(I40IW_CQP_OP_UPDATE_PE_SDS, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID) | + LS_64(mem_entries, I40IW_CQPSQ_UPESD_ENTRY_COUNT); + + if (mem_entries) { + offset = wqe_idx * I40IW_UPDATE_SD_BUF_SIZE; + memcpy((char *)sdbuf->va + offset, &info->entry[3], + mem_entries << 4); + data = (u64)sdbuf->pa + offset; + } else { + data = 0; + } + data |= LS_64(info->hmc_fn_id, I40IW_CQPSQ_UPESD_HMCFNID); + + set_64bit_val(wqe, 16, data); + + switch (wqe_entries) { + case 3: + set_64bit_val(wqe, 48, + (LS_64(info->entry[2].cmd, I40IW_CQPSQ_UPESD_SDCMD) | + LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); + + set_64bit_val(wqe, 56, info->entry[2].data); + /* fallthrough */ + case 2: + set_64bit_val(wqe, 32, + (LS_64(info->entry[1].cmd, I40IW_CQPSQ_UPESD_SDCMD) | + LS_64(1, I40IW_CQPSQ_UPESD_ENTRY_VALID))); + + set_64bit_val(wqe, 40, info->entry[1].data); + /* fallthrough */ + case 1: + set_64bit_val(wqe, 0, + LS_64(info->entry[0].cmd, I40IW_CQPSQ_UPESD_SDCMD)); + + set_64bit_val(wqe, 8, info->entry[0].data); + break; + default: + break; + } + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "UPDATE_PE_SDS WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + return 0; +} + +/** + * i40iw_update_pe_sds - cqp wqe for sd + * @dev: ptr to i40iw_dev struct + * @info: sd info for sd's + * @scratch: u64 saved to be used during cqp completion + */ +static enum i40iw_status_code i40iw_update_pe_sds(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info, + u64 scratch) +{ + struct i40iw_sc_cqp *cqp = dev->cqp; + enum i40iw_status_code ret_code; + + ret_code = cqp_sds_wqe_fill(cqp, info, scratch); + if (!ret_code) + i40iw_sc_cqp_post_sq(cqp); + + return ret_code; +} + +/** + * i40iw_update_sds_noccq - update sd before ccq created + * @dev: sc device struct + * @info: sd info for sd's + */ +enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info) +{ + u32 error, val, tail; + struct i40iw_sc_cqp *cqp = dev->cqp; + enum i40iw_status_code ret_code; + + ret_code = cqp_sds_wqe_fill(cqp, info, 0); + if (ret_code) + return ret_code; + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) + return I40IW_ERR_CQP_COMPL_ERROR; + + i40iw_sc_cqp_post_sq(cqp); + ret_code = i40iw_cqp_poll_registers(cqp, tail, I40IW_DONE_COUNT); + + return ret_code; 
+} + +/** + * i40iw_sc_suspend_qp - suspend qp for param change + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used during cqp completion + */ +enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, + u64 scratch) +{ + u64 header; + u64 *wqe; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_SUSPENDQP_QPID) | + LS_64(I40IW_CQP_OP_SUSPEND_QP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SUSPEND_QP WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_resume_qp - resume qp after suspend + * @cqp: struct for cqp hw + * @qp: sc qp struct + * @scratch: u64 saved to be used during cqp completion + */ +enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, + u64 scratch) +{ + u64 header; + u64 *wqe; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 16, + LS_64(qp->qs_handle, I40IW_CQPSQ_RESUMEQP_QSHANDLE)); + + header = LS_64(qp->qp_uk.qp_id, I40IW_CQPSQ_RESUMEQP_QPID) | + LS_64(I40IW_CQP_OP_RESUME_QP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "RESUME_QP WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +/** + * i40iw_sc_static_hmc_pages_allocated - cqp wqe to allocate hmc pages + * @cqp: struct for cqp hw + * @scratch: u64 saved to be used during cqp completion + * @hmc_fn_id: hmc function id + * @post_sq: flag for cqp db to ring + * @poll_registers: flag to poll register for cqp completion + */ +enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated( + struct i40iw_sc_cqp *cqp, + u64 scratch, + u8 hmc_fn_id, + bool post_sq, + bool poll_registers) +{ + u64 header; + u64 *wqe; + u32 tail, val, error; + enum i40iw_status_code ret_code = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + set_64bit_val(wqe, + 16, + LS_64(hmc_fn_id, I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID)); + + header = LS_64(I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "SHMC_PAGES_ALLOCATED WQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + i40iw_get_cqp_reg_info(cqp, &val, &tail, &error); + if (error) { + ret_code = I40IW_ERR_CQP_COMPL_ERROR; + return ret_code; + } + if (post_sq) { + i40iw_sc_cqp_post_sq(cqp); + if (poll_registers) + /* check for cqp sq tail update */ + ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); + else + ret_code = i40iw_sc_poll_for_cqp_op_done(cqp, + I40IW_CQP_OP_SHMC_PAGES_ALLOCATED, + NULL); + } + + return ret_code; +} + +/** + * i40iw_ring_full - check if cqp ring is full + * @cqp: struct for cqp hw + */ +static bool i40iw_ring_full(struct i40iw_sc_cqp *cqp) +{ + return I40IW_RING_FULL_ERR(cqp->sq_ring); +} + +/** + * i40iw_est_sd - returns approximate number of SDs for HMC + * @dev: sc device struct + * @hmc_info: hmc structure, size and count for HMC objects + */ +static u64 i40iw_est_sd(struct i40iw_sc_dev *dev, struct i40iw_hmc_info *hmc_info) +{ + int i; + u64 size = 0; + u64 sd; + + for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_PBLE; i++) + 
size += hmc_info->hmc_obj[i].cnt * hmc_info->hmc_obj[i].size; + + if (dev->is_pf) + size += hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size; + + if (size & 0x1FFFFF) + sd = (size >> 21) + 1; /* add 1 for remainder */ + else + sd = size >> 21; + + if (!dev->is_pf) { + /* 2MB alignment for VF PBLE HMC */ + size = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt * hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].size; + if (size & 0x1FFFFF) + sd += (size >> 21) + 1; /* add 1 for remainder */ + else + sd += size >> 21; + } + + return sd; +} + +/** + * i40iw_config_fpm_values - configure HMC objects + * @dev: sc device struct + * @qp_count: desired qp count + */ +enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count) +{ + struct i40iw_virt_mem virt_mem; + u32 i, mem_size; + u32 qpwantedoriginal, qpwanted, mrwanted, pblewanted; + u64 sd_needed; + u32 loop_count = 0; + + struct i40iw_hmc_info *hmc_info; + struct i40iw_hmc_fpm_misc *hmc_fpm_misc; + enum i40iw_status_code ret_code = 0; + + hmc_info = dev->hmc_info; + hmc_fpm_misc = &dev->hmc_fpm_misc; + + ret_code = i40iw_sc_init_iw_hmc(dev, dev->hmc_fn_id); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "i40iw_sc_init_iw_hmc returned error_code = %d\n", + ret_code); + return ret_code; + } + + for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) + hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt; + sd_needed = i40iw_est_sd(dev, hmc_info); + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: FW initial max sd_count[%08lld] first_sd_index[%04d]\n", + __func__, sd_needed, hmc_info->first_sd_index); + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: sd count %d where max sd is %d\n", + __func__, hmc_info->sd_table.sd_cnt, + hmc_fpm_misc->max_sds); + + qpwanted = min(qp_count, hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt); + qpwantedoriginal = qpwanted; + mrwanted = hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt; + pblewanted = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt; + + i40iw_debug(dev, I40IW_DEBUG_HMC, + "req_qp=%d max_sd=%d, max_qp = %d, max_cq=%d, max_mr=%d, max_pble=%d\n", + qp_count, hmc_fpm_misc->max_sds, + hmc_info->hmc_obj[I40IW_HMC_IW_QP].max_cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_CQ].max_cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_MR].max_cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].max_cnt); + + do { + ++loop_count; + hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt = qpwanted; + hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt = + min(2 * qpwanted, hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt); + hmc_info->hmc_obj[I40IW_HMC_IW_SRQ].cnt = 0x00; /* Reserved */ + hmc_info->hmc_obj[I40IW_HMC_IW_HTE].cnt = + qpwanted * hmc_fpm_misc->ht_multiplier; + hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt = + hmc_info->hmc_obj[I40IW_HMC_IW_ARP].max_cnt; + hmc_info->hmc_obj[I40IW_HMC_IW_APBVT_ENTRY].cnt = 1; + hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt = mrwanted; + + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt = + roundup_pow_of_two(I40IW_MAX_WQ_ENTRIES * qpwanted); + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt = + roundup_pow_of_two(2 * I40IW_MAX_IRD_SIZE * qpwanted); + hmc_info->hmc_obj[I40IW_HMC_IW_XFFL].cnt = + hmc_info->hmc_obj[I40IW_HMC_IW_XF].cnt / hmc_fpm_misc->xf_block_size; + hmc_info->hmc_obj[I40IW_HMC_IW_Q1FL].cnt = + hmc_info->hmc_obj[I40IW_HMC_IW_Q1].cnt / hmc_fpm_misc->q1_block_size; + hmc_info->hmc_obj[I40IW_HMC_IW_TIMER].cnt = + ((qpwanted) / 512 + 1) * hmc_fpm_misc->timer_bucket; + hmc_info->hmc_obj[I40IW_HMC_IW_FSIMC].cnt = 0x00; + hmc_info->hmc_obj[I40IW_HMC_IW_FSIAV].cnt = 0x00; + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt = pblewanted; + + /* 
How much memory is needed for all the objects. */ + sd_needed = i40iw_est_sd(dev, hmc_info); + if ((loop_count > 1000) || + ((!(loop_count % 10)) && + (qpwanted > qpwantedoriginal * 2 / 3))) { + if (qpwanted > FPM_MULTIPLIER) + qpwanted = roundup_pow_of_two(qpwanted - + FPM_MULTIPLIER); + qpwanted >>= 1; + } + if (mrwanted > FPM_MULTIPLIER * 10) + mrwanted -= FPM_MULTIPLIER * 10; + if (pblewanted > FPM_MULTIPLIER * 1000) + pblewanted -= FPM_MULTIPLIER * 1000; + } while (sd_needed > hmc_fpm_misc->max_sds && loop_count < 2000); + + i40iw_debug(dev, I40IW_DEBUG_HMC, + "loop_cnt=%d, sd_needed=%lld, qpcnt = %d, cqcnt=%d, mrcnt=%d, pblecnt=%d\n", + loop_count, sd_needed, + hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt, + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt); + + ret_code = i40iw_sc_configure_iw_fpm(dev, dev->hmc_fn_id); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "configure_iw_fpm returned error_code[x%08X]\n", + i40iw_rd32(dev->hw, dev->is_pf ? I40E_PFPE_CQPERRCODES : I40E_VFPE_CQPERRCODES1)); + return ret_code; + } + + mem_size = sizeof(struct i40iw_hmc_sd_entry) * + (hmc_info->sd_table.sd_cnt + hmc_info->first_sd_index + 1); + ret_code = i40iw_allocate_virt_mem(dev->hw, &virt_mem, mem_size); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: failed to allocate memory for sd_entry buffer\n", + __func__); + return ret_code; + } + hmc_info->sd_table.sd_entry = virt_mem.va; + + return ret_code; +} + +/** + * i40iw_exec_cqp_cmd - execute cqp cmd when wqe are available + * @dev: rdma device + * @pcmdinfo: cqp command info + */ +static enum i40iw_status_code i40iw_exec_cqp_cmd(struct i40iw_sc_dev *dev, + struct cqp_commands_info *pcmdinfo) +{ + enum i40iw_status_code status; + struct i40iw_dma_mem values_mem; + + dev->cqp_cmd_stats[pcmdinfo->cqp_cmd]++; + switch (pcmdinfo->cqp_cmd) { + case OP_DELETE_LOCAL_MAC_IPADDR_ENTRY: + status = i40iw_sc_del_local_mac_ipaddr_entry( + pcmdinfo->in.u.del_local_mac_ipaddr_entry.cqp, + pcmdinfo->in.u.del_local_mac_ipaddr_entry.scratch, + pcmdinfo->in.u.del_local_mac_ipaddr_entry.entry_idx, + pcmdinfo->in.u.del_local_mac_ipaddr_entry.ignore_ref_count, + pcmdinfo->post_sq); + break; + case OP_CEQ_DESTROY: + status = i40iw_sc_ceq_destroy(pcmdinfo->in.u.ceq_destroy.ceq, + pcmdinfo->in.u.ceq_destroy.scratch, + pcmdinfo->post_sq); + break; + case OP_AEQ_DESTROY: + status = i40iw_sc_aeq_destroy(pcmdinfo->in.u.aeq_destroy.aeq, + pcmdinfo->in.u.aeq_destroy.scratch, + pcmdinfo->post_sq); + + break; + case OP_DELETE_ARP_CACHE_ENTRY: + status = i40iw_sc_del_arp_cache_entry( + pcmdinfo->in.u.del_arp_cache_entry.cqp, + pcmdinfo->in.u.del_arp_cache_entry.scratch, + pcmdinfo->in.u.del_arp_cache_entry.arp_index, + pcmdinfo->post_sq); + break; + case OP_MANAGE_APBVT_ENTRY: + status = i40iw_sc_manage_apbvt_entry( + pcmdinfo->in.u.manage_apbvt_entry.cqp, + &pcmdinfo->in.u.manage_apbvt_entry.info, + pcmdinfo->in.u.manage_apbvt_entry.scratch, + pcmdinfo->post_sq); + break; + case OP_CEQ_CREATE: + status = i40iw_sc_ceq_create(pcmdinfo->in.u.ceq_create.ceq, + pcmdinfo->in.u.ceq_create.scratch, + pcmdinfo->post_sq); + break; + case OP_AEQ_CREATE: + status = i40iw_sc_aeq_create(pcmdinfo->in.u.aeq_create.aeq, + pcmdinfo->in.u.aeq_create.scratch, + pcmdinfo->post_sq); + break; + case OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY: + status = i40iw_sc_alloc_local_mac_ipaddr_entry( + pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.cqp, + pcmdinfo->in.u.alloc_local_mac_ipaddr_entry.scratch, + 
pcmdinfo->post_sq); + break; + case OP_ADD_LOCAL_MAC_IPADDR_ENTRY: + status = i40iw_sc_add_local_mac_ipaddr_entry( + pcmdinfo->in.u.add_local_mac_ipaddr_entry.cqp, + &pcmdinfo->in.u.add_local_mac_ipaddr_entry.info, + pcmdinfo->in.u.add_local_mac_ipaddr_entry.scratch, + pcmdinfo->post_sq); + break; + case OP_MANAGE_QHASH_TABLE_ENTRY: + status = i40iw_sc_manage_qhash_table_entry( + pcmdinfo->in.u.manage_qhash_table_entry.cqp, + &pcmdinfo->in.u.manage_qhash_table_entry.info, + pcmdinfo->in.u.manage_qhash_table_entry.scratch, + pcmdinfo->post_sq); + + break; + case OP_QP_MODIFY: + status = i40iw_sc_qp_modify( + pcmdinfo->in.u.qp_modify.qp, + &pcmdinfo->in.u.qp_modify.info, + pcmdinfo->in.u.qp_modify.scratch, + pcmdinfo->post_sq); + + break; + case OP_QP_UPLOAD_CONTEXT: + status = i40iw_sc_qp_upload_context( + pcmdinfo->in.u.qp_upload_context.dev, + &pcmdinfo->in.u.qp_upload_context.info, + pcmdinfo->in.u.qp_upload_context.scratch, + pcmdinfo->post_sq); + + break; + case OP_CQ_CREATE: + status = i40iw_sc_cq_create( + pcmdinfo->in.u.cq_create.cq, + pcmdinfo->in.u.cq_create.scratch, + pcmdinfo->in.u.cq_create.check_overflow, + pcmdinfo->post_sq); + break; + case OP_CQ_DESTROY: + status = i40iw_sc_cq_destroy( + pcmdinfo->in.u.cq_destroy.cq, + pcmdinfo->in.u.cq_destroy.scratch, + pcmdinfo->post_sq); + + break; + case OP_QP_CREATE: + status = i40iw_sc_qp_create( + pcmdinfo->in.u.qp_create.qp, + &pcmdinfo->in.u.qp_create.info, + pcmdinfo->in.u.qp_create.scratch, + pcmdinfo->post_sq); + break; + case OP_QP_DESTROY: + status = i40iw_sc_qp_destroy( + pcmdinfo->in.u.qp_destroy.qp, + pcmdinfo->in.u.qp_destroy.scratch, + pcmdinfo->in.u.qp_destroy.remove_hash_idx, + pcmdinfo->in.u.qp_destroy. + ignore_mw_bnd, + pcmdinfo->post_sq); + + break; + case OP_ALLOC_STAG: + status = i40iw_sc_alloc_stag( + pcmdinfo->in.u.alloc_stag.dev, + &pcmdinfo->in.u.alloc_stag.info, + pcmdinfo->in.u.alloc_stag.scratch, + pcmdinfo->post_sq); + break; + case OP_MR_REG_NON_SHARED: + status = i40iw_sc_mr_reg_non_shared( + pcmdinfo->in.u.mr_reg_non_shared.dev, + &pcmdinfo->in.u.mr_reg_non_shared.info, + pcmdinfo->in.u.mr_reg_non_shared.scratch, + pcmdinfo->post_sq); + + break; + case OP_DEALLOC_STAG: + status = i40iw_sc_dealloc_stag( + pcmdinfo->in.u.dealloc_stag.dev, + &pcmdinfo->in.u.dealloc_stag.info, + pcmdinfo->in.u.dealloc_stag.scratch, + pcmdinfo->post_sq); + + break; + case OP_MW_ALLOC: + status = i40iw_sc_mw_alloc( + pcmdinfo->in.u.mw_alloc.dev, + pcmdinfo->in.u.mw_alloc.scratch, + pcmdinfo->in.u.mw_alloc.mw_stag_index, + pcmdinfo->in.u.mw_alloc.pd_id, + pcmdinfo->post_sq); + + break; + case OP_QP_FLUSH_WQES: + status = i40iw_sc_qp_flush_wqes( + pcmdinfo->in.u.qp_flush_wqes.qp, + &pcmdinfo->in.u.qp_flush_wqes.info, + pcmdinfo->in.u.qp_flush_wqes. 
+ scratch, pcmdinfo->post_sq); + break; + case OP_GEN_AE: + status = i40iw_sc_gen_ae( + pcmdinfo->in.u.gen_ae.qp, + &pcmdinfo->in.u.gen_ae.info, + pcmdinfo->in.u.gen_ae.scratch, + pcmdinfo->post_sq); + break; + case OP_ADD_ARP_CACHE_ENTRY: + status = i40iw_sc_add_arp_cache_entry( + pcmdinfo->in.u.add_arp_cache_entry.cqp, + &pcmdinfo->in.u.add_arp_cache_entry.info, + pcmdinfo->in.u.add_arp_cache_entry.scratch, + pcmdinfo->post_sq); + break; + case OP_MANAGE_PUSH_PAGE: + status = i40iw_sc_manage_push_page( + pcmdinfo->in.u.manage_push_page.cqp, + &pcmdinfo->in.u.manage_push_page.info, + pcmdinfo->in.u.manage_push_page.scratch, + pcmdinfo->post_sq); + break; + case OP_UPDATE_PE_SDS: + /* case I40IW_CQP_OP_UPDATE_PE_SDS */ + status = i40iw_update_pe_sds( + pcmdinfo->in.u.update_pe_sds.dev, + &pcmdinfo->in.u.update_pe_sds.info, + pcmdinfo->in.u.update_pe_sds. + scratch); + + break; + case OP_MANAGE_HMC_PM_FUNC_TABLE: + status = i40iw_sc_manage_hmc_pm_func_table( + pcmdinfo->in.u.manage_hmc_pm.dev->cqp, + pcmdinfo->in.u.manage_hmc_pm.scratch, + (u8)pcmdinfo->in.u.manage_hmc_pm.info.vf_id, + pcmdinfo->in.u.manage_hmc_pm.info.free_fcn, + true); + break; + case OP_SUSPEND: + status = i40iw_sc_suspend_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case OP_RESUME: + status = i40iw_sc_resume_qp( + pcmdinfo->in.u.suspend_resume.cqp, + pcmdinfo->in.u.suspend_resume.qp, + pcmdinfo->in.u.suspend_resume.scratch); + break; + case OP_MANAGE_VF_PBLE_BP: + status = i40iw_manage_vf_pble_bp( + pcmdinfo->in.u.manage_vf_pble_bp.cqp, + &pcmdinfo->in.u.manage_vf_pble_bp.info, + pcmdinfo->in.u.manage_vf_pble_bp.scratch, true); + break; + case OP_QUERY_FPM_VALUES: + values_mem.pa = pcmdinfo->in.u.query_fpm_values.fpm_values_pa; + values_mem.va = pcmdinfo->in.u.query_fpm_values.fpm_values_va; + status = i40iw_sc_query_fpm_values( + pcmdinfo->in.u.query_fpm_values.cqp, + pcmdinfo->in.u.query_fpm_values.scratch, + pcmdinfo->in.u.query_fpm_values.hmc_fn_id, + &values_mem, true, I40IW_CQP_WAIT_EVENT); + break; + case OP_COMMIT_FPM_VALUES: + values_mem.pa = pcmdinfo->in.u.commit_fpm_values.fpm_values_pa; + values_mem.va = pcmdinfo->in.u.commit_fpm_values.fpm_values_va; + status = i40iw_sc_commit_fpm_values( + pcmdinfo->in.u.commit_fpm_values.cqp, + pcmdinfo->in.u.commit_fpm_values.scratch, + pcmdinfo->in.u.commit_fpm_values.hmc_fn_id, + &values_mem, + true, + I40IW_CQP_WAIT_EVENT); + break; + default: + status = I40IW_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * i40iw_process_cqp_cmd - process all cqp commands + * @dev: sc device struct + * @pcmdinfo: cqp command info + */ +enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev, + struct cqp_commands_info *pcmdinfo) +{ + enum i40iw_status_code status = 0; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + if (list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) + status = i40iw_exec_cqp_cmd(dev, pcmdinfo); + else + list_add_tail(&pcmdinfo->cqp_cmd_entry, &dev->cqp_cmd_head); + spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +/** + * i40iw_process_bh - called from tasklet for cqp list + * @dev: sc device struct + */ +enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev) +{ + enum i40iw_status_code status = 0; + struct cqp_commands_info *pcmdinfo; + unsigned long flags; + + spin_lock_irqsave(&dev->cqp_lock, flags); + while (!list_empty(&dev->cqp_cmd_head) && !i40iw_ring_full(dev->cqp)) { + pcmdinfo = 
(struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head); + + status = i40iw_exec_cqp_cmd(dev, pcmdinfo); + if (status) + break; + } + spin_unlock_irqrestore(&dev->cqp_lock, flags); + return status; +} + +/** + * i40iw_iwarp_opcode - determine if incoming is rdma layer + * @info: aeq info for the packet + * @pkt: packet for error + */ +static u32 i40iw_iwarp_opcode(struct i40iw_aeqe_info *info, u8 *pkt) +{ + __be16 *mpa; + u32 opcode = 0xffffffff; + + if (info->q2_data_written) { + mpa = (__be16 *)pkt; + opcode = ntohs(mpa[1]) & 0xf; + } + return opcode; +} + +/** + * i40iw_locate_mpa - return pointer to mpa in the pkt + * @pkt: packet with data + */ +static u8 *i40iw_locate_mpa(u8 *pkt) +{ + /* skip over ethernet header */ + pkt += I40IW_MAC_HLEN; + + /* Skip over IP and TCP headers */ + pkt += 4 * (pkt[0] & 0x0f); + pkt += 4 * ((pkt[12] >> 4) & 0x0f); + return pkt; +} + +/** + * i40iw_setup_termhdr - termhdr for terminate pkt + * @qp: sc qp ptr for pkt + * @hdr: term hdr + * @opcode: flush opcode for termhdr + * @layer_etype: error layer + error type + * @err: error cod ein the header + */ +static void i40iw_setup_termhdr(struct i40iw_sc_qp *qp, + struct i40iw_terminate_hdr *hdr, + enum i40iw_flush_opcode opcode, + u8 layer_etype, + u8 err) +{ + qp->flush_code = opcode; + hdr->layer_etype = layer_etype; + hdr->error_code = err; +} + +/** + * i40iw_bld_terminate_hdr - build terminate message header + * @qp: qp associated with received terminate AE + * @info: the struct contiaing AE information + */ +static int i40iw_bld_terminate_hdr(struct i40iw_sc_qp *qp, + struct i40iw_aeqe_info *info) +{ + u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; + u16 ddp_seg_len; + int copy_len = 0; + u8 is_tagged = 0; + u32 opcode; + struct i40iw_terminate_hdr *termhdr; + + termhdr = (struct i40iw_terminate_hdr *)qp->q2_buf; + memset(termhdr, 0, Q2_BAD_FRAME_OFFSET); + + if (info->q2_data_written) { + /* Use data from offending packet to fill in ddp & rdma hdrs */ + pkt = i40iw_locate_mpa(pkt); + ddp_seg_len = ntohs(*(__be16 *)pkt); + if (ddp_seg_len) { + copy_len = 2; + termhdr->hdrct = DDP_LEN_FLAG; + if (pkt[2] & 0x80) { + is_tagged = 1; + if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) { + copy_len += TERM_DDP_LEN_TAGGED; + termhdr->hdrct |= DDP_HDR_FLAG; + } + } else { + if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) { + copy_len += TERM_DDP_LEN_UNTAGGED; + termhdr->hdrct |= DDP_HDR_FLAG; + } + + if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) { + if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) { + copy_len += TERM_RDMA_LEN; + termhdr->hdrct |= RDMA_HDR_FLAG; + } + } + } + } + } + + opcode = i40iw_iwarp_opcode(info, pkt); + + switch (info->ae_id) { + case I40IW_AE_AMP_UNALLOCATED_STAG: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + if (opcode == I40IW_OP_TYPE_RDMA_WRITE) + i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, + (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_STAG); + else + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); + break; + case I40IW_AE_AMP_BOUNDS_VIOLATION: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + if (info->q2_data_written) + i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, + (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_BOUNDS); + else + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_BOUNDS); + break; + case I40IW_AE_AMP_BAD_PD: + switch (opcode) { + case I40IW_OP_TYPE_RDMA_WRITE: + i40iw_setup_termhdr(qp, termhdr, FLUSH_PROT_ERR, + 
(LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_UNASSOC_STAG); + break; + case I40IW_OP_TYPE_SEND_INV: + case I40IW_OP_TYPE_SEND_SOL_INV: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_CANT_INV_STAG); + break; + default: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_UNASSOC_STAG); + } + break; + case I40IW_AE_AMP_INVALID_STAG: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_INV_STAG); + break; + case I40IW_AE_AMP_BAD_QP: + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN); + break; + case I40IW_AE_AMP_BAD_STAG_KEY: + case I40IW_AE_AMP_BAD_STAG_INDEX: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + switch (opcode) { + case I40IW_OP_TYPE_SEND_INV: + case I40IW_OP_TYPE_SEND_SOL_INV: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_CANT_INV_STAG); + break; + default: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_STAG); + } + break; + case I40IW_AE_AMP_RIGHTS_VIOLATION: + case I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: + case I40IW_AE_PRIV_OPERATION_DENIED: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_ACCESS); + break; + case I40IW_AE_AMP_TO_WRAP: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_ACCESS_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT, RDMAP_TO_WRAP); + break; + case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_MPA << 4) | DDP_LLP, MPA_CRC); + break; + case I40IW_AE_LLP_SEGMENT_TOO_LARGE: + case I40IW_AE_LLP_SEGMENT_TOO_SMALL: + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, + (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); + break; + case I40IW_AE_LCE_QP_CATASTROPHIC: + case I40IW_AE_DDP_NO_L_BIT: + i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR, + (LAYER_DDP << 4) | DDP_CATASTROPHIC, DDP_CATASTROPHIC_LOCAL); + break; + case I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_RANGE); + break; + case I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: + qp->eventtype = TERM_EVENT_QP_ACCESS_ERR; + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_LEN_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_TOO_LONG); + break; + case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION: + if (is_tagged) + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_TAGGED_BUFFER, DDP_TAGGED_INV_DDP_VER); + else + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_DDP_VER); + break; + case I40IW_AE_DDP_UBE_INVALID_MO: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MO); + break; + case I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE: + i40iw_setup_termhdr(qp, termhdr, FLUSH_REM_OP_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_MSN_NO_BUF); + break; + case I40IW_AE_DDP_UBE_INVALID_QN: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER, DDP_UNTAGGED_INV_QN); + break; + case 
I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: + i40iw_setup_termhdr(qp, termhdr, FLUSH_GENERAL_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_INV_RDMAP_VER); + break; + case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE: + i40iw_setup_termhdr(qp, termhdr, FLUSH_LOC_QP_OP_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNEXPECTED_OP); + break; + default: + i40iw_setup_termhdr(qp, termhdr, FLUSH_FATAL_ERR, + (LAYER_RDMA << 4) | RDMAP_REMOTE_OP, RDMAP_UNSPECIFIED); + break; + } + + if (copy_len) + memcpy(termhdr + 1, pkt, copy_len); + + return sizeof(struct i40iw_terminate_hdr) + copy_len; +} + +/** + * i40iw_terminate_send_fin() - Send fin for terminate message + * @qp: qp associated with received terminate AE + */ +void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp) +{ + /* Send the fin only */ + i40iw_term_modify_qp(qp, + I40IW_QP_STATE_TERMINATE, + I40IWQP_TERM_SEND_FIN_ONLY, + 0); +} + +/** + * i40iw_terminate_connection() - Bad AE and send terminate to remote QP + * @qp: qp associated with received terminate AE + * @info: the struct contiaing AE information + */ +void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info) +{ + u8 termlen = 0; + + if (qp->term_flags & I40IW_TERM_SENT) + return; /* Sanity check */ + + /* Eventtype can change from bld_terminate_hdr */ + qp->eventtype = TERM_EVENT_QP_FATAL; + termlen = i40iw_bld_terminate_hdr(qp, info); + i40iw_terminate_start_timer(qp); + qp->term_flags |= I40IW_TERM_SENT; + i40iw_term_modify_qp(qp, I40IW_QP_STATE_TERMINATE, + I40IWQP_TERM_SEND_TERM_ONLY, termlen); +} + +/** + * i40iw_terminate_received - handle terminate received AE + * @qp: qp associated with received terminate AE + * @info: the struct contiaing AE information + */ +void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info) +{ + u8 *pkt = qp->q2_buf + Q2_BAD_FRAME_OFFSET; + __be32 *mpa; + u8 ddp_ctl; + u8 rdma_ctl; + u16 aeq_id = 0; + struct i40iw_terminate_hdr *termhdr; + + mpa = (__be32 *)i40iw_locate_mpa(pkt); + if (info->q2_data_written) { + /* did not validate the frame - do it now */ + ddp_ctl = (ntohl(mpa[0]) >> 8) & 0xff; + rdma_ctl = ntohl(mpa[0]) & 0xff; + if ((ddp_ctl & 0xc0) != 0x40) + aeq_id = I40IW_AE_LCE_QP_CATASTROPHIC; + else if ((ddp_ctl & 0x03) != 1) + aeq_id = I40IW_AE_DDP_UBE_INVALID_DDP_VERSION; + else if (ntohl(mpa[2]) != 2) + aeq_id = I40IW_AE_DDP_UBE_INVALID_QN; + else if (ntohl(mpa[3]) != 1) + aeq_id = I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN; + else if (ntohl(mpa[4]) != 0) + aeq_id = I40IW_AE_DDP_UBE_INVALID_MO; + else if ((rdma_ctl & 0xc0) != 0x40) + aeq_id = I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION; + + info->ae_id = aeq_id; + if (info->ae_id) { + /* Bad terminate recvd - send back a terminate */ + i40iw_terminate_connection(qp, info); + return; + } + } + + qp->term_flags |= I40IW_TERM_RCVD; + qp->eventtype = TERM_EVENT_QP_FATAL; + termhdr = (struct i40iw_terminate_hdr *)&mpa[5]; + if (termhdr->layer_etype == RDMAP_REMOTE_PROT || + termhdr->layer_etype == RDMAP_REMOTE_OP) { + i40iw_terminate_done(qp, 0); + } else { + i40iw_terminate_start_timer(qp); + i40iw_terminate_send_fin(qp); + } +} + +/** + * i40iw_sc_vsi_init - Initialize virtual device + * @vsi: pointer to the vsi structure + * @info: parameters to initialize vsi + **/ +void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info) +{ + int i; + + vsi->dev = info->dev; + vsi->back_vsi = info->back_vsi; + vsi->mtu = info->params->mtu; + vsi->exception_lan_queue = info->exception_lan_queue; + 
i40iw_fill_qos_list(info->params->qs_handle_list);
+
+ for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
+ vsi->qos[i].qs_handle = info->params->qs_handle_list[i];
+ i40iw_debug(vsi->dev, I40IW_DEBUG_DCB, "qset[%d]: %d\n", i,
+ vsi->qos[i].qs_handle);
+ spin_lock_init(&vsi->qos[i].lock);
+ INIT_LIST_HEAD(&vsi->qos[i].qplist);
+ }
+}
+
+/**
+ * i40iw_hw_stats_init - Initialize HW stats table
+ * @stats: pestat struct
+ * @fcn_idx: PCI fn id
+ * @is_pf: Is it a PF?
+ *
+ * Populate the HW stats table with the register offset address for
+ * each stat and start the periodic stats timer.
+ */
+void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 fcn_idx, bool is_pf)
+{
+ u32 stats_reg_offset;
+ u32 stats_index;
+ struct i40iw_dev_hw_stats_offsets *stats_table =
+ &stats->hw_stats_offsets;
+ struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats;
+
+ if (is_pf) {
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] =
+ I40E_GLPES_PFIP4RXDISCARD(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] =
+ I40E_GLPES_PFIP4RXTRUNC(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] =
+ I40E_GLPES_PFIP4TXNOROUTE(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] =
+ I40E_GLPES_PFIP6RXDISCARD(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] =
+ I40E_GLPES_PFIP6RXTRUNC(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] =
+ I40E_GLPES_PFIP6TXNOROUTE(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] =
+ I40E_GLPES_PFTCPRTXSEG(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] =
+ I40E_GLPES_PFTCPRXOPTERR(fcn_idx);
+ stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] =
+ I40E_GLPES_PFTCPRXPROTOERR(fcn_idx);
+
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] =
+ I40E_GLPES_PFIP4RXOCTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] =
+ I40E_GLPES_PFIP4RXPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] =
+ I40E_GLPES_PFIP4RXFRAGSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] =
+ I40E_GLPES_PFIP4RXMCPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] =
+ I40E_GLPES_PFIP4TXOCTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] =
+ I40E_GLPES_PFIP4TXPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] =
+ I40E_GLPES_PFIP4TXFRAGSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] =
+ I40E_GLPES_PFIP4TXMCPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] =
+ I40E_GLPES_PFIP6RXOCTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] =
+ I40E_GLPES_PFIP6RXPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] =
+ I40E_GLPES_PFIP6RXFRAGSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] =
+ I40E_GLPES_PFIP6RXMCPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] =
+ I40E_GLPES_PFIP6TXOCTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] =
+ I40E_GLPES_PFIP6TXPKTSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] =
+ I40E_GLPES_PFIP6TXFRAGSLO(fcn_idx);
+ stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] =
+
I40E_GLPES_PFTCPRXSEGSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] = + I40E_GLPES_PFTCPTXSEGLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] = + I40E_GLPES_PFRDMARXRDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] = + I40E_GLPES_PFRDMARXSNDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] = + I40E_GLPES_PFRDMARXWRSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] = + I40E_GLPES_PFRDMATXRDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] = + I40E_GLPES_PFRDMATXSNDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] = + I40E_GLPES_PFRDMATXWRSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] = + I40E_GLPES_PFRDMAVBNDLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] = + I40E_GLPES_PFRDMAVINVLO(fcn_idx); + } else { + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXDISCARD] = + I40E_GLPES_VFIP4RXDISCARD(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4RXTRUNC] = + I40E_GLPES_VFIP4RXTRUNC(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = + I40E_GLPES_VFIP4TXNOROUTE(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = + I40E_GLPES_VFIP6RXDISCARD(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6RXTRUNC] = + I40E_GLPES_VFIP6RXTRUNC(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = + I40E_GLPES_VFIP6TXNOROUTE(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRTXSEG] = + I40E_GLPES_VFTCPRTXSEG(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXOPTERR] = + I40E_GLPES_VFTCPRXOPTERR(fcn_idx); + stats_table->stats_offset_32[I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = + I40E_GLPES_VFTCPRXPROTOERR(fcn_idx); + + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXOCTS] = + I40E_GLPES_VFIP4RXOCTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXPKTS] = + I40E_GLPES_VFIP4RXPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXFRAGS] = + I40E_GLPES_VFIP4RXFRAGSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4RXMCPKTS] = + I40E_GLPES_VFIP4RXMCPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXOCTS] = + I40E_GLPES_VFIP4TXOCTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXPKTS] = + I40E_GLPES_VFIP4TXPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXFRAGS] = + I40E_GLPES_VFIP4TXFRAGSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP4TXMCPKTS] = + I40E_GLPES_VFIP4TXMCPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXOCTS] = + I40E_GLPES_VFIP6RXOCTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXPKTS] = + I40E_GLPES_VFIP6RXPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXFRAGS] = + I40E_GLPES_VFIP6RXFRAGSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6RXMCPKTS] = + I40E_GLPES_VFIP6RXMCPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXOCTS] = + I40E_GLPES_VFIP6TXOCTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = + I40E_GLPES_VFIP6TXPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXPKTS] = + I40E_GLPES_VFIP6TXPKTSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_IP6TXFRAGS] = + I40E_GLPES_VFIP6TXFRAGSLO(fcn_idx); + 
stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPRXSEGS] = + I40E_GLPES_VFTCPRXSEGSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_TCPTXSEG] = + I40E_GLPES_VFTCPTXSEGLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXRDS] = + I40E_GLPES_VFRDMARXRDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXSNDS] = + I40E_GLPES_VFRDMARXSNDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMARXWRS] = + I40E_GLPES_VFRDMARXWRSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXRDS] = + I40E_GLPES_VFRDMATXRDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXSNDS] = + I40E_GLPES_VFRDMATXSNDSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMATXWRS] = + I40E_GLPES_VFRDMATXWRSLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVBND] = + I40E_GLPES_VFRDMAVBNDLO(fcn_idx); + stats_table->stats_offset_64[I40IW_HW_STAT_INDEX_RDMAVINV] = + I40E_GLPES_VFRDMAVINVLO(fcn_idx); + } + + for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64; + stats_index++) { + stats_reg_offset = stats_table->stats_offset_64[stats_index]; + last_rd_stats->stats_value_64[stats_index] = + readq(stats->hw->hw_addr + stats_reg_offset); + } + + for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32; + stats_index++) { + stats_reg_offset = stats_table->stats_offset_32[stats_index]; + last_rd_stats->stats_value_32[stats_index] = + i40iw_rd32(stats->hw, stats_reg_offset); + } +} + +/** + * i40iw_hw_stats_read_32 - Read 32-bit HW stats counters and accommodates for roll-overs. + * @stat: pestat struct + * @index: index in HW stats table which contains offset reg-addr + * @value: hw stats value + */ +void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats, + enum i40iw_hw_stats_index_32b index, + u64 *value) +{ + struct i40iw_dev_hw_stats_offsets *stats_table = + &stats->hw_stats_offsets; + struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats; + struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats; + u64 new_stats_value = 0; + u32 stats_reg_offset = stats_table->stats_offset_32[index]; + + new_stats_value = i40iw_rd32(stats->hw, stats_reg_offset); + /*roll-over case */ + if (new_stats_value < last_rd_stats->stats_value_32[index]) + hw_stats->stats_value_32[index] += new_stats_value; + else + hw_stats->stats_value_32[index] += + new_stats_value - last_rd_stats->stats_value_32[index]; + last_rd_stats->stats_value_32[index] = new_stats_value; + *value = hw_stats->stats_value_32[index]; +} + +/** + * i40iw_hw_stats_read_64 - Read HW stats counters (greater than 32-bit) and accommodates for roll-overs. 
+ * @stats: pestat struct + * @index: index in HW stats table which contains offset reg-addr + * @value: hw stats value + */ +void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats, + enum i40iw_hw_stats_index_64b index, + u64 *value) +{ + struct i40iw_dev_hw_stats_offsets *stats_table = + &stats->hw_stats_offsets; + struct i40iw_dev_hw_stats *last_rd_stats = &stats->last_read_hw_stats; + struct i40iw_dev_hw_stats *hw_stats = &stats->hw_stats; + u64 new_stats_value = 0; + u32 stats_reg_offset = stats_table->stats_offset_64[index]; + + new_stats_value = readq(stats->hw->hw_addr + stats_reg_offset); + /*roll-over case */ + if (new_stats_value < last_rd_stats->stats_value_64[index]) + hw_stats->stats_value_64[index] += new_stats_value; + else + hw_stats->stats_value_64[index] += + new_stats_value - last_rd_stats->stats_value_64[index]; + last_rd_stats->stats_value_64[index] = new_stats_value; + *value = hw_stats->stats_value_64[index]; +} + +/** + * i40iw_hw_stats_read_all - read all HW stat counters + * @stats: pestat struct + * @stats_values: hw stats structure + * + * Read all the HW stat counters and populates hw_stats structure + * of passed-in vsi's pestat as well as copy created in stat_values. + */ +void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, + struct i40iw_dev_hw_stats *stats_values) +{ + u32 stats_index; + unsigned long flags; + + spin_lock_irqsave(&stats->lock, flags); + + for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32; + stats_index++) + i40iw_hw_stats_read_32(stats, stats_index, + &stats_values->stats_value_32[stats_index]); + for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64; + stats_index++) + i40iw_hw_stats_read_64(stats, stats_index, + &stats_values->stats_value_64[stats_index]); + spin_unlock_irqrestore(&stats->lock, flags); +} + +/** + * i40iw_hw_stats_refresh_all - Update all HW stats structs + * @stats: pestat struct + * + * Read all the HW stats counters to refresh values in hw_stats structure + * of passed-in dev's pestat + */ +void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats) +{ + u64 stats_value; + u32 stats_index; + unsigned long flags; + + spin_lock_irqsave(&stats->lock, flags); + + for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_32; + stats_index++) + i40iw_hw_stats_read_32(stats, stats_index, &stats_value); + for (stats_index = 0; stats_index < I40IW_HW_STAT_INDEX_MAX_64; + stats_index++) + i40iw_hw_stats_read_64(stats, stats_index, &stats_value); + spin_unlock_irqrestore(&stats->lock, flags); +} + +/** + * i40iw_get_fcn_id - Return the function id + * @dev: pointer to the device + */ +static u8 i40iw_get_fcn_id(struct i40iw_sc_dev *dev) +{ + u8 fcn_id = I40IW_INVALID_FCN_ID; + u8 i; + + for (i = I40IW_FIRST_NON_PF_STAT; i < I40IW_MAX_STATS_COUNT; i++) + if (!dev->fcn_id_array[i]) { + fcn_id = i; + dev->fcn_id_array[i] = true; + break; + } + return fcn_id; +} + +/** + * i40iw_vsi_stats_init - Initialize the vsi statistics + * @vsi: pointer to the vsi structure + * @info: The info structure used for initialization + */ +enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info) +{ + u8 fcn_id = info->fcn_id; + + if (info->alloc_fcn_id) + fcn_id = i40iw_get_fcn_id(vsi->dev); + + if (fcn_id == I40IW_INVALID_FCN_ID) + return I40IW_ERR_NOT_READY; + + vsi->pestat = info->pestat; + vsi->pestat->hw = vsi->dev->hw; + vsi->pestat->vsi = vsi; + + if (info->stats_initialize) { + i40iw_hw_stats_init(vsi->pestat, fcn_id, true); + 
spin_lock_init(&vsi->pestat->lock); + i40iw_hw_stats_start_timer(vsi); + } + vsi->stats_fcn_id_alloc = info->alloc_fcn_id; + vsi->fcn_id = fcn_id; + return I40IW_SUCCESS; +} + +/** + * i40iw_vsi_stats_free - Free the vsi stats + * @vsi: pointer to the vsi structure + */ +void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi) +{ + u8 fcn_id = vsi->fcn_id; + + if (vsi->stats_fcn_id_alloc && fcn_id < I40IW_MAX_STATS_COUNT) + vsi->dev->fcn_id_array[fcn_id] = false; + i40iw_hw_stats_stop_timer(vsi); +} + +static struct i40iw_cqp_ops iw_cqp_ops = { + .cqp_init = i40iw_sc_cqp_init, + .cqp_create = i40iw_sc_cqp_create, + .cqp_post_sq = i40iw_sc_cqp_post_sq, + .cqp_get_next_send_wqe = i40iw_sc_cqp_get_next_send_wqe, + .cqp_destroy = i40iw_sc_cqp_destroy, + .poll_for_cqp_op_done = i40iw_sc_poll_for_cqp_op_done +}; + +static struct i40iw_ccq_ops iw_ccq_ops = { + .ccq_init = i40iw_sc_ccq_init, + .ccq_create = i40iw_sc_ccq_create, + .ccq_destroy = i40iw_sc_ccq_destroy, + .ccq_create_done = i40iw_sc_ccq_create_done, + .ccq_get_cqe_info = i40iw_sc_ccq_get_cqe_info, + .ccq_arm = i40iw_sc_ccq_arm +}; + +static struct i40iw_ceq_ops iw_ceq_ops = { + .ceq_init = i40iw_sc_ceq_init, + .ceq_create = i40iw_sc_ceq_create, + .cceq_create_done = i40iw_sc_cceq_create_done, + .cceq_destroy_done = i40iw_sc_cceq_destroy_done, + .cceq_create = i40iw_sc_cceq_create, + .ceq_destroy = i40iw_sc_ceq_destroy, + .process_ceq = i40iw_sc_process_ceq +}; + +static struct i40iw_aeq_ops iw_aeq_ops = { + .aeq_init = i40iw_sc_aeq_init, + .aeq_create = i40iw_sc_aeq_create, + .aeq_destroy = i40iw_sc_aeq_destroy, + .get_next_aeqe = i40iw_sc_get_next_aeqe, + .repost_aeq_entries = i40iw_sc_repost_aeq_entries, + .aeq_create_done = i40iw_sc_aeq_create_done, + .aeq_destroy_done = i40iw_sc_aeq_destroy_done +}; + +/* iwarp pd ops */ +static struct i40iw_pd_ops iw_pd_ops = { + .pd_init = i40iw_sc_pd_init, +}; + +static struct i40iw_priv_qp_ops iw_priv_qp_ops = { + .qp_init = i40iw_sc_qp_init, + .qp_create = i40iw_sc_qp_create, + .qp_modify = i40iw_sc_qp_modify, + .qp_destroy = i40iw_sc_qp_destroy, + .qp_flush_wqes = i40iw_sc_qp_flush_wqes, + .qp_upload_context = i40iw_sc_qp_upload_context, + .qp_setctx = i40iw_sc_qp_setctx, + .qp_send_lsmm = i40iw_sc_send_lsmm, + .qp_send_lsmm_nostag = i40iw_sc_send_lsmm_nostag, + .qp_send_rtt = i40iw_sc_send_rtt, + .qp_post_wqe0 = i40iw_sc_post_wqe0, + .iw_mr_fast_register = i40iw_sc_mr_fast_register +}; + +static struct i40iw_priv_cq_ops iw_priv_cq_ops = { + .cq_init = i40iw_sc_cq_init, + .cq_create = i40iw_sc_cq_create, + .cq_destroy = i40iw_sc_cq_destroy, + .cq_modify = i40iw_sc_cq_modify, +}; + +static struct i40iw_mr_ops iw_mr_ops = { + .alloc_stag = i40iw_sc_alloc_stag, + .mr_reg_non_shared = i40iw_sc_mr_reg_non_shared, + .mr_reg_shared = i40iw_sc_mr_reg_shared, + .dealloc_stag = i40iw_sc_dealloc_stag, + .query_stag = i40iw_sc_query_stag, + .mw_alloc = i40iw_sc_mw_alloc +}; + +static struct i40iw_cqp_misc_ops iw_cqp_misc_ops = { + .manage_push_page = i40iw_sc_manage_push_page, + .manage_hmc_pm_func_table = i40iw_sc_manage_hmc_pm_func_table, + .set_hmc_resource_profile = i40iw_sc_set_hmc_resource_profile, + .commit_fpm_values = i40iw_sc_commit_fpm_values, + .query_fpm_values = i40iw_sc_query_fpm_values, + .static_hmc_pages_allocated = i40iw_sc_static_hmc_pages_allocated, + .add_arp_cache_entry = i40iw_sc_add_arp_cache_entry, + .del_arp_cache_entry = i40iw_sc_del_arp_cache_entry, + .query_arp_cache_entry = i40iw_sc_query_arp_cache_entry, + .manage_apbvt_entry = i40iw_sc_manage_apbvt_entry, + 
.manage_qhash_table_entry = i40iw_sc_manage_qhash_table_entry, + .alloc_local_mac_ipaddr_table_entry = i40iw_sc_alloc_local_mac_ipaddr_entry, + .add_local_mac_ipaddr_entry = i40iw_sc_add_local_mac_ipaddr_entry, + .del_local_mac_ipaddr_entry = i40iw_sc_del_local_mac_ipaddr_entry, + .cqp_nop = i40iw_sc_cqp_nop, + .commit_fpm_values_done = i40iw_sc_commit_fpm_values_done, + .query_fpm_values_done = i40iw_sc_query_fpm_values_done, + .manage_hmc_pm_func_table_done = i40iw_sc_manage_hmc_pm_func_table_done, + .update_suspend_qp = i40iw_sc_suspend_qp, + .update_resume_qp = i40iw_sc_resume_qp +}; + +static struct i40iw_hmc_ops iw_hmc_ops = { + .init_iw_hmc = i40iw_sc_init_iw_hmc, + .parse_fpm_query_buf = i40iw_sc_parse_fpm_query_buf, + .configure_iw_fpm = i40iw_sc_configure_iw_fpm, + .parse_fpm_commit_buf = i40iw_sc_parse_fpm_commit_buf, + .create_hmc_object = i40iw_sc_create_hmc_obj, + .del_hmc_object = i40iw_sc_del_hmc_obj +}; + +/** + * i40iw_device_init - Initialize IWARP device + * @dev: IWARP device pointer + * @info: IWARP init info + */ +enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev, + struct i40iw_device_init_info *info) +{ + u32 val; + u32 vchnl_ver = 0; + u16 hmc_fcn = 0; + enum i40iw_status_code ret_code = 0; + u8 db_size; + + spin_lock_init(&dev->cqp_lock); + + i40iw_device_init_uk(&dev->dev_uk); + + dev->debug_mask = info->debug_mask; + + dev->hmc_fn_id = info->hmc_fn_id; + dev->is_pf = info->is_pf; + + dev->fpm_query_buf_pa = info->fpm_query_buf_pa; + dev->fpm_query_buf = info->fpm_query_buf; + + dev->fpm_commit_buf_pa = info->fpm_commit_buf_pa; + dev->fpm_commit_buf = info->fpm_commit_buf; + + dev->hw = info->hw; + dev->hw->hw_addr = info->bar0; + + if (dev->is_pf) { + val = i40iw_rd32(dev->hw, I40E_GLPCI_DREVID); + dev->hw_rev = (u8)RS_32(val, I40E_GLPCI_DREVID_DEFAULT_REVID); + + val = i40iw_rd32(dev->hw, I40E_GLPCI_LBARCTRL); + db_size = (u8)RS_32(val, I40E_GLPCI_LBARCTRL_PE_DB_SIZE); + if ((db_size != I40IW_PE_DB_SIZE_4M) && + (db_size != I40IW_PE_DB_SIZE_8M)) { + i40iw_debug(dev, I40IW_DEBUG_DEV, + "%s: PE doorbell is not enabled in CSR val 0x%x\n", + __func__, val); + ret_code = I40IW_ERR_PE_DOORBELL_NOT_ENABLED; + return ret_code; + } + dev->db_addr = dev->hw->hw_addr + I40IW_DB_ADDR_OFFSET; + dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_pf; + } else { + dev->db_addr = dev->hw->hw_addr + I40IW_VF_DB_ADDR_OFFSET; + } + + dev->cqp_ops = &iw_cqp_ops; + dev->ccq_ops = &iw_ccq_ops; + dev->ceq_ops = &iw_ceq_ops; + dev->aeq_ops = &iw_aeq_ops; + dev->cqp_misc_ops = &iw_cqp_misc_ops; + dev->iw_pd_ops = &iw_pd_ops; + dev->iw_priv_qp_ops = &iw_priv_qp_ops; + dev->iw_priv_cq_ops = &iw_priv_cq_ops; + dev->mr_ops = &iw_mr_ops; + dev->hmc_ops = &iw_hmc_ops; + dev->vchnl_if.vchnl_send = info->vchnl_send; + if (dev->vchnl_if.vchnl_send) + dev->vchnl_up = true; + else + dev->vchnl_up = false; + if (!dev->is_pf) { + dev->vchnl_if.vchnl_recv = i40iw_vchnl_recv_vf; + ret_code = i40iw_vchnl_vf_get_ver(dev, &vchnl_ver); + if (!ret_code) { + i40iw_debug(dev, I40IW_DEBUG_DEV, + "%s: Get Channel version rc = 0x%0x, version is %u\n", + __func__, ret_code, vchnl_ver); + ret_code = i40iw_vchnl_vf_get_hmc_fcn(dev, &hmc_fcn); + if (!ret_code) { + i40iw_debug(dev, I40IW_DEBUG_DEV, + "%s Get HMC function rc = 0x%0x, hmc fcn is %u\n", + __func__, ret_code, hmc_fcn); + dev->hmc_fn_id = (u8)hmc_fcn; + } + } + } + dev->iw_vf_cqp_ops = &iw_vf_cqp_ops; + + return ret_code; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_d.h b/drivers/infiniband/hw/i40iw/i40iw_d.h new file mode 100644 index 
000000000..6ddaeec87 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_d.h @@ -0,0 +1,1737 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_D_H +#define I40IW_D_H + +#define I40IW_FIRST_USER_QP_ID 2 + +#define I40IW_DB_ADDR_OFFSET (4 * 1024 * 1024 - 64 * 1024) +#define I40IW_VF_DB_ADDR_OFFSET (64 * 1024) + +#define I40IW_PUSH_OFFSET (4 * 1024 * 1024) +#define I40IW_PF_FIRST_PUSH_PAGE_INDEX 16 +#define I40IW_VF_PUSH_OFFSET ((8 + 64) * 1024) +#define I40IW_VF_FIRST_PUSH_PAGE_INDEX 2 + +#define I40IW_PE_DB_SIZE_4M 1 +#define I40IW_PE_DB_SIZE_8M 2 + +#define I40IW_DDP_VER 1 +#define I40IW_RDMAP_VER 1 + +#define I40IW_RDMA_MODE_RDMAC 0 +#define I40IW_RDMA_MODE_IETF 1 + +#define I40IW_QP_STATE_INVALID 0 +#define I40IW_QP_STATE_IDLE 1 +#define I40IW_QP_STATE_RTS 2 +#define I40IW_QP_STATE_CLOSING 3 +#define I40IW_QP_STATE_RESERVED 4 +#define I40IW_QP_STATE_TERMINATE 5 +#define I40IW_QP_STATE_ERROR 6 + +#define I40IW_STAG_STATE_INVALID 0 +#define I40IW_STAG_STATE_VALID 1 + +#define I40IW_STAG_TYPE_SHARED 0 +#define I40IW_STAG_TYPE_NONSHARED 1 + +#define I40IW_MAX_USER_PRIORITY 8 +#define I40IW_MAX_STATS_COUNT 16 +#define I40IW_FIRST_NON_PF_STAT 4 + + +#define I40IW_MTU_TO_MSS_IPV4 40 +#define I40IW_MTU_TO_MSS_IPV6 60 +#define I40IW_DEFAULT_MTU 1500 + +#define LS_64_1(val, bits) ((u64)(uintptr_t)val << bits) +#define RS_64_1(val, bits) ((u64)(uintptr_t)val >> bits) +#define LS_32_1(val, bits) (u32)(val << bits) +#define RS_32_1(val, bits) (u32)(val >> bits) +#define I40E_HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF)) + +#define QS_HANDLE_UNKNOWN 0xffff + +#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK)) + +#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT) +#define LS_32(val, field) ((val << field ## _SHIFT) & (field ## _MASK)) +#define RS_32(val, field) ((val & field ## _MASK) >> field ## _SHIFT) + +#define TERM_DDP_LEN_TAGGED 14 +#define TERM_DDP_LEN_UNTAGGED 18 +#define TERM_RDMA_LEN 28 +#define RDMA_OPCODE_MASK 0x0f +#define RDMA_READ_REQ_OPCODE 1 
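Editorial aside on how the register-layout half of this header is used: every hardware field is described by a *_SHIFT/*_MASK pair, and the LS_64()/RS_64() helpers defined just above turn those pairs into pack/extract operations on 64-bit descriptor words. The stand-alone sketch below is an illustration only, not driver code; the u64 typedef stands in for the kernel type, and the constants are simply copied from this header. It packs a CQP opcode and the WQE-valid bit into one quad-word and reads them back.

#include <stdio.h>

typedef unsigned long long u64;

/* Field helpers, as defined in i40iw_d.h */
#define LS_64(val, field) (((u64)val << field ## _SHIFT) & (field ## _MASK))
#define RS_64(val, field) ((u64)(val & field ## _MASK) >> field ## _SHIFT)

/* Representative constants copied from i40iw_d.h */
#define I40IW_CQP_OP_CREATE_CQ 0x03
#define I40IW_CQPSQ_OPCODE_SHIFT 32
#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT)
#define I40IW_CQPSQ_WQEVALID_SHIFT 63
#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT)

int main(void)
{
        /* Build the header quad-word of a hypothetical CQP "create CQ" WQE */
        u64 header = LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) |
                     LS_64(1, I40IW_CQPSQ_WQEVALID);

        printf("header = 0x%016llx\n", header);                          /* 0x8000000300000000 */
        printf("opcode = %llu\n", RS_64(header, I40IW_CQPSQ_OPCODE));    /* 3 */
        printf("valid  = %llu\n", RS_64(header, I40IW_CQPSQ_WQEVALID));  /* 1 */
        return 0;
}

Packing descriptors through explicit shift/mask pairs on u64 words, rather than C bitfields, keeps the hardware layout unambiguous and independent of compiler bitfield ordering, which is why the remainder of this header consists almost entirely of such pairs.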
+#define Q2_BAD_FRAME_OFFSET 72 +#define Q2_FPSN_OFFSET 64 +#define CQE_MAJOR_DRV 0x8000 + +#define I40IW_TERM_SENT 0x01 +#define I40IW_TERM_RCVD 0x02 +#define I40IW_TERM_DONE 0x04 +#define I40IW_MAC_HLEN 14 + +#define I40IW_INVALID_WQE_INDEX 0xffffffff + +#define I40IW_CQP_WAIT_POLL_REGS 1 +#define I40IW_CQP_WAIT_POLL_CQ 2 +#define I40IW_CQP_WAIT_EVENT 3 + +#define I40IW_CQP_INIT_WQE(wqe) memset(wqe, 0, 64) + +#define I40IW_GET_CURRENT_CQ_ELEMENT(_cq) \ + ( \ + &((_cq)->cq_base[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \ + ) +#define I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(_cq) \ + ( \ + &(((struct i40iw_extended_cqe *) \ + ((_cq)->cq_base))[I40IW_RING_GETCURRENT_HEAD((_cq)->cq_ring)]) \ + ) + +#define I40IW_GET_CURRENT_AEQ_ELEMENT(_aeq) \ + ( \ + &_aeq->aeqe_base[I40IW_RING_GETCURRENT_TAIL(_aeq->aeq_ring)] \ + ) + +#define I40IW_GET_CURRENT_CEQ_ELEMENT(_ceq) \ + ( \ + &_ceq->ceqe_base[I40IW_RING_GETCURRENT_TAIL(_ceq->ceq_ring)] \ + ) + +#define I40IW_AE_SOURCE_RSVD 0x0 +#define I40IW_AE_SOURCE_RQ 0x1 +#define I40IW_AE_SOURCE_RQ_0011 0x3 + +#define I40IW_AE_SOURCE_CQ 0x2 +#define I40IW_AE_SOURCE_CQ_0110 0x6 +#define I40IW_AE_SOURCE_CQ_1010 0xA +#define I40IW_AE_SOURCE_CQ_1110 0xE + +#define I40IW_AE_SOURCE_SQ 0x5 +#define I40IW_AE_SOURCE_SQ_0111 0x7 + +#define I40IW_AE_SOURCE_IN_RR_WR 0x9 +#define I40IW_AE_SOURCE_IN_RR_WR_1011 0xB +#define I40IW_AE_SOURCE_OUT_RR 0xD +#define I40IW_AE_SOURCE_OUT_RR_1111 0xF + +#define I40IW_TCP_STATE_NON_EXISTENT 0 +#define I40IW_TCP_STATE_CLOSED 1 +#define I40IW_TCP_STATE_LISTEN 2 +#define I40IW_STATE_SYN_SEND 3 +#define I40IW_TCP_STATE_SYN_RECEIVED 4 +#define I40IW_TCP_STATE_ESTABLISHED 5 +#define I40IW_TCP_STATE_CLOSE_WAIT 6 +#define I40IW_TCP_STATE_FIN_WAIT_1 7 +#define I40IW_TCP_STATE_CLOSING 8 +#define I40IW_TCP_STATE_LAST_ACK 9 +#define I40IW_TCP_STATE_FIN_WAIT_2 10 +#define I40IW_TCP_STATE_TIME_WAIT 11 +#define I40IW_TCP_STATE_RESERVED_1 12 +#define I40IW_TCP_STATE_RESERVED_2 13 +#define I40IW_TCP_STATE_RESERVED_3 14 +#define I40IW_TCP_STATE_RESERVED_4 15 + +/* ILQ CQP hash table fields */ +#define I40IW_CQPSQ_QHASH_VLANID_SHIFT 32 +#define I40IW_CQPSQ_QHASH_VLANID_MASK \ + ((u64)0xfff << I40IW_CQPSQ_QHASH_VLANID_SHIFT) + +#define I40IW_CQPSQ_QHASH_QPN_SHIFT 32 +#define I40IW_CQPSQ_QHASH_QPN_MASK \ + ((u64)0x3ffff << I40IW_CQPSQ_QHASH_QPN_SHIFT) + +#define I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT 0 +#define I40IW_CQPSQ_QHASH_QS_HANDLE_MASK ((u64)0x3ff << I40IW_CQPSQ_QHASH_QS_HANDLE_SHIFT) + +#define I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT 16 +#define I40IW_CQPSQ_QHASH_SRC_PORT_MASK \ + ((u64)0xffff << I40IW_CQPSQ_QHASH_SRC_PORT_SHIFT) + +#define I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT 0 +#define I40IW_CQPSQ_QHASH_DEST_PORT_MASK \ + ((u64)0xffff << I40IW_CQPSQ_QHASH_DEST_PORT_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR0_SHIFT 32 +#define I40IW_CQPSQ_QHASH_ADDR0_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR0_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR1_SHIFT 0 +#define I40IW_CQPSQ_QHASH_ADDR1_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR1_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR2_SHIFT 32 +#define I40IW_CQPSQ_QHASH_ADDR2_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR2_SHIFT) + +#define I40IW_CQPSQ_QHASH_ADDR3_SHIFT 0 +#define I40IW_CQPSQ_QHASH_ADDR3_MASK \ + ((u64)0xffffffff << I40IW_CQPSQ_QHASH_ADDR3_SHIFT) + +#define I40IW_CQPSQ_QHASH_WQEVALID_SHIFT 63 +#define I40IW_CQPSQ_QHASH_WQEVALID_MASK \ + ((u64)0x1 << I40IW_CQPSQ_QHASH_WQEVALID_SHIFT) +#define I40IW_CQPSQ_QHASH_OPCODE_SHIFT 32 +#define I40IW_CQPSQ_QHASH_OPCODE_MASK \ + ((u64)0x3f << 
I40IW_CQPSQ_QHASH_OPCODE_SHIFT) + +#define I40IW_CQPSQ_QHASH_MANAGE_SHIFT 61 +#define I40IW_CQPSQ_QHASH_MANAGE_MASK \ + ((u64)0x3 << I40IW_CQPSQ_QHASH_MANAGE_SHIFT) + +#define I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT 60 +#define I40IW_CQPSQ_QHASH_IPV4VALID_MASK \ + ((u64)0x1 << I40IW_CQPSQ_QHASH_IPV4VALID_SHIFT) + +#define I40IW_CQPSQ_QHASH_VLANVALID_SHIFT 59 +#define I40IW_CQPSQ_QHASH_VLANVALID_MASK \ + ((u64)0x1 << I40IW_CQPSQ_QHASH_VLANVALID_SHIFT) + +#define I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT 42 +#define I40IW_CQPSQ_QHASH_ENTRYTYPE_MASK \ + ((u64)0x7 << I40IW_CQPSQ_QHASH_ENTRYTYPE_SHIFT) +/* CQP Host Context */ +#define I40IW_CQPHC_EN_DC_TCP_SHIFT 0 +#define I40IW_CQPHC_EN_DC_TCP_MASK (1UL << I40IW_CQPHC_EN_DC_TCP_SHIFT) + +#define I40IW_CQPHC_SQSIZE_SHIFT 8 +#define I40IW_CQPHC_SQSIZE_MASK (0xfUL << I40IW_CQPHC_SQSIZE_SHIFT) + +#define I40IW_CQPHC_DISABLE_PFPDUS_SHIFT 1 +#define I40IW_CQPHC_DISABLE_PFPDUS_MASK (0x1UL << I40IW_CQPHC_DISABLE_PFPDUS_SHIFT) + +#define I40IW_CQPHC_ENABLED_VFS_SHIFT 32 +#define I40IW_CQPHC_ENABLED_VFS_MASK (0x3fULL << I40IW_CQPHC_ENABLED_VFS_SHIFT) + +#define I40IW_CQPHC_HMC_PROFILE_SHIFT 0 +#define I40IW_CQPHC_HMC_PROFILE_MASK (0x7ULL << I40IW_CQPHC_HMC_PROFILE_SHIFT) + +#define I40IW_CQPHC_SVER_SHIFT 24 +#define I40IW_CQPHC_SVER_MASK (0xffUL << I40IW_CQPHC_SVER_SHIFT) + +#define I40IW_CQPHC_SQBASE_SHIFT 9 +#define I40IW_CQPHC_SQBASE_MASK \ + (0xfffffffffffffeULL << I40IW_CQPHC_SQBASE_SHIFT) + +#define I40IW_CQPHC_QPCTX_SHIFT 0 +#define I40IW_CQPHC_QPCTX_MASK \ + (0xffffffffffffffffULL << I40IW_CQPHC_QPCTX_SHIFT) +#define I40IW_CQPHC_SVER 1 + +#define I40IW_CQP_SW_SQSIZE_4 4 +#define I40IW_CQP_SW_SQSIZE_2048 2048 + +/* iWARP QP Doorbell shadow area */ +#define I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT 0 +#define I40IW_QP_DBSA_HW_SQ_TAIL_MASK \ + (0x3fffUL << I40IW_QP_DBSA_HW_SQ_TAIL_SHIFT) + +/* Completion Queue Doorbell shadow area */ +#define I40IW_CQ_DBSA_CQEIDX_SHIFT 0 +#define I40IW_CQ_DBSA_CQEIDX_MASK (0xfffffUL << I40IW_CQ_DBSA_CQEIDX_SHIFT) + +#define I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT 0 +#define I40IW_CQ_DBSA_SW_CQ_SELECT_MASK \ + (0x3fffUL << I40IW_CQ_DBSA_SW_CQ_SELECT_SHIFT) + +#define I40IW_CQ_DBSA_ARM_NEXT_SHIFT 14 +#define I40IW_CQ_DBSA_ARM_NEXT_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SHIFT) + +#define I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT 15 +#define I40IW_CQ_DBSA_ARM_NEXT_SE_MASK (1UL << I40IW_CQ_DBSA_ARM_NEXT_SE_SHIFT) + +#define I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT 16 +#define I40IW_CQ_DBSA_ARM_SEQ_NUM_MASK \ + (0x3UL << I40IW_CQ_DBSA_ARM_SEQ_NUM_SHIFT) + +/* CQP and iWARP Completion Queue */ +#define I40IW_CQ_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQ_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CCQ_OPRETVAL_SHIFT 0 +#define I40IW_CCQ_OPRETVAL_MASK (0xffffffffUL << I40IW_CCQ_OPRETVAL_SHIFT) + +#define I40IW_CQ_MINERR_SHIFT 0 +#define I40IW_CQ_MINERR_MASK (0xffffUL << I40IW_CQ_MINERR_SHIFT) + +#define I40IW_CQ_MAJERR_SHIFT 16 +#define I40IW_CQ_MAJERR_MASK (0xffffUL << I40IW_CQ_MAJERR_SHIFT) + +#define I40IW_CQ_WQEIDX_SHIFT 32 +#define I40IW_CQ_WQEIDX_MASK (0x3fffULL << I40IW_CQ_WQEIDX_SHIFT) + +#define I40IW_CQ_ERROR_SHIFT 55 +#define I40IW_CQ_ERROR_MASK (1ULL << I40IW_CQ_ERROR_SHIFT) + +#define I40IW_CQ_SQ_SHIFT 62 +#define I40IW_CQ_SQ_MASK (1ULL << I40IW_CQ_SQ_SHIFT) + +#define I40IW_CQ_VALID_SHIFT 63 +#define I40IW_CQ_VALID_MASK (1ULL << I40IW_CQ_VALID_SHIFT) + +#define I40IWCQ_PAYLDLEN_SHIFT 0 +#define I40IWCQ_PAYLDLEN_MASK (0xffffffffUL << I40IWCQ_PAYLDLEN_SHIFT) + +#define I40IWCQ_TCPSEQNUM_SHIFT 32 +#define I40IWCQ_TCPSEQNUM_MASK (0xffffffffULL << 
I40IWCQ_TCPSEQNUM_SHIFT) + +#define I40IWCQ_INVSTAG_SHIFT 0 +#define I40IWCQ_INVSTAG_MASK (0xffffffffUL << I40IWCQ_INVSTAG_SHIFT) + +#define I40IWCQ_QPID_SHIFT 32 +#define I40IWCQ_QPID_MASK (0x3ffffULL << I40IWCQ_QPID_SHIFT) + +#define I40IWCQ_PSHDROP_SHIFT 51 +#define I40IWCQ_PSHDROP_MASK (1ULL << I40IWCQ_PSHDROP_SHIFT) + +#define I40IWCQ_SRQ_SHIFT 52 +#define I40IWCQ_SRQ_MASK (1ULL << I40IWCQ_SRQ_SHIFT) + +#define I40IWCQ_STAG_SHIFT 53 +#define I40IWCQ_STAG_MASK (1ULL << I40IWCQ_STAG_SHIFT) + +#define I40IWCQ_SOEVENT_SHIFT 54 +#define I40IWCQ_SOEVENT_MASK (1ULL << I40IWCQ_SOEVENT_SHIFT) + +#define I40IWCQ_OP_SHIFT 56 +#define I40IWCQ_OP_MASK (0x3fULL << I40IWCQ_OP_SHIFT) + +/* CEQE format */ +#define I40IW_CEQE_CQCTX_SHIFT 0 +#define I40IW_CEQE_CQCTX_MASK \ + (0x7fffffffffffffffULL << I40IW_CEQE_CQCTX_SHIFT) + +#define I40IW_CEQE_VALID_SHIFT 63 +#define I40IW_CEQE_VALID_MASK (1ULL << I40IW_CEQE_VALID_SHIFT) + +/* AEQE format */ +#define I40IW_AEQE_COMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_AEQE_COMPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_AEQE_QPCQID_SHIFT 0 +#define I40IW_AEQE_QPCQID_MASK (0x3ffffUL << I40IW_AEQE_QPCQID_SHIFT) + +#define I40IW_AEQE_WQDESCIDX_SHIFT 18 +#define I40IW_AEQE_WQDESCIDX_MASK (0x3fffULL << I40IW_AEQE_WQDESCIDX_SHIFT) + +#define I40IW_AEQE_OVERFLOW_SHIFT 33 +#define I40IW_AEQE_OVERFLOW_MASK (1ULL << I40IW_AEQE_OVERFLOW_SHIFT) + +#define I40IW_AEQE_AECODE_SHIFT 34 +#define I40IW_AEQE_AECODE_MASK (0xffffULL << I40IW_AEQE_AECODE_SHIFT) + +#define I40IW_AEQE_AESRC_SHIFT 50 +#define I40IW_AEQE_AESRC_MASK (0xfULL << I40IW_AEQE_AESRC_SHIFT) + +#define I40IW_AEQE_IWSTATE_SHIFT 54 +#define I40IW_AEQE_IWSTATE_MASK (0x7ULL << I40IW_AEQE_IWSTATE_SHIFT) + +#define I40IW_AEQE_TCPSTATE_SHIFT 57 +#define I40IW_AEQE_TCPSTATE_MASK (0xfULL << I40IW_AEQE_TCPSTATE_SHIFT) + +#define I40IW_AEQE_Q2DATA_SHIFT 61 +#define I40IW_AEQE_Q2DATA_MASK (0x3ULL << I40IW_AEQE_Q2DATA_SHIFT) + +#define I40IW_AEQE_VALID_SHIFT 63 +#define I40IW_AEQE_VALID_MASK (1ULL << I40IW_AEQE_VALID_SHIFT) + +/* CQP SQ WQES */ +#define I40IW_QP_TYPE_IWARP 1 +#define I40IW_QP_TYPE_UDA 2 +#define I40IW_QP_TYPE_CQP 4 + +#define I40IW_CQ_TYPE_IWARP 1 +#define I40IW_CQ_TYPE_ILQ 2 +#define I40IW_CQ_TYPE_IEQ 3 +#define I40IW_CQ_TYPE_CQP 4 + +#define I40IWQP_TERM_SEND_TERM_AND_FIN 0 +#define I40IWQP_TERM_SEND_TERM_ONLY 1 +#define I40IWQP_TERM_SEND_FIN_ONLY 2 +#define I40IWQP_TERM_DONOT_SEND_TERM_OR_FIN 3 + +#define I40IW_CQP_OP_CREATE_QP 0 +#define I40IW_CQP_OP_MODIFY_QP 0x1 +#define I40IW_CQP_OP_DESTROY_QP 0x02 +#define I40IW_CQP_OP_CREATE_CQ 0x03 +#define I40IW_CQP_OP_MODIFY_CQ 0x04 +#define I40IW_CQP_OP_DESTROY_CQ 0x05 +#define I40IW_CQP_OP_CREATE_SRQ 0x06 +#define I40IW_CQP_OP_MODIFY_SRQ 0x07 +#define I40IW_CQP_OP_DESTROY_SRQ 0x08 +#define I40IW_CQP_OP_ALLOC_STAG 0x09 +#define I40IW_CQP_OP_REG_MR 0x0a +#define I40IW_CQP_OP_QUERY_STAG 0x0b +#define I40IW_CQP_OP_REG_SMR 0x0c +#define I40IW_CQP_OP_DEALLOC_STAG 0x0d +#define I40IW_CQP_OP_MANAGE_LOC_MAC_IP_TABLE 0x0e +#define I40IW_CQP_OP_MANAGE_ARP 0x0f +#define I40IW_CQP_OP_MANAGE_VF_PBLE_BP 0x10 +#define I40IW_CQP_OP_MANAGE_PUSH_PAGES 0x11 +#define I40IW_CQP_OP_MANAGE_PE_TEAM 0x12 +#define I40IW_CQP_OP_UPLOAD_CONTEXT 0x13 +#define I40IW_CQP_OP_ALLOCATE_LOC_MAC_IP_TABLE_ENTRY 0x14 +#define I40IW_CQP_OP_MANAGE_HMC_PM_FUNC_TABLE 0x15 +#define I40IW_CQP_OP_CREATE_CEQ 0x16 +#define I40IW_CQP_OP_DESTROY_CEQ 0x18 +#define I40IW_CQP_OP_CREATE_AEQ 0x19 +#define I40IW_CQP_OP_DESTROY_AEQ 0x1b +#define I40IW_CQP_OP_CREATE_ADDR_VECT 0x1c +#define 
I40IW_CQP_OP_MODIFY_ADDR_VECT 0x1d +#define I40IW_CQP_OP_DESTROY_ADDR_VECT 0x1e +#define I40IW_CQP_OP_UPDATE_PE_SDS 0x1f +#define I40IW_CQP_OP_QUERY_FPM_VALUES 0x20 +#define I40IW_CQP_OP_COMMIT_FPM_VALUES 0x21 +#define I40IW_CQP_OP_FLUSH_WQES 0x22 +/* I40IW_CQP_OP_GEN_AE is the same value as I40IW_CQP_OP_FLUSH_WQES */ +#define I40IW_CQP_OP_GEN_AE 0x22 +#define I40IW_CQP_OP_MANAGE_APBVT 0x23 +#define I40IW_CQP_OP_NOP 0x24 +#define I40IW_CQP_OP_MANAGE_QUAD_HASH_TABLE_ENTRY 0x25 +#define I40IW_CQP_OP_CREATE_UDA_MCAST_GROUP 0x26 +#define I40IW_CQP_OP_MODIFY_UDA_MCAST_GROUP 0x27 +#define I40IW_CQP_OP_DESTROY_UDA_MCAST_GROUP 0x28 +#define I40IW_CQP_OP_SUSPEND_QP 0x29 +#define I40IW_CQP_OP_RESUME_QP 0x2a +#define I40IW_CQP_OP_SHMC_PAGES_ALLOCATED 0x2b +#define I40IW_CQP_OP_SET_HMC_RESOURCE_PROFILE 0x2d + +#define I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT 16 +#define I40IW_UDA_QPSQ_NEXT_HEADER_MASK ((u64)0xff << I40IW_UDA_QPSQ_NEXT_HEADER_SHIFT) + +#define I40IW_UDA_QPSQ_OPCODE_SHIFT 32 +#define I40IW_UDA_QPSQ_OPCODE_MASK ((u64)0x3f << I40IW_UDA_QPSQ_OPCODE_SHIFT) + +#define I40IW_UDA_QPSQ_MACLEN_SHIFT 56 +#define I40IW_UDA_QPSQ_MACLEN_MASK \ + ((u64)0x7f << I40IW_UDA_QPSQ_MACLEN_SHIFT) + +#define I40IW_UDA_QPSQ_IPLEN_SHIFT 48 +#define I40IW_UDA_QPSQ_IPLEN_MASK \ + ((u64)0x7f << I40IW_UDA_QPSQ_IPLEN_SHIFT) + +#define I40IW_UDA_QPSQ_L4T_SHIFT 30 +#define I40IW_UDA_QPSQ_L4T_MASK \ + ((u64)0x3 << I40IW_UDA_QPSQ_L4T_SHIFT) + +#define I40IW_UDA_QPSQ_IIPT_SHIFT 28 +#define I40IW_UDA_QPSQ_IIPT_MASK \ + ((u64)0x3 << I40IW_UDA_QPSQ_IIPT_SHIFT) + +#define I40IW_UDA_QPSQ_L4LEN_SHIFT 24 +#define I40IW_UDA_QPSQ_L4LEN_MASK ((u64)0xf << I40IW_UDA_QPSQ_L4LEN_SHIFT) + +#define I40IW_UDA_QPSQ_AVIDX_SHIFT 0 +#define I40IW_UDA_QPSQ_AVIDX_MASK ((u64)0xffff << I40IW_UDA_QPSQ_AVIDX_SHIFT) + +#define I40IW_UDA_QPSQ_VALID_SHIFT 63 +#define I40IW_UDA_QPSQ_VALID_MASK \ + ((u64)0x1 << I40IW_UDA_QPSQ_VALID_SHIFT) + +#define I40IW_UDA_QPSQ_SIGCOMPL_SHIFT 62 +#define I40IW_UDA_QPSQ_SIGCOMPL_MASK ((u64)0x1 << I40IW_UDA_QPSQ_SIGCOMPL_SHIFT) + +#define I40IW_UDA_PAYLOADLEN_SHIFT 0 +#define I40IW_UDA_PAYLOADLEN_MASK ((u64)0x3fff << I40IW_UDA_PAYLOADLEN_SHIFT) + +#define I40IW_UDA_HDRLEN_SHIFT 16 +#define I40IW_UDA_HDRLEN_MASK ((u64)0x1ff << I40IW_UDA_HDRLEN_SHIFT) + +#define I40IW_VLAN_TAG_VALID_SHIFT 50 +#define I40IW_VLAN_TAG_VALID_MASK ((u64)0x1 << I40IW_VLAN_TAG_VALID_SHIFT) + +#define I40IW_UDA_L3PROTO_SHIFT 0 +#define I40IW_UDA_L3PROTO_MASK ((u64)0x3 << I40IW_UDA_L3PROTO_SHIFT) + +#define I40IW_UDA_L4PROTO_SHIFT 16 +#define I40IW_UDA_L4PROTO_MASK ((u64)0x3 << I40IW_UDA_L4PROTO_SHIFT) + +#define I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT 44 +#define I40IW_UDA_QPSQ_DOLOOPBACK_MASK \ + ((u64)0x1 << I40IW_UDA_QPSQ_DOLOOPBACK_SHIFT) + +/* CQP SQ WQE common fields */ +#define I40IW_CQPSQ_OPCODE_SHIFT 32 +#define I40IW_CQPSQ_OPCODE_MASK (0x3fULL << I40IW_CQPSQ_OPCODE_SHIFT) + +#define I40IW_CQPSQ_WQEVALID_SHIFT 63 +#define I40IW_CQPSQ_WQEVALID_MASK (1ULL << I40IW_CQPSQ_WQEVALID_SHIFT) + +#define I40IW_CQPSQ_TPHVAL_SHIFT 0 +#define I40IW_CQPSQ_TPHVAL_MASK (0xffUL << I40IW_CQPSQ_TPHVAL_SHIFT) + +#define I40IW_CQPSQ_TPHEN_SHIFT 60 +#define I40IW_CQPSQ_TPHEN_MASK (1ULL << I40IW_CQPSQ_TPHEN_SHIFT) + +#define I40IW_CQPSQ_PBUFADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_PBUFADDR_MASK I40IW_CQPHC_QPCTX_MASK + +/* Create/Modify/Destroy QP */ + +#define I40IW_CQPSQ_QP_NEWMSS_SHIFT 32 +#define I40IW_CQPSQ_QP_NEWMSS_MASK (0x3fffULL << I40IW_CQPSQ_QP_NEWMSS_SHIFT) + +#define I40IW_CQPSQ_QP_TERMLEN_SHIFT 48 +#define I40IW_CQPSQ_QP_TERMLEN_MASK (0xfULL << 
I40IW_CQPSQ_QP_TERMLEN_SHIFT) + +#define I40IW_CQPSQ_QP_QPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_QP_QPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_QP_QPID_SHIFT 0 +#define I40IW_CQPSQ_QP_QPID_MASK (0x3FFFFUL) +/* I40IWCQ_QPID_MASK */ + +#define I40IW_CQPSQ_QP_OP_SHIFT 32 +#define I40IW_CQPSQ_QP_OP_MASK I40IWCQ_OP_MASK + +#define I40IW_CQPSQ_QP_ORDVALID_SHIFT 42 +#define I40IW_CQPSQ_QP_ORDVALID_MASK (1ULL << I40IW_CQPSQ_QP_ORDVALID_SHIFT) + +#define I40IW_CQPSQ_QP_TOECTXVALID_SHIFT 43 +#define I40IW_CQPSQ_QP_TOECTXVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_TOECTXVALID_SHIFT) + +#define I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT 44 +#define I40IW_CQPSQ_QP_CACHEDVARVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_CACHEDVARVALID_SHIFT) + +#define I40IW_CQPSQ_QP_VQ_SHIFT 45 +#define I40IW_CQPSQ_QP_VQ_MASK (1ULL << I40IW_CQPSQ_QP_VQ_SHIFT) + +#define I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT 46 +#define I40IW_CQPSQ_QP_FORCELOOPBACK_MASK \ + (1ULL << I40IW_CQPSQ_QP_FORCELOOPBACK_SHIFT) + +#define I40IW_CQPSQ_QP_CQNUMVALID_SHIFT 47 +#define I40IW_CQPSQ_QP_CQNUMVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_CQNUMVALID_SHIFT) + +#define I40IW_CQPSQ_QP_QPTYPE_SHIFT 48 +#define I40IW_CQPSQ_QP_QPTYPE_MASK (0x3ULL << I40IW_CQPSQ_QP_QPTYPE_SHIFT) + +#define I40IW_CQPSQ_QP_MSSCHANGE_SHIFT 52 +#define I40IW_CQPSQ_QP_MSSCHANGE_MASK (1ULL << I40IW_CQPSQ_QP_MSSCHANGE_SHIFT) + +#define I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT 54 +#define I40IW_CQPSQ_QP_IGNOREMWBOUND_MASK \ + (1ULL << I40IW_CQPSQ_QP_IGNOREMWBOUND_SHIFT) + +#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT 55 +#define I40IW_CQPSQ_QP_REMOVEHASHENTRY_MASK \ + (1ULL << I40IW_CQPSQ_QP_REMOVEHASHENTRY_SHIFT) + +#define I40IW_CQPSQ_QP_TERMACT_SHIFT 56 +#define I40IW_CQPSQ_QP_TERMACT_MASK (0x3ULL << I40IW_CQPSQ_QP_TERMACT_SHIFT) + +#define I40IW_CQPSQ_QP_RESETCON_SHIFT 58 +#define I40IW_CQPSQ_QP_RESETCON_MASK (1ULL << I40IW_CQPSQ_QP_RESETCON_SHIFT) + +#define I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT 59 +#define I40IW_CQPSQ_QP_ARPTABIDXVALID_MASK \ + (1ULL << I40IW_CQPSQ_QP_ARPTABIDXVALID_SHIFT) + +#define I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT 60 +#define I40IW_CQPSQ_QP_NEXTIWSTATE_MASK \ + (0x7ULL << I40IW_CQPSQ_QP_NEXTIWSTATE_SHIFT) + +#define I40IW_CQPSQ_QP_DBSHADOWADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_QP_DBSHADOWADDR_MASK I40IW_CQPHC_QPCTX_MASK + +/* Create/Modify/Destroy CQ */ +#define I40IW_CQPSQ_CQ_CQSIZE_SHIFT 0 +#define I40IW_CQPSQ_CQ_CQSIZE_MASK (0x3ffffUL << I40IW_CQPSQ_CQ_CQSIZE_SHIFT) + +#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0 +#define I40IW_CQPSQ_CQ_CQCTX_MASK \ + (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT) + +#define I40IW_CQPSQ_CQ_CQCTX_SHIFT 0 +#define I40IW_CQPSQ_CQ_CQCTX_MASK \ + (0x7fffffffffffffffULL << I40IW_CQPSQ_CQ_CQCTX_SHIFT) + +#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT 0 +#define I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_MASK \ + (0x3ffff << I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD_SHIFT) + +#define I40IW_CQPSQ_CQ_CEQID_SHIFT 24 +#define I40IW_CQPSQ_CQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CQ_CEQID_SHIFT) + +#define I40IW_CQPSQ_CQ_OP_SHIFT 32 +#define I40IW_CQPSQ_CQ_OP_MASK (0x3fULL << I40IW_CQPSQ_CQ_OP_SHIFT) + +#define I40IW_CQPSQ_CQ_CQRESIZE_SHIFT 43 +#define I40IW_CQPSQ_CQ_CQRESIZE_MASK (1ULL << I40IW_CQPSQ_CQ_CQRESIZE_SHIFT) + +#define I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT 44 +#define I40IW_CQPSQ_CQ_LPBLSIZE_MASK (3ULL << I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT) + +#define I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT 46 +#define I40IW_CQPSQ_CQ_CHKOVERFLOW_MASK \ + (1ULL << I40IW_CQPSQ_CQ_CHKOVERFLOW_SHIFT) + +#define I40IW_CQPSQ_CQ_VIRTMAP_SHIFT 47 +#define 
I40IW_CQPSQ_CQ_VIRTMAP_MASK (1ULL << I40IW_CQPSQ_CQ_VIRTMAP_SHIFT) + +#define I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT 48 +#define I40IW_CQPSQ_CQ_ENCEQEMASK_MASK \ + (1ULL << I40IW_CQPSQ_CQ_ENCEQEMASK_SHIFT) + +#define I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT 49 +#define I40IW_CQPSQ_CQ_CEQIDVALID_MASK \ + (1ULL << I40IW_CQPSQ_CQ_CEQIDVALID_SHIFT) + +#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT 61 +#define I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_MASK \ + (1ULL << I40IW_CQPSQ_CQ_AVOIDMEMCNFLCT_SHIFT) + +/* Create/Modify/Destroy Shared Receive Queue */ + +#define I40IW_CQPSQ_SRQ_RQSIZE_SHIFT 0 +#define I40IW_CQPSQ_SRQ_RQSIZE_MASK (0xfUL << I40IW_CQPSQ_SRQ_RQSIZE_SHIFT) + +#define I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT 4 +#define I40IW_CQPSQ_SRQ_RQWQESIZE_MASK \ + (0x7UL << I40IW_CQPSQ_SRQ_RQWQESIZE_SHIFT) + +#define I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT 32 +#define I40IW_CQPSQ_SRQ_SRQLIMIT_MASK \ + (0xfffULL << I40IW_CQPSQ_SRQ_SRQLIMIT_SHIFT) + +#define I40IW_CQPSQ_SRQ_SRQCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_SRQ_SRQCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_SRQ_PDID_SHIFT 16 +#define I40IW_CQPSQ_SRQ_PDID_MASK \ + (0x7fffULL << I40IW_CQPSQ_SRQ_PDID_SHIFT) + +#define I40IW_CQPSQ_SRQ_SRQID_SHIFT 0 +#define I40IW_CQPSQ_SRQ_SRQID_MASK (0x7fffUL << I40IW_CQPSQ_SRQ_SRQID_SHIFT) + +#define I40IW_CQPSQ_SRQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_SRQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_SRQ_VIRTMAP_SHIFT I40IW_CQPSQ_CQ_VIRTMAP_SHIFT +#define I40IW_CQPSQ_SRQ_VIRTMAP_MASK I40IW_CQPSQ_CQ_VIRTMAP_MASK + +#define I40IW_CQPSQ_SRQ_TPHEN_SHIFT I40IW_CQPSQ_TPHEN_SHIFT +#define I40IW_CQPSQ_SRQ_TPHEN_MASK I40IW_CQPSQ_TPHEN_MASK + +#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT 61 +#define I40IW_CQPSQ_SRQ_ARMLIMITEVENT_MASK \ + (1ULL << I40IW_CQPSQ_SRQ_ARMLIMITEVENT_SHIFT) + +#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT 6 +#define I40IW_CQPSQ_SRQ_DBSHADOWAREA_MASK \ + (0x3ffffffffffffffULL << I40IW_CQPSQ_SRQ_DBSHADOWAREA_SHIFT) + +#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_SRQ_FIRSTPMPBLIDX_SHIFT) + +/* Allocate/Register/Register Shared/Deallocate Stag */ +#define I40IW_CQPSQ_STAG_VA_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_STAG_VA_FBO_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_STAG_STAGLEN_SHIFT 0 +#define I40IW_CQPSQ_STAG_STAGLEN_MASK \ + (0x3fffffffffffULL << I40IW_CQPSQ_STAG_STAGLEN_SHIFT) + +#define I40IW_CQPSQ_STAG_PDID_SHIFT 48 +#define I40IW_CQPSQ_STAG_PDID_MASK (0x7fffULL << I40IW_CQPSQ_STAG_PDID_SHIFT) + +#define I40IW_CQPSQ_STAG_KEY_SHIFT 0 +#define I40IW_CQPSQ_STAG_KEY_MASK (0xffUL << I40IW_CQPSQ_STAG_KEY_SHIFT) + +#define I40IW_CQPSQ_STAG_IDX_SHIFT 8 +#define I40IW_CQPSQ_STAG_IDX_MASK (0xffffffUL << I40IW_CQPSQ_STAG_IDX_SHIFT) + +#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT 32 +#define I40IW_CQPSQ_STAG_PARENTSTAGIDX_MASK \ + (0xffffffULL << I40IW_CQPSQ_STAG_PARENTSTAGIDX_SHIFT) + +#define I40IW_CQPSQ_STAG_MR_SHIFT 43 +#define I40IW_CQPSQ_STAG_MR_MASK (1ULL << I40IW_CQPSQ_STAG_MR_SHIFT) + +#define I40IW_CQPSQ_STAG_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_STAG_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT 46 +#define I40IW_CQPSQ_STAG_HPAGESIZE_MASK \ + (1ULL << I40IW_CQPSQ_STAG_HPAGESIZE_SHIFT) + +#define I40IW_CQPSQ_STAG_ARIGHTS_SHIFT 48 +#define I40IW_CQPSQ_STAG_ARIGHTS_MASK \ + (0x1fULL << I40IW_CQPSQ_STAG_ARIGHTS_SHIFT) + +#define I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT 53 +#define 
I40IW_CQPSQ_STAG_REMACCENABLED_MASK \ + (1ULL << I40IW_CQPSQ_STAG_REMACCENABLED_SHIFT) + +#define I40IW_CQPSQ_STAG_VABASEDTO_SHIFT 59 +#define I40IW_CQPSQ_STAG_VABASEDTO_MASK \ + (1ULL << I40IW_CQPSQ_STAG_VABASEDTO_SHIFT) + +#define I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT 60 +#define I40IW_CQPSQ_STAG_USEHMCFNIDX_MASK \ + (1ULL << I40IW_CQPSQ_STAG_USEHMCFNIDX_SHIFT) + +#define I40IW_CQPSQ_STAG_USEPFRID_SHIFT 61 +#define I40IW_CQPSQ_STAG_USEPFRID_MASK \ + (1ULL << I40IW_CQPSQ_STAG_USEPFRID_SHIFT) + +#define I40IW_CQPSQ_STAG_PBA_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_STAG_PBA_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT 0 +#define I40IW_CQPSQ_STAG_HMCFNIDX_MASK \ + (0x3fUL << I40IW_CQPSQ_STAG_HMCFNIDX_SHIFT) + +#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_STAG_FIRSTPMPBLIDX_SHIFT) + +/* Query stag */ +#define I40IW_CQPSQ_QUERYSTAG_IDX_SHIFT I40IW_CQPSQ_STAG_IDX_SHIFT +#define I40IW_CQPSQ_QUERYSTAG_IDX_MASK I40IW_CQPSQ_STAG_IDX_MASK + +/* Allocate Local IP Address Entry */ + +/* Manage Local IP Address Table - MLIPA */ +#define I40IW_CQPSQ_MLIPA_IPV6LO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_MLIPA_IPV6LO_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_MLIPA_IPV6HI_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_MLIPA_IPV6HI_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_MLIPA_IPV4_SHIFT 0 +#define I40IW_CQPSQ_MLIPA_IPV4_MASK \ + (0xffffffffUL << I40IW_CQPSQ_MLIPA_IPV4_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT 0 +#define I40IW_CQPSQ_MLIPA_IPTABLEIDX_MASK \ + (0x3fUL << I40IW_CQPSQ_MLIPA_IPTABLEIDX_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT 42 +#define I40IW_CQPSQ_MLIPA_IPV4VALID_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_IPV4VALID_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT 43 +#define I40IW_CQPSQ_MLIPA_IPV6VALID_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_IPV6VALID_SHIFT) + +#define I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT 62 +#define I40IW_CQPSQ_MLIPA_FREEENTRY_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_FREEENTRY_SHIFT) + +#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT 61 +#define I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_MASK \ + (1ULL << I40IW_CQPSQ_MLIPA_IGNORE_REF_CNT_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC0_SHIFT 0 +#define I40IW_CQPSQ_MLIPA_MAC0_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC0_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC1_SHIFT 8 +#define I40IW_CQPSQ_MLIPA_MAC1_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC1_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC2_SHIFT 16 +#define I40IW_CQPSQ_MLIPA_MAC2_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC2_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC3_SHIFT 24 +#define I40IW_CQPSQ_MLIPA_MAC3_MASK (0xffUL << I40IW_CQPSQ_MLIPA_MAC3_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC4_SHIFT 32 +#define I40IW_CQPSQ_MLIPA_MAC4_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC4_SHIFT) + +#define I40IW_CQPSQ_MLIPA_MAC5_SHIFT 40 +#define I40IW_CQPSQ_MLIPA_MAC5_MASK (0xffULL << I40IW_CQPSQ_MLIPA_MAC5_SHIFT) + +/* Manage ARP Table - MAT */ +#define I40IW_CQPSQ_MAT_REACHMAX_SHIFT 0 +#define I40IW_CQPSQ_MAT_REACHMAX_MASK \ + (0xffffffffUL << I40IW_CQPSQ_MAT_REACHMAX_SHIFT) + +#define I40IW_CQPSQ_MAT_MACADDR_SHIFT 0 +#define I40IW_CQPSQ_MAT_MACADDR_MASK \ + (0xffffffffffffULL << I40IW_CQPSQ_MAT_MACADDR_SHIFT) + +#define I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT 0 +#define I40IW_CQPSQ_MAT_ARPENTRYIDX_MASK \ + (0xfffUL << I40IW_CQPSQ_MAT_ARPENTRYIDX_SHIFT) + +#define I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT 42 +#define I40IW_CQPSQ_MAT_ENTRYVALID_MASK \ + (1ULL << I40IW_CQPSQ_MAT_ENTRYVALID_SHIFT) + +#define 
I40IW_CQPSQ_MAT_PERMANENT_SHIFT 43 +#define I40IW_CQPSQ_MAT_PERMANENT_MASK \ + (1ULL << I40IW_CQPSQ_MAT_PERMANENT_SHIFT) + +#define I40IW_CQPSQ_MAT_QUERY_SHIFT 44 +#define I40IW_CQPSQ_MAT_QUERY_MASK (1ULL << I40IW_CQPSQ_MAT_QUERY_SHIFT) + +/* Manage VF PBLE Backing Pages - MVPBP*/ +#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT 0 +#define I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_MASK \ + (0x3ffULL << I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT_SHIFT) + +#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT 16 +#define I40IW_CQPSQ_MVPBP_FIRST_PD_INX_MASK \ + (0x1ffULL << I40IW_CQPSQ_MVPBP_FIRST_PD_INX_SHIFT) + +#define I40IW_CQPSQ_MVPBP_SD_INX_SHIFT 32 +#define I40IW_CQPSQ_MVPBP_SD_INX_MASK \ + (0xfffULL << I40IW_CQPSQ_MVPBP_SD_INX_SHIFT) + +#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT 62 +#define I40IW_CQPSQ_MVPBP_INV_PD_ENT_MASK \ + (0x1ULL << I40IW_CQPSQ_MVPBP_INV_PD_ENT_SHIFT) + +#define I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT 3 +#define I40IW_CQPSQ_MVPBP_PD_PLPBA_MASK \ + (0x1fffffffffffffffULL << I40IW_CQPSQ_MVPBP_PD_PLPBA_SHIFT) + +/* Manage Push Page - MPP */ +#define I40IW_INVALID_PUSH_PAGE_INDEX 0xffff + +#define I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT 0 +#define I40IW_CQPSQ_MPP_QS_HANDLE_MASK (0xffffUL << \ + I40IW_CQPSQ_MPP_QS_HANDLE_SHIFT) + +#define I40IW_CQPSQ_MPP_PPIDX_SHIFT 0 +#define I40IW_CQPSQ_MPP_PPIDX_MASK (0x3ffUL << I40IW_CQPSQ_MPP_PPIDX_SHIFT) + +#define I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT 62 +#define I40IW_CQPSQ_MPP_FREE_PAGE_MASK (1ULL << I40IW_CQPSQ_MPP_FREE_PAGE_SHIFT) + +/* Upload Context - UCTX */ +#define I40IW_CQPSQ_UCTX_QPCTXADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IW_CQPSQ_UCTX_QPCTXADDR_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IW_CQPSQ_UCTX_QPID_SHIFT 0 +#define I40IW_CQPSQ_UCTX_QPID_MASK (0x3ffffUL << I40IW_CQPSQ_UCTX_QPID_SHIFT) + +#define I40IW_CQPSQ_UCTX_QPTYPE_SHIFT 48 +#define I40IW_CQPSQ_UCTX_QPTYPE_MASK (0xfULL << I40IW_CQPSQ_UCTX_QPTYPE_SHIFT) + +#define I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT 61 +#define I40IW_CQPSQ_UCTX_RAWFORMAT_MASK \ + (1ULL << I40IW_CQPSQ_UCTX_RAWFORMAT_SHIFT) + +#define I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT 62 +#define I40IW_CQPSQ_UCTX_FREEZEQP_MASK \ + (1ULL << I40IW_CQPSQ_UCTX_FREEZEQP_SHIFT) + +/* Manage HMC PM Function Table - MHMC */ +#define I40IW_CQPSQ_MHMC_VFIDX_SHIFT 0 +#define I40IW_CQPSQ_MHMC_VFIDX_MASK (0x7fUL << I40IW_CQPSQ_MHMC_VFIDX_SHIFT) + +#define I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT 62 +#define I40IW_CQPSQ_MHMC_FREEPMFN_MASK \ + (1ULL << I40IW_CQPSQ_MHMC_FREEPMFN_SHIFT) + +/* Set HMC Resource Profile - SHMCRP */ +#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT 0 +#define I40IW_CQPSQ_SHMCRP_HMC_PROFILE_MASK \ + (0x7ULL << I40IW_CQPSQ_SHMCRP_HMC_PROFILE_SHIFT) +#define I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT 32 +#define I40IW_CQPSQ_SHMCRP_VFNUM_MASK (0x3fULL << I40IW_CQPSQ_SHMCRP_VFNUM_SHIFT) + +/* Create/Destroy CEQ */ +#define I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT 0 +#define I40IW_CQPSQ_CEQ_CEQSIZE_MASK \ + (0x1ffffUL << I40IW_CQPSQ_CEQ_CEQSIZE_SHIFT) + +#define I40IW_CQPSQ_CEQ_CEQID_SHIFT 0 +#define I40IW_CQPSQ_CEQ_CEQID_MASK (0x7fUL << I40IW_CQPSQ_CEQ_CEQID_SHIFT) + +#define I40IW_CQPSQ_CEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_CEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_CEQ_VMAP_SHIFT 47 +#define I40IW_CQPSQ_CEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_CEQ_VMAP_SHIFT) + +#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_CEQ_FIRSTPMPBLIDX_SHIFT) + +/* Create/Destroy AEQ */ +#define I40IW_CQPSQ_AEQ_AEQECNT_SHIFT 0 +#define I40IW_CQPSQ_AEQ_AEQECNT_MASK \ + (0x7ffffUL << 
I40IW_CQPSQ_AEQ_AEQECNT_SHIFT) + +#define I40IW_CQPSQ_AEQ_LPBLSIZE_SHIFT I40IW_CQPSQ_CQ_LPBLSIZE_SHIFT +#define I40IW_CQPSQ_AEQ_LPBLSIZE_MASK I40IW_CQPSQ_CQ_LPBLSIZE_MASK + +#define I40IW_CQPSQ_AEQ_VMAP_SHIFT 47 +#define I40IW_CQPSQ_AEQ_VMAP_MASK (1ULL << I40IW_CQPSQ_AEQ_VMAP_SHIFT) + +#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT 0 +#define I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_MASK \ + (0xfffffffUL << I40IW_CQPSQ_AEQ_FIRSTPMPBLIDX_SHIFT) + +/* Commit FPM Values - CFPM */ +#define I40IW_CQPSQ_CFPM_HMCFNID_SHIFT 0 +#define I40IW_CQPSQ_CFPM_HMCFNID_MASK (0x3fUL << I40IW_CQPSQ_CFPM_HMCFNID_SHIFT) + +/* Flush WQEs - FWQE */ +#define I40IW_CQPSQ_FWQE_AECODE_SHIFT 0 +#define I40IW_CQPSQ_FWQE_AECODE_MASK (0xffffUL << I40IW_CQPSQ_FWQE_AECODE_SHIFT) + +#define I40IW_CQPSQ_FWQE_AESOURCE_SHIFT 16 +#define I40IW_CQPSQ_FWQE_AESOURCE_MASK \ + (0xfUL << I40IW_CQPSQ_FWQE_AESOURCE_SHIFT) + +#define I40IW_CQPSQ_FWQE_RQMNERR_SHIFT 0 +#define I40IW_CQPSQ_FWQE_RQMNERR_MASK \ + (0xffffUL << I40IW_CQPSQ_FWQE_RQMNERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_RQMJERR_SHIFT 16 +#define I40IW_CQPSQ_FWQE_RQMJERR_MASK \ + (0xffffUL << I40IW_CQPSQ_FWQE_RQMJERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_SQMNERR_SHIFT 32 +#define I40IW_CQPSQ_FWQE_SQMNERR_MASK \ + (0xffffULL << I40IW_CQPSQ_FWQE_SQMNERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_SQMJERR_SHIFT 48 +#define I40IW_CQPSQ_FWQE_SQMJERR_MASK \ + (0xffffULL << I40IW_CQPSQ_FWQE_SQMJERR_SHIFT) + +#define I40IW_CQPSQ_FWQE_QPID_SHIFT 0 +#define I40IW_CQPSQ_FWQE_QPID_MASK (0x3ffffULL << I40IW_CQPSQ_FWQE_QPID_SHIFT) + +#define I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT 59 +#define I40IW_CQPSQ_FWQE_GENERATE_AE_MASK (1ULL << \ + I40IW_CQPSQ_FWQE_GENERATE_AE_SHIFT) + +#define I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT 60 +#define I40IW_CQPSQ_FWQE_USERFLCODE_MASK \ + (1ULL << I40IW_CQPSQ_FWQE_USERFLCODE_SHIFT) + +#define I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT 61 +#define I40IW_CQPSQ_FWQE_FLUSHSQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHSQ_SHIFT) + +#define I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT 62 +#define I40IW_CQPSQ_FWQE_FLUSHRQ_MASK (1ULL << I40IW_CQPSQ_FWQE_FLUSHRQ_SHIFT) + +/* Manage Accelerated Port Table - MAPT */ +#define I40IW_CQPSQ_MAPT_PORT_SHIFT 0 +#define I40IW_CQPSQ_MAPT_PORT_MASK (0xffffUL << I40IW_CQPSQ_MAPT_PORT_SHIFT) + +#define I40IW_CQPSQ_MAPT_ADDPORT_SHIFT 62 +#define I40IW_CQPSQ_MAPT_ADDPORT_MASK (1ULL << I40IW_CQPSQ_MAPT_ADDPORT_SHIFT) + +/* Update Protocol Engine SDs */ +#define I40IW_CQPSQ_UPESD_SDCMD_SHIFT 0 +#define I40IW_CQPSQ_UPESD_SDCMD_MASK (0xffffffffUL << I40IW_CQPSQ_UPESD_SDCMD_SHIFT) + +#define I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT 0 +#define I40IW_CQPSQ_UPESD_SDDATALOW_MASK \ + (0xffffffffUL << I40IW_CQPSQ_UPESD_SDDATALOW_SHIFT) + +#define I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT 32 +#define I40IW_CQPSQ_UPESD_SDDATAHI_MASK \ + (0xffffffffULL << I40IW_CQPSQ_UPESD_SDDATAHI_SHIFT) +#define I40IW_CQPSQ_UPESD_HMCFNID_SHIFT 0 +#define I40IW_CQPSQ_UPESD_HMCFNID_MASK \ + (0x3fUL << I40IW_CQPSQ_UPESD_HMCFNID_SHIFT) + +#define I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT 63 +#define I40IW_CQPSQ_UPESD_ENTRY_VALID_MASK \ + ((u64)1 << I40IW_CQPSQ_UPESD_ENTRY_VALID_SHIFT) + +#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT 0 +#define I40IW_CQPSQ_UPESD_ENTRY_COUNT_MASK \ + (0xfUL << I40IW_CQPSQ_UPESD_ENTRY_COUNT_SHIFT) + +#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT 7 +#define I40IW_CQPSQ_UPESD_SKIP_ENTRY_MASK \ + (0x1UL << I40IW_CQPSQ_UPESD_SKIP_ENTRY_SHIFT) + +/* Suspend QP */ +#define I40IW_CQPSQ_SUSPENDQP_QPID_SHIFT 0 +#define I40IW_CQPSQ_SUSPENDQP_QPID_MASK (0x3FFFFUL) +/* I40IWCQ_QPID_MASK */ + +/* Resume QP */ +#define 
I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT 0 +#define I40IW_CQPSQ_RESUMEQP_QSHANDLE_MASK \ + (0xffffffffUL << I40IW_CQPSQ_RESUMEQP_QSHANDLE_SHIFT) + +#define I40IW_CQPSQ_RESUMEQP_QPID_SHIFT 0 +#define I40IW_CQPSQ_RESUMEQP_QPID_MASK (0x3FFFFUL) +/* I40IWCQ_QPID_MASK */ + +/* IW QP Context */ +#define I40IWQPC_DDP_VER_SHIFT 0 +#define I40IWQPC_DDP_VER_MASK (3UL << I40IWQPC_DDP_VER_SHIFT) + +#define I40IWQPC_SNAP_SHIFT 2 +#define I40IWQPC_SNAP_MASK (1UL << I40IWQPC_SNAP_SHIFT) + +#define I40IWQPC_IPV4_SHIFT 3 +#define I40IWQPC_IPV4_MASK (1UL << I40IWQPC_IPV4_SHIFT) + +#define I40IWQPC_NONAGLE_SHIFT 4 +#define I40IWQPC_NONAGLE_MASK (1UL << I40IWQPC_NONAGLE_SHIFT) + +#define I40IWQPC_INSERTVLANTAG_SHIFT 5 +#define I40IWQPC_INSERTVLANTAG_MASK (1 << I40IWQPC_INSERTVLANTAG_SHIFT) + +#define I40IWQPC_USESRQ_SHIFT 6 +#define I40IWQPC_USESRQ_MASK (1UL << I40IWQPC_USESRQ_SHIFT) + +#define I40IWQPC_TIMESTAMP_SHIFT 7 +#define I40IWQPC_TIMESTAMP_MASK (1UL << I40IWQPC_TIMESTAMP_SHIFT) + +#define I40IWQPC_RQWQESIZE_SHIFT 8 +#define I40IWQPC_RQWQESIZE_MASK (3UL << I40IWQPC_RQWQESIZE_SHIFT) + +#define I40IWQPC_INSERTL2TAG2_SHIFT 11 +#define I40IWQPC_INSERTL2TAG2_MASK (1UL << I40IWQPC_INSERTL2TAG2_SHIFT) + +#define I40IWQPC_LIMIT_SHIFT 12 +#define I40IWQPC_LIMIT_MASK (3UL << I40IWQPC_LIMIT_SHIFT) + +#define I40IWQPC_DROPOOOSEG_SHIFT 15 +#define I40IWQPC_DROPOOOSEG_MASK (1UL << I40IWQPC_DROPOOOSEG_SHIFT) + +#define I40IWQPC_DUPACK_THRESH_SHIFT 16 +#define I40IWQPC_DUPACK_THRESH_MASK (7UL << I40IWQPC_DUPACK_THRESH_SHIFT) + +#define I40IWQPC_ERR_RQ_IDX_VALID_SHIFT 19 +#define I40IWQPC_ERR_RQ_IDX_VALID_MASK (1UL << I40IWQPC_ERR_RQ_IDX_VALID_SHIFT) + +#define I40IWQPC_DIS_VLAN_CHECKS_SHIFT 19 +#define I40IWQPC_DIS_VLAN_CHECKS_MASK (7UL << I40IWQPC_DIS_VLAN_CHECKS_SHIFT) + +#define I40IWQPC_RCVTPHEN_SHIFT 28 +#define I40IWQPC_RCVTPHEN_MASK (1UL << I40IWQPC_RCVTPHEN_SHIFT) + +#define I40IWQPC_XMITTPHEN_SHIFT 29 +#define I40IWQPC_XMITTPHEN_MASK (1ULL << I40IWQPC_XMITTPHEN_SHIFT) + +#define I40IWQPC_RQTPHEN_SHIFT 30 +#define I40IWQPC_RQTPHEN_MASK (1UL << I40IWQPC_RQTPHEN_SHIFT) + +#define I40IWQPC_SQTPHEN_SHIFT 31 +#define I40IWQPC_SQTPHEN_MASK (1ULL << I40IWQPC_SQTPHEN_SHIFT) + +#define I40IWQPC_PPIDX_SHIFT 32 +#define I40IWQPC_PPIDX_MASK (0x3ffULL << I40IWQPC_PPIDX_SHIFT) + +#define I40IWQPC_PMENA_SHIFT 47 +#define I40IWQPC_PMENA_MASK (1ULL << I40IWQPC_PMENA_SHIFT) + +#define I40IWQPC_RDMAP_VER_SHIFT 62 +#define I40IWQPC_RDMAP_VER_MASK (3ULL << I40IWQPC_RDMAP_VER_SHIFT) + +#define I40IWQPC_SQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPC_SQADDR_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPC_RQADDR_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPC_RQADDR_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPC_TTL_SHIFT 0 +#define I40IWQPC_TTL_MASK (0xffUL << I40IWQPC_TTL_SHIFT) + +#define I40IWQPC_RQSIZE_SHIFT 8 +#define I40IWQPC_RQSIZE_MASK (0xfUL << I40IWQPC_RQSIZE_SHIFT) + +#define I40IWQPC_SQSIZE_SHIFT 12 +#define I40IWQPC_SQSIZE_MASK (0xfUL << I40IWQPC_SQSIZE_SHIFT) + +#define I40IWQPC_SRCMACADDRIDX_SHIFT 16 +#define I40IWQPC_SRCMACADDRIDX_MASK (0x3fUL << I40IWQPC_SRCMACADDRIDX_SHIFT) + +#define I40IWQPC_AVOIDSTRETCHACK_SHIFT 23 +#define I40IWQPC_AVOIDSTRETCHACK_MASK (1UL << I40IWQPC_AVOIDSTRETCHACK_SHIFT) + +#define I40IWQPC_TOS_SHIFT 24 +#define I40IWQPC_TOS_MASK (0xffUL << I40IWQPC_TOS_SHIFT) + +#define I40IWQPC_SRCPORTNUM_SHIFT 32 +#define I40IWQPC_SRCPORTNUM_MASK (0xffffULL << I40IWQPC_SRCPORTNUM_SHIFT) + +#define I40IWQPC_DESTPORTNUM_SHIFT 48 +#define I40IWQPC_DESTPORTNUM_MASK (0xffffULL << 
I40IWQPC_DESTPORTNUM_SHIFT) + +#define I40IWQPC_DESTIPADDR0_SHIFT 32 +#define I40IWQPC_DESTIPADDR0_MASK \ + (0xffffffffULL << I40IWQPC_DESTIPADDR0_SHIFT) + +#define I40IWQPC_DESTIPADDR1_SHIFT 0 +#define I40IWQPC_DESTIPADDR1_MASK \ + (0xffffffffULL << I40IWQPC_DESTIPADDR1_SHIFT) + +#define I40IWQPC_DESTIPADDR2_SHIFT 32 +#define I40IWQPC_DESTIPADDR2_MASK \ + (0xffffffffULL << I40IWQPC_DESTIPADDR2_SHIFT) + +#define I40IWQPC_DESTIPADDR3_SHIFT 0 +#define I40IWQPC_DESTIPADDR3_MASK \ + (0xffffffffULL << I40IWQPC_DESTIPADDR3_SHIFT) + +#define I40IWQPC_SNDMSS_SHIFT 16 +#define I40IWQPC_SNDMSS_MASK (0x3fffUL << I40IWQPC_SNDMSS_SHIFT) + +#define I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT 16 +#define I40IW_UDA_QPC_MAXFRAMESIZE_MASK (0x3fffUL << I40IW_UDA_QPC_MAXFRAMESIZE_SHIFT) + +#define I40IWQPC_VLANTAG_SHIFT 32 +#define I40IWQPC_VLANTAG_MASK (0xffffULL << I40IWQPC_VLANTAG_SHIFT) + +#define I40IWQPC_ARPIDX_SHIFT 48 +#define I40IWQPC_ARPIDX_MASK (0xffffULL << I40IWQPC_ARPIDX_SHIFT) + +#define I40IWQPC_FLOWLABEL_SHIFT 0 +#define I40IWQPC_FLOWLABEL_MASK (0xfffffUL << I40IWQPC_FLOWLABEL_SHIFT) + +#define I40IWQPC_WSCALE_SHIFT 20 +#define I40IWQPC_WSCALE_MASK (1UL << I40IWQPC_WSCALE_SHIFT) + +#define I40IWQPC_KEEPALIVE_SHIFT 21 +#define I40IWQPC_KEEPALIVE_MASK (1UL << I40IWQPC_KEEPALIVE_SHIFT) + +#define I40IWQPC_IGNORE_TCP_OPT_SHIFT 22 +#define I40IWQPC_IGNORE_TCP_OPT_MASK (1UL << I40IWQPC_IGNORE_TCP_OPT_SHIFT) + +#define I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT 23 +#define I40IWQPC_IGNORE_TCP_UNS_OPT_MASK \ + (1UL << I40IWQPC_IGNORE_TCP_UNS_OPT_SHIFT) + +#define I40IWQPC_TCPSTATE_SHIFT 28 +#define I40IWQPC_TCPSTATE_MASK (0xfUL << I40IWQPC_TCPSTATE_SHIFT) + +#define I40IWQPC_RCVSCALE_SHIFT 32 +#define I40IWQPC_RCVSCALE_MASK (0xfULL << I40IWQPC_RCVSCALE_SHIFT) + +#define I40IWQPC_SNDSCALE_SHIFT 40 +#define I40IWQPC_SNDSCALE_MASK (0xfULL << I40IWQPC_SNDSCALE_SHIFT) + +#define I40IWQPC_PDIDX_SHIFT 48 +#define I40IWQPC_PDIDX_MASK (0x7fffULL << I40IWQPC_PDIDX_SHIFT) + +#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT 16 +#define I40IWQPC_KALIVE_TIMER_MAX_PROBES_MASK \ + (0xffUL << I40IWQPC_KALIVE_TIMER_MAX_PROBES_SHIFT) + +#define I40IWQPC_KEEPALIVE_INTERVAL_SHIFT 24 +#define I40IWQPC_KEEPALIVE_INTERVAL_MASK \ + (0xffUL << I40IWQPC_KEEPALIVE_INTERVAL_SHIFT) + +#define I40IWQPC_TIMESTAMP_RECENT_SHIFT 0 +#define I40IWQPC_TIMESTAMP_RECENT_MASK \ + (0xffffffffUL << I40IWQPC_TIMESTAMP_RECENT_SHIFT) + +#define I40IWQPC_TIMESTAMP_AGE_SHIFT 32 +#define I40IWQPC_TIMESTAMP_AGE_MASK \ + (0xffffffffULL << I40IWQPC_TIMESTAMP_AGE_SHIFT) + +#define I40IWQPC_SNDNXT_SHIFT 0 +#define I40IWQPC_SNDNXT_MASK (0xffffffffUL << I40IWQPC_SNDNXT_SHIFT) + +#define I40IWQPC_SNDWND_SHIFT 32 +#define I40IWQPC_SNDWND_MASK (0xffffffffULL << I40IWQPC_SNDWND_SHIFT) + +#define I40IWQPC_RCVNXT_SHIFT 0 +#define I40IWQPC_RCVNXT_MASK (0xffffffffUL << I40IWQPC_RCVNXT_SHIFT) + +#define I40IWQPC_RCVWND_SHIFT 32 +#define I40IWQPC_RCVWND_MASK (0xffffffffULL << I40IWQPC_RCVWND_SHIFT) + +#define I40IWQPC_SNDMAX_SHIFT 0 +#define I40IWQPC_SNDMAX_MASK (0xffffffffUL << I40IWQPC_SNDMAX_SHIFT) + +#define I40IWQPC_SNDUNA_SHIFT 32 +#define I40IWQPC_SNDUNA_MASK (0xffffffffULL << I40IWQPC_SNDUNA_SHIFT) + +#define I40IWQPC_SRTT_SHIFT 0 +#define I40IWQPC_SRTT_MASK (0xffffffffUL << I40IWQPC_SRTT_SHIFT) + +#define I40IWQPC_RTTVAR_SHIFT 32 +#define I40IWQPC_RTTVAR_MASK (0xffffffffULL << I40IWQPC_RTTVAR_SHIFT) + +#define I40IWQPC_SSTHRESH_SHIFT 0 +#define I40IWQPC_SSTHRESH_MASK (0xffffffffUL << I40IWQPC_SSTHRESH_SHIFT) + +#define I40IWQPC_CWND_SHIFT 32 +#define I40IWQPC_CWND_MASK 
(0xffffffffULL << I40IWQPC_CWND_SHIFT) + +#define I40IWQPC_SNDWL1_SHIFT 0 +#define I40IWQPC_SNDWL1_MASK (0xffffffffUL << I40IWQPC_SNDWL1_SHIFT) + +#define I40IWQPC_SNDWL2_SHIFT 32 +#define I40IWQPC_SNDWL2_MASK (0xffffffffULL << I40IWQPC_SNDWL2_SHIFT) + +#define I40IWQPC_ERR_RQ_IDX_SHIFT 32 +#define I40IWQPC_ERR_RQ_IDX_MASK (0x3fffULL << I40IWQPC_ERR_RQ_IDX_SHIFT) + +#define I40IWQPC_MAXSNDWND_SHIFT 0 +#define I40IWQPC_MAXSNDWND_MASK (0xffffffffUL << I40IWQPC_MAXSNDWND_SHIFT) + +#define I40IWQPC_REXMIT_THRESH_SHIFT 48 +#define I40IWQPC_REXMIT_THRESH_MASK (0x3fULL << I40IWQPC_REXMIT_THRESH_SHIFT) + +#define I40IWQPC_TXCQNUM_SHIFT 0 +#define I40IWQPC_TXCQNUM_MASK (0x1ffffUL << I40IWQPC_TXCQNUM_SHIFT) + +#define I40IWQPC_RXCQNUM_SHIFT 32 +#define I40IWQPC_RXCQNUM_MASK (0x1ffffULL << I40IWQPC_RXCQNUM_SHIFT) + +#define I40IWQPC_STAT_INDEX_SHIFT 0 +#define I40IWQPC_STAT_INDEX_MASK (0x1fULL << I40IWQPC_STAT_INDEX_SHIFT) + +#define I40IWQPC_Q2ADDR_SHIFT 0 +#define I40IWQPC_Q2ADDR_MASK (0xffffffffffffff00ULL << I40IWQPC_Q2ADDR_SHIFT) + +#define I40IWQPC_LASTBYTESENT_SHIFT 0 +#define I40IWQPC_LASTBYTESENT_MASK (0xffUL << I40IWQPC_LASTBYTESENT_SHIFT) + +#define I40IWQPC_SRQID_SHIFT 32 +#define I40IWQPC_SRQID_MASK (0xffULL << I40IWQPC_SRQID_SHIFT) + +#define I40IWQPC_ORDSIZE_SHIFT 0 +#define I40IWQPC_ORDSIZE_MASK (0x7fUL << I40IWQPC_ORDSIZE_SHIFT) + +#define I40IWQPC_IRDSIZE_SHIFT 16 +#define I40IWQPC_IRDSIZE_MASK (0x3UL << I40IWQPC_IRDSIZE_SHIFT) + +#define I40IWQPC_WRRDRSPOK_SHIFT 20 +#define I40IWQPC_WRRDRSPOK_MASK (1UL << I40IWQPC_WRRDRSPOK_SHIFT) + +#define I40IWQPC_RDOK_SHIFT 21 +#define I40IWQPC_RDOK_MASK (1UL << I40IWQPC_RDOK_SHIFT) + +#define I40IWQPC_SNDMARKERS_SHIFT 22 +#define I40IWQPC_SNDMARKERS_MASK (1UL << I40IWQPC_SNDMARKERS_SHIFT) + +#define I40IWQPC_BINDEN_SHIFT 23 +#define I40IWQPC_BINDEN_MASK (1UL << I40IWQPC_BINDEN_SHIFT) + +#define I40IWQPC_FASTREGEN_SHIFT 24 +#define I40IWQPC_FASTREGEN_MASK (1UL << I40IWQPC_FASTREGEN_SHIFT) + +#define I40IWQPC_PRIVEN_SHIFT 25 +#define I40IWQPC_PRIVEN_MASK (1UL << I40IWQPC_PRIVEN_SHIFT) + +#define I40IWQPC_USESTATSINSTANCE_SHIFT 26 +#define I40IWQPC_USESTATSINSTANCE_MASK (1UL << I40IWQPC_USESTATSINSTANCE_SHIFT) + +#define I40IWQPC_IWARPMODE_SHIFT 28 +#define I40IWQPC_IWARPMODE_MASK (1UL << I40IWQPC_IWARPMODE_SHIFT) + +#define I40IWQPC_RCVMARKERS_SHIFT 29 +#define I40IWQPC_RCVMARKERS_MASK (1UL << I40IWQPC_RCVMARKERS_SHIFT) + +#define I40IWQPC_ALIGNHDRS_SHIFT 30 +#define I40IWQPC_ALIGNHDRS_MASK (1UL << I40IWQPC_ALIGNHDRS_SHIFT) + +#define I40IWQPC_RCVNOMPACRC_SHIFT 31 +#define I40IWQPC_RCVNOMPACRC_MASK (1UL << I40IWQPC_RCVNOMPACRC_SHIFT) + +#define I40IWQPC_RCVMARKOFFSET_SHIFT 33 +#define I40IWQPC_RCVMARKOFFSET_MASK (0x1ffULL << I40IWQPC_RCVMARKOFFSET_SHIFT) + +#define I40IWQPC_SNDMARKOFFSET_SHIFT 48 +#define I40IWQPC_SNDMARKOFFSET_MASK (0x1ffULL << I40IWQPC_SNDMARKOFFSET_SHIFT) + +#define I40IWQPC_QPCOMPCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPC_QPCOMPCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPC_SQTPHVAL_SHIFT 0 +#define I40IWQPC_SQTPHVAL_MASK (0xffUL << I40IWQPC_SQTPHVAL_SHIFT) + +#define I40IWQPC_RQTPHVAL_SHIFT 8 +#define I40IWQPC_RQTPHVAL_MASK (0xffUL << I40IWQPC_RQTPHVAL_SHIFT) + +#define I40IWQPC_QSHANDLE_SHIFT 16 +#define I40IWQPC_QSHANDLE_MASK (0x3ffUL << I40IWQPC_QSHANDLE_SHIFT) + +#define I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT 32 +#define I40IWQPC_EXCEPTION_LAN_QUEUE_MASK (0xfffULL << \ + I40IWQPC_EXCEPTION_LAN_QUEUE_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR3_SHIFT 0 +#define I40IWQPC_LOCAL_IPADDR3_MASK \ + (0xffffffffUL << 
I40IWQPC_LOCAL_IPADDR3_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR2_SHIFT 32 +#define I40IWQPC_LOCAL_IPADDR2_MASK \ + (0xffffffffULL << I40IWQPC_LOCAL_IPADDR2_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR1_SHIFT 0 +#define I40IWQPC_LOCAL_IPADDR1_MASK \ + (0xffffffffUL << I40IWQPC_LOCAL_IPADDR1_SHIFT) + +#define I40IWQPC_LOCAL_IPADDR0_SHIFT 32 +#define I40IWQPC_LOCAL_IPADDR0_MASK \ + (0xffffffffULL << I40IWQPC_LOCAL_IPADDR0_SHIFT) + +/* wqe size considering 32 bytes per wqe*/ +#define I40IW_QP_SW_MIN_WQSIZE 4 /*in WRs*/ +#define I40IW_SQ_RSVD 2 +#define I40IW_RQ_RSVD 1 +#define I40IW_MAX_QUANTAS_PER_WR 2 +#define I40IW_QP_SW_MAX_SQ_QUANTAS 2048 +#define I40IW_QP_SW_MAX_RQ_QUANTAS 16384 +#define I40IW_MAX_QP_WRS ((I40IW_QP_SW_MAX_SQ_QUANTAS / I40IW_MAX_QUANTAS_PER_WR) - 1) + +#define I40IWQP_OP_RDMA_WRITE 0 +#define I40IWQP_OP_RDMA_READ 1 +#define I40IWQP_OP_RDMA_SEND 3 +#define I40IWQP_OP_RDMA_SEND_INV 4 +#define I40IWQP_OP_RDMA_SEND_SOL_EVENT 5 +#define I40IWQP_OP_RDMA_SEND_SOL_EVENT_INV 6 +#define I40IWQP_OP_BIND_MW 8 +#define I40IWQP_OP_FAST_REGISTER 9 +#define I40IWQP_OP_LOCAL_INVALIDATE 10 +#define I40IWQP_OP_RDMA_READ_LOC_INV 11 +#define I40IWQP_OP_NOP 12 + +#define I40IW_RSVD_SHIFT 41 +#define I40IW_RSVD_MASK (0x7fffULL << I40IW_RSVD_SHIFT) + +/* iwarp QP SQ WQE common fields */ +#define I40IWQPSQ_OPCODE_SHIFT 32 +#define I40IWQPSQ_OPCODE_MASK (0x3fULL << I40IWQPSQ_OPCODE_SHIFT) + +#define I40IWQPSQ_ADDFRAGCNT_SHIFT 38 +#define I40IWQPSQ_ADDFRAGCNT_MASK (0x7ULL << I40IWQPSQ_ADDFRAGCNT_SHIFT) + +#define I40IWQPSQ_PUSHWQE_SHIFT 56 +#define I40IWQPSQ_PUSHWQE_MASK (1ULL << I40IWQPSQ_PUSHWQE_SHIFT) + +#define I40IWQPSQ_STREAMMODE_SHIFT 58 +#define I40IWQPSQ_STREAMMODE_MASK (1ULL << I40IWQPSQ_STREAMMODE_SHIFT) + +#define I40IWQPSQ_WAITFORRCVPDU_SHIFT 59 +#define I40IWQPSQ_WAITFORRCVPDU_MASK (1ULL << I40IWQPSQ_WAITFORRCVPDU_SHIFT) + +#define I40IWQPSQ_READFENCE_SHIFT 60 +#define I40IWQPSQ_READFENCE_MASK (1ULL << I40IWQPSQ_READFENCE_SHIFT) + +#define I40IWQPSQ_LOCALFENCE_SHIFT 61 +#define I40IWQPSQ_LOCALFENCE_MASK (1ULL << I40IWQPSQ_LOCALFENCE_SHIFT) + +#define I40IWQPSQ_SIGCOMPL_SHIFT 62 +#define I40IWQPSQ_SIGCOMPL_MASK (1ULL << I40IWQPSQ_SIGCOMPL_SHIFT) + +#define I40IWQPSQ_VALID_SHIFT 63 +#define I40IWQPSQ_VALID_MASK (1ULL << I40IWQPSQ_VALID_SHIFT) + +#define I40IWQPSQ_FRAG_TO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_FRAG_TO_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPSQ_FRAG_LEN_SHIFT 0 +#define I40IWQPSQ_FRAG_LEN_MASK (0xffffffffUL << I40IWQPSQ_FRAG_LEN_SHIFT) + +#define I40IWQPSQ_FRAG_STAG_SHIFT 32 +#define I40IWQPSQ_FRAG_STAG_MASK (0xffffffffULL << I40IWQPSQ_FRAG_STAG_SHIFT) + +#define I40IWQPSQ_REMSTAGINV_SHIFT 0 +#define I40IWQPSQ_REMSTAGINV_MASK (0xffffffffUL << I40IWQPSQ_REMSTAGINV_SHIFT) + +#define I40IWQPSQ_INLINEDATAFLAG_SHIFT 57 +#define I40IWQPSQ_INLINEDATAFLAG_MASK (1ULL << I40IWQPSQ_INLINEDATAFLAG_SHIFT) + +#define I40IWQPSQ_INLINEDATALEN_SHIFT 48 +#define I40IWQPSQ_INLINEDATALEN_MASK \ + (0x7fULL << I40IWQPSQ_INLINEDATALEN_SHIFT) + +/* iwarp send with push mode */ +#define I40IWQPSQ_WQDESCIDX_SHIFT 0 +#define I40IWQPSQ_WQDESCIDX_MASK (0x3fffUL << I40IWQPSQ_WQDESCIDX_SHIFT) + +/* rdma write */ +#define I40IWQPSQ_REMSTAG_SHIFT 0 +#define I40IWQPSQ_REMSTAG_MASK (0xffffffffUL << I40IWQPSQ_REMSTAG_SHIFT) + +#define I40IWQPSQ_REMTO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_REMTO_MASK I40IW_CQPHC_QPCTX_MASK + +/* memory window */ +#define I40IWQPSQ_STAGRIGHTS_SHIFT 48 +#define I40IWQPSQ_STAGRIGHTS_MASK (0x1fULL << I40IWQPSQ_STAGRIGHTS_SHIFT) + +#define 
I40IWQPSQ_VABASEDTO_SHIFT 53 +#define I40IWQPSQ_VABASEDTO_MASK (1ULL << I40IWQPSQ_VABASEDTO_SHIFT) + +#define I40IWQPSQ_MWLEN_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_MWLEN_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPSQ_PARENTMRSTAG_SHIFT 0 +#define I40IWQPSQ_PARENTMRSTAG_MASK \ + (0xffffffffUL << I40IWQPSQ_PARENTMRSTAG_SHIFT) + +#define I40IWQPSQ_MWSTAG_SHIFT 32 +#define I40IWQPSQ_MWSTAG_MASK (0xffffffffULL << I40IWQPSQ_MWSTAG_SHIFT) + +#define I40IWQPSQ_BASEVA_TO_FBO_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPSQ_BASEVA_TO_FBO_MASK I40IW_CQPHC_QPCTX_MASK + +/* Local Invalidate */ +#define I40IWQPSQ_LOCSTAG_SHIFT 32 +#define I40IWQPSQ_LOCSTAG_MASK (0xffffffffULL << I40IWQPSQ_LOCSTAG_SHIFT) + +/* Fast Register */ +#define I40IWQPSQ_STAGKEY_SHIFT 0 +#define I40IWQPSQ_STAGKEY_MASK (0xffUL << I40IWQPSQ_STAGKEY_SHIFT) + +#define I40IWQPSQ_STAGINDEX_SHIFT 8 +#define I40IWQPSQ_STAGINDEX_MASK (0xffffffUL << I40IWQPSQ_STAGINDEX_SHIFT) + +#define I40IWQPSQ_COPYHOSTPBLS_SHIFT 43 +#define I40IWQPSQ_COPYHOSTPBLS_MASK (1ULL << I40IWQPSQ_COPYHOSTPBLS_SHIFT) + +#define I40IWQPSQ_LPBLSIZE_SHIFT 44 +#define I40IWQPSQ_LPBLSIZE_MASK (3ULL << I40IWQPSQ_LPBLSIZE_SHIFT) + +#define I40IWQPSQ_HPAGESIZE_SHIFT 46 +#define I40IWQPSQ_HPAGESIZE_MASK (3ULL << I40IWQPSQ_HPAGESIZE_SHIFT) + +#define I40IWQPSQ_STAGLEN_SHIFT 0 +#define I40IWQPSQ_STAGLEN_MASK (0x1ffffffffffULL << I40IWQPSQ_STAGLEN_SHIFT) + +#define I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT 48 +#define I40IWQPSQ_FIRSTPMPBLIDXLO_MASK \ + (0xffffULL << I40IWQPSQ_FIRSTPMPBLIDXLO_SHIFT) + +#define I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT 0 +#define I40IWQPSQ_FIRSTPMPBLIDXHI_MASK \ + (0xfffUL << I40IWQPSQ_FIRSTPMPBLIDXHI_SHIFT) + +#define I40IWQPSQ_PBLADDR_SHIFT 12 +#define I40IWQPSQ_PBLADDR_MASK (0xfffffffffffffULL << I40IWQPSQ_PBLADDR_SHIFT) + +/* iwarp QP RQ WQE common fields */ +#define I40IWQPRQ_ADDFRAGCNT_SHIFT I40IWQPSQ_ADDFRAGCNT_SHIFT +#define I40IWQPRQ_ADDFRAGCNT_MASK I40IWQPSQ_ADDFRAGCNT_MASK + +#define I40IWQPRQ_VALID_SHIFT I40IWQPSQ_VALID_SHIFT +#define I40IWQPRQ_VALID_MASK I40IWQPSQ_VALID_MASK + +#define I40IWQPRQ_COMPLCTX_SHIFT I40IW_CQPHC_QPCTX_SHIFT +#define I40IWQPRQ_COMPLCTX_MASK I40IW_CQPHC_QPCTX_MASK + +#define I40IWQPRQ_FRAG_LEN_SHIFT I40IWQPSQ_FRAG_LEN_SHIFT +#define I40IWQPRQ_FRAG_LEN_MASK I40IWQPSQ_FRAG_LEN_MASK + +#define I40IWQPRQ_STAG_SHIFT I40IWQPSQ_FRAG_STAG_SHIFT +#define I40IWQPRQ_STAG_MASK I40IWQPSQ_FRAG_STAG_MASK + +#define I40IWQPRQ_TO_SHIFT I40IWQPSQ_FRAG_TO_SHIFT +#define I40IWQPRQ_TO_MASK I40IWQPSQ_FRAG_TO_MASK + +/* Query FPM CQP buf */ +#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_QPS_MASK \ + (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT) + +#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_CQS_MASK \ + (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT) + +#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT 0 +#define I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_MASK \ + (0x3fffUL << I40IW_QUERY_FPM_FIRST_PE_SD_INDEX_SHIFT) + +#define I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT 32 +#define I40IW_QUERY_FPM_MAX_PE_SDS_MASK \ + (0x3fffULL << I40IW_QUERY_FPM_MAX_PE_SDS_SHIFT) + +#define I40IW_QUERY_FPM_MAX_QPS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_QPS_MASK \ + (0x7ffffUL << I40IW_QUERY_FPM_MAX_QPS_SHIFT) + +#define I40IW_QUERY_FPM_MAX_CQS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_CQS_MASK \ + (0x3ffffUL << I40IW_QUERY_FPM_MAX_CQS_SHIFT) + +#define I40IW_QUERY_FPM_MAX_CEQS_SHIFT 0 +#define I40IW_QUERY_FPM_MAX_CEQS_MASK \ + (0xffUL << I40IW_QUERY_FPM_MAX_CEQS_SHIFT) + +#define I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT 32 +#define 
I40IW_QUERY_FPM_XFBLOCKSIZE_MASK \ + (0xffffffffULL << I40IW_QUERY_FPM_XFBLOCKSIZE_SHIFT) + +#define I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT 32 +#define I40IW_QUERY_FPM_Q1BLOCKSIZE_MASK \ + (0xffffffffULL << I40IW_QUERY_FPM_Q1BLOCKSIZE_SHIFT) + +#define I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT 16 +#define I40IW_QUERY_FPM_HTMULTIPLIER_MASK \ + (0xfUL << I40IW_QUERY_FPM_HTMULTIPLIER_SHIFT) + +#define I40IW_QUERY_FPM_TIMERBUCKET_SHIFT 32 +#define I40IW_QUERY_FPM_TIMERBUCKET_MASK \ + (0xffFFULL << I40IW_QUERY_FPM_TIMERBUCKET_SHIFT) + +/* Static HMC pages allocated buf */ +#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT 0 +#define I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_MASK \ + (0x3fUL << I40IW_SHMC_PAGE_ALLOCATED_HMC_FN_ID_SHIFT) + +#define I40IW_HW_PAGE_SIZE 4096 +#define I40IW_DONE_COUNT 1000 +#define I40IW_SLEEP_COUNT 10 + +enum { + I40IW_QUEUES_ALIGNMENT_MASK = (128 - 1), + I40IW_AEQ_ALIGNMENT_MASK = (256 - 1), + I40IW_Q2_ALIGNMENT_MASK = (256 - 1), + I40IW_CEQ_ALIGNMENT_MASK = (256 - 1), + I40IW_CQ0_ALIGNMENT_MASK = (256 - 1), + I40IW_HOST_CTX_ALIGNMENT_MASK = (4 - 1), + I40IW_SHADOWAREA_MASK = (128 - 1), + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK = (4 - 1), + I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK = (4 - 1) +}; + +enum i40iw_alignment { + I40IW_CQP_ALIGNMENT = 0x200, + I40IW_AEQ_ALIGNMENT = 0x100, + I40IW_CEQ_ALIGNMENT = 0x100, + I40IW_CQ0_ALIGNMENT = 0x100, + I40IW_SD_BUF_ALIGNMENT = 0x80 +}; + +#define I40IW_WQE_SIZE_64 64 + +#define I40IW_QP_WQE_MIN_SIZE 32 +#define I40IW_QP_WQE_MAX_SIZE 128 + +#define I40IW_UPDATE_SD_BUF_SIZE 128 + +#define I40IW_CQE_QTYPE_RQ 0 +#define I40IW_CQE_QTYPE_SQ 1 + +#define I40IW_RING_INIT(_ring, _size) \ + { \ + (_ring).head = 0; \ + (_ring).tail = 0; \ + (_ring).size = (_size); \ + } +#define I40IW_RING_GETSIZE(_ring) ((_ring).size) +#define I40IW_RING_GETCURRENT_HEAD(_ring) ((_ring).head) +#define I40IW_RING_GETCURRENT_TAIL(_ring) ((_ring).tail) + +#define I40IW_RING_MOVE_HEAD(_ring, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if (!I40IW_RING_FULL_ERR(_ring)) { \ + (_ring).head = ((_ring).head + 1) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = I40IW_ERR_RING_FULL; \ + } \ + } + +#define I40IW_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \ + { \ + register u32 size; \ + size = (_ring).size; \ + if ((I40IW_RING_WORK_AVAILABLE(_ring) + (_count)) < size) { \ + (_ring).head = ((_ring).head + (_count)) % size; \ + (_retcode) = 0; \ + } else { \ + (_retcode) = I40IW_ERR_RING_FULL; \ + } \ + } + +#define I40IW_RING_MOVE_TAIL(_ring) \ + (_ring).tail = ((_ring).tail + 1) % (_ring).size + +#define I40IW_RING_MOVE_HEAD_NOCHECK(_ring) \ + (_ring).head = ((_ring).head + 1) % (_ring).size + +#define I40IW_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \ + (_ring).tail = ((_ring).tail + (_count)) % (_ring).size + +#define I40IW_RING_SET_TAIL(_ring, _pos) \ + (_ring).tail = (_pos) % (_ring).size + +#define I40IW_RING_FULL_ERR(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 1)) \ + ) + +#define I40IW_ERR_RING_FULL2(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 2)) \ + ) + +#define I40IW_ERR_RING_FULL3(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) == ((_ring).size - 3)) \ + ) + +#define I40IW_RING_MORE_WORK(_ring) \ + ( \ + (I40IW_RING_WORK_AVAILABLE(_ring) != 0) \ + ) + +#define I40IW_RING_WORK_AVAILABLE(_ring) \ + ( \ + (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \ + ) + +#define I40IW_RING_GET_WQES_AVAILABLE(_ring) \ + ( \ + ((_ring).size - I40IW_RING_WORK_AVAILABLE(_ring) - 1) \ + ) + 
+#define I40IW_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \ + { \ + index = I40IW_RING_GETCURRENT_HEAD(_ring); \ + I40IW_RING_MOVE_HEAD(_ring, _retcode); \ + } + +/* Async Events codes */ +#define I40IW_AE_AMP_UNALLOCATED_STAG 0x0102 +#define I40IW_AE_AMP_INVALID_STAG 0x0103 +#define I40IW_AE_AMP_BAD_QP 0x0104 +#define I40IW_AE_AMP_BAD_PD 0x0105 +#define I40IW_AE_AMP_BAD_STAG_KEY 0x0106 +#define I40IW_AE_AMP_BAD_STAG_INDEX 0x0107 +#define I40IW_AE_AMP_BOUNDS_VIOLATION 0x0108 +#define I40IW_AE_AMP_RIGHTS_VIOLATION 0x0109 +#define I40IW_AE_AMP_TO_WRAP 0x010a +#define I40IW_AE_AMP_FASTREG_SHARED 0x010b +#define I40IW_AE_AMP_FASTREG_VALID_STAG 0x010c +#define I40IW_AE_AMP_FASTREG_MW_STAG 0x010d +#define I40IW_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e +#define I40IW_AE_AMP_FASTREG_PBL_TABLE_OVERFLOW 0x010f +#define I40IW_AE_AMP_FASTREG_INVALID_LENGTH 0x0110 +#define I40IW_AE_AMP_INVALIDATE_SHARED 0x0111 +#define I40IW_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112 +#define I40IW_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113 +#define I40IW_AE_AMP_MWBIND_VALID_STAG 0x0114 +#define I40IW_AE_AMP_MWBIND_OF_MR_STAG 0x0115 +#define I40IW_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116 +#define I40IW_AE_AMP_MWBIND_TO_MW_STAG 0x0117 +#define I40IW_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118 +#define I40IW_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119 +#define I40IW_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a +#define I40IW_AE_AMP_MWBIND_BIND_DISABLED 0x011b +#define I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132 +#define I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134 +#define I40IW_AE_BAD_CLOSE 0x0201 +#define I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202 +#define I40IW_AE_CQ_OPERATION_ERROR 0x0203 +#define I40IW_AE_PRIV_OPERATION_DENIED 0x011c +#define I40IW_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205 +#define I40IW_AE_STAG_ZERO_INVALID 0x0206 +#define I40IW_AE_IB_RREQ_AND_Q1_FULL 0x0207 +#define I40IW_AE_WQE_UNEXPECTED_OPCODE 0x020a +#define I40IW_AE_WQE_INVALID_PARAMETER 0x020b +#define I40IW_AE_WQE_LSMM_TOO_LONG 0x0220 +#define I40IW_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301 +#define I40IW_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303 +#define I40IW_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304 +#define I40IW_AE_DDP_UBE_INVALID_MO 0x0305 +#define I40IW_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306 +#define I40IW_AE_DDP_UBE_INVALID_QN 0x0307 +#define I40IW_AE_DDP_NO_L_BIT 0x0308 +#define I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311 +#define I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312 +#define I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313 +#define I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314 +#define I40IW_AE_INVALID_ARP_ENTRY 0x0401 +#define I40IW_AE_INVALID_TCP_OPTION_RCVD 0x0402 +#define I40IW_AE_STALE_ARP_ENTRY 0x0403 +#define I40IW_AE_INVALID_MAC_ENTRY 0x0405 +#define I40IW_AE_LLP_CLOSE_COMPLETE 0x0501 +#define I40IW_AE_LLP_CONNECTION_RESET 0x0502 +#define I40IW_AE_LLP_FIN_RECEIVED 0x0503 +#define I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505 +#define I40IW_AE_LLP_SEGMENT_TOO_LARGE 0x0506 +#define I40IW_AE_LLP_SEGMENT_TOO_SMALL 0x0507 +#define I40IW_AE_LLP_SYN_RECEIVED 0x0508 +#define I40IW_AE_LLP_TERMINATE_RECEIVED 0x0509 +#define I40IW_AE_LLP_TOO_MANY_RETRIES 0x050a +#define I40IW_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b +#define I40IW_AE_LLP_DOUBT_REACHABILITY 0x050c +#define I40IW_AE_LLP_RX_VLAN_MISMATCH 0x050d +#define I40IW_AE_RESOURCE_EXHAUSTION 0x0520 +#define I40IW_AE_RESET_SENT 0x0601 +#define I40IW_AE_TERMINATE_SENT 0x0602 +#define I40IW_AE_RESET_NOT_SENT 0x0603 +#define I40IW_AE_LCE_QP_CATASTROPHIC 0x0700 +#define 
I40IW_AE_LCE_FUNCTION_CATASTROPHIC 0x0701 +#define I40IW_AE_LCE_CQ_CATASTROPHIC 0x0702 +#define I40IW_AE_QP_SUSPEND_COMPLETE 0x0900 + +#define OP_DELETE_LOCAL_MAC_IPADDR_ENTRY 1 +#define OP_CEQ_DESTROY 2 +#define OP_AEQ_DESTROY 3 +#define OP_DELETE_ARP_CACHE_ENTRY 4 +#define OP_MANAGE_APBVT_ENTRY 5 +#define OP_CEQ_CREATE 6 +#define OP_AEQ_CREATE 7 +#define OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY 8 +#define OP_ADD_LOCAL_MAC_IPADDR_ENTRY 9 +#define OP_MANAGE_QHASH_TABLE_ENTRY 10 +#define OP_QP_MODIFY 11 +#define OP_QP_UPLOAD_CONTEXT 12 +#define OP_CQ_CREATE 13 +#define OP_CQ_DESTROY 14 +#define OP_QP_CREATE 15 +#define OP_QP_DESTROY 16 +#define OP_ALLOC_STAG 17 +#define OP_MR_REG_NON_SHARED 18 +#define OP_DEALLOC_STAG 19 +#define OP_MW_ALLOC 20 +#define OP_QP_FLUSH_WQES 21 +#define OP_ADD_ARP_CACHE_ENTRY 22 +#define OP_MANAGE_PUSH_PAGE 23 +#define OP_UPDATE_PE_SDS 24 +#define OP_MANAGE_HMC_PM_FUNC_TABLE 25 +#define OP_SUSPEND 26 +#define OP_RESUME 27 +#define OP_MANAGE_VF_PBLE_BP 28 +#define OP_QUERY_FPM_VALUES 29 +#define OP_COMMIT_FPM_VALUES 30 +#define OP_REQUESTED_COMMANDS 31 +#define OP_COMPLETED_COMMANDS 32 +#define OP_GEN_AE 33 +#define OP_SIZE_CQP_STAT_ARRAY 34 + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.c b/drivers/infiniband/hw/i40iw/i40iw_hmc.c new file mode 100644 index 000000000..5484cbf55 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.c @@ -0,0 +1,821 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_vf.h" +#include "i40iw_virtchnl.h" + +/** + * i40iw_find_sd_index_limit - finds segment descriptor index limit + * @hmc_info: pointer to the HMC configuration information structure + * @type: type of HMC resources we're searching + * @index: starting index for the object + * @cnt: number of objects we're trying to create + * @sd_idx: pointer to return index of the segment descriptor in question + * @sd_limit: pointer to return the maximum number of segment descriptors + * + * This function calculates the segment descriptor index and index limit + * for the resource defined by i40iw_hmc_rsrc_type. + */ + +static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info, + u32 type, + u32 idx, + u32 cnt, + u32 *sd_idx, + u32 *sd_limit) +{ + u64 fpm_addr, fpm_limit; + + fpm_addr = hmc_info->hmc_obj[(type)].base + + hmc_info->hmc_obj[type].size * idx; + fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt; + *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE); + *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE); + *sd_limit += 1; +} + +/** + * i40iw_find_pd_index_limit - finds page descriptor index limit + * @hmc_info: pointer to the HMC configuration information struct + * @type: HMC resource type we're examining + * @idx: starting index for the object + * @cnt: number of objects we're trying to create + * @pd_index: pointer to return page descriptor index + * @pd_limit: pointer to return page descriptor index limit + * + * Calculates the page descriptor index and index limit for the resource + * defined by i40iw_hmc_rsrc_type. + */ + +static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info, + u32 type, + u32 idx, + u32 cnt, + u32 *pd_idx, + u32 *pd_limit) +{ + u64 fpm_adr, fpm_limit; + + fpm_adr = hmc_info->hmc_obj[type].base + + hmc_info->hmc_obj[type].size * idx; + fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); + *(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE); + *(pd_limit) = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE); + *(pd_limit) += 1; +} + +/** + * i40iw_set_sd_entry - setup entry for sd programming + * @pa: physical addr + * @idx: sd index + * @type: paged or direct sd + * @entry: sd entry ptr + */ +static inline void i40iw_set_sd_entry(u64 pa, + u32 idx, + enum i40iw_sd_entry_type type, + struct update_sd_entry *entry) +{ + entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | + (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) << + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | + (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); + entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15)); +} + +/** + * i40iw_clr_sd_entry - setup entry for sd clear + * @idx: sd index + * @type: paged or direct sd + * @entry: sd entry ptr + */ +static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type, + struct update_sd_entry *entry) +{ + entry->data = (I40IW_HMC_MAX_BP_COUNT << + I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | + (((type == I40IW_SD_TYPE_PAGED) ? 
0 : 1) << + I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); + entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15)); +} + +/** + * i40iw_hmc_sd_one - setup 1 sd entry for cqp + * @dev: pointer to the device structure + * @hmc_fn_id: hmc's function id + * @pa: physical addr + * @sd_idx: sd index + * @type: paged or direct sd + * @setsd: flag to set or clear sd + */ +enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, + u8 hmc_fn_id, + u64 pa, u32 sd_idx, + enum i40iw_sd_entry_type type, + bool setsd) +{ + struct i40iw_update_sds_info sdinfo; + + sdinfo.cnt = 1; + sdinfo.hmc_fn_id = hmc_fn_id; + if (setsd) + i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry); + else + i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry); + + return dev->cqp->process_cqp_sds(dev, &sdinfo); +} + +/** + * i40iw_hmc_sd_grp - setup group od sd entries for cqp + * @dev: pointer to the device structure + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: sd index + * @sd_cnt: number of sd entries + * @setsd: flag to set or clear sd + */ +static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev, + struct i40iw_hmc_info *hmc_info, + u32 sd_index, + u32 sd_cnt, + bool setsd) +{ + struct i40iw_hmc_sd_entry *sd_entry; + struct i40iw_update_sds_info sdinfo; + u64 pa; + u32 i; + enum i40iw_status_code ret_code = 0; + + memset(&sdinfo, 0, sizeof(sdinfo)); + sdinfo.hmc_fn_id = hmc_info->hmc_fn_id; + for (i = sd_index; i < sd_index + sd_cnt; i++) { + sd_entry = &hmc_info->sd_table.sd_entry[i]; + if (!sd_entry || + (!sd_entry->valid && setsd) || + (sd_entry->valid && !setsd)) + continue; + if (setsd) { + pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ? + sd_entry->u.pd_table.pd_page_addr.pa : + sd_entry->u.bp.addr.pa; + i40iw_set_sd_entry(pa, i, sd_entry->entry_type, + &sdinfo.entry[sdinfo.cnt]); + } else { + i40iw_clr_sd_entry(i, sd_entry->entry_type, + &sdinfo.entry[sdinfo.cnt]); + } + sdinfo.cnt++; + if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) { + ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "i40iw_hmc_sd_grp: sd_programming failed err=%d\n", + ret_code); + return ret_code; + } + sdinfo.cnt = 0; + } + } + if (sdinfo.cnt) + ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo); + + return ret_code; +} + +/** + * i40iw_vfdev_from_fpm - return vf dev ptr for hmc function id + * @dev: pointer to the device structure + * @hmc_fn_id: hmc's function id + */ +struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id) +{ + struct i40iw_vfdev *vf_dev = NULL; + u16 idx; + + for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) { + if (dev->vf_dev[idx] && + ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) { + vf_dev = dev->vf_dev[idx]; + break; + } + } + return vf_dev; +} + +/** + * i40iw_vf_hmcinfo_from_fpm - get ptr to hmc for func_id + * @dev: pointer to the device structure + * @hmc_fn_id: hmc's function id + */ +struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev, + u8 hmc_fn_id) +{ + struct i40iw_hmc_info *hmc_info = NULL; + u16 idx; + + for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) { + if (dev->vf_dev[idx] && + ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) { + hmc_info = &dev->vf_dev[idx]->hmc_info; + break; + } + } + return hmc_info; +} + +/** + * i40iw_hmc_finish_add_sd_reg - program sd entries for objects + * @dev: pointer to the device structure + * @info: create obj info + */ +static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct 
i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info) +{ + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) + return I40IW_ERR_INVALID_HMC_OBJ_INDEX; + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) + return I40IW_ERR_INVALID_HMC_OBJ_COUNT; + + if (!info->add_sd_cnt) + return 0; + + return i40iw_hmc_sd_grp(dev, info->hmc_info, + info->hmc_info->sd_indexes[0], + info->add_sd_cnt, true); +} + +/** + * i40iw_create_iw_hmc_obj - allocate backing store for hmc objects + * @dev: pointer to the device structure + * @info: pointer to i40iw_hmc_iw_create_obj_info struct + * + * This will allocate memory for PDs and backing pages and populate + * the sd and pd entries. + */ +enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info) +{ + struct i40iw_hmc_sd_entry *sd_entry; + u32 sd_idx, sd_lmt; + u32 pd_idx = 0, pd_lmt = 0; + u32 pd_idx1 = 0, pd_lmt1 = 0; + u32 i, j; + bool pd_error = false; + enum i40iw_status_code ret_code = 0; + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) + return I40IW_ERR_INVALID_HMC_OBJ_INDEX; + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: error type %u, start = %u, req cnt %u, cnt = %u\n", + __func__, info->rsrc_type, info->start_idx, info->count, + info->hmc_info->hmc_obj[info->rsrc_type].cnt); + return I40IW_ERR_INVALID_HMC_OBJ_COUNT; + } + + if (!dev->is_pf) + return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count); + + i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, + &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > info->hmc_info->sd_table.sd_cnt) { + return I40IW_ERR_INVALID_SD_INDEX; + } + i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, &pd_lmt); + + for (j = sd_idx; j < sd_lmt; j++) { + ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info, + j, + info->entry_type, + I40IW_HMC_DIRECT_BP_SIZE); + if (ret_code) + goto exit_sd_error; + sd_entry = &info->hmc_info->sd_table.sd_entry[j]; + + if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) && + ((dev->hmc_info == info->hmc_info) && + (info->rsrc_type != I40IW_HMC_IW_PBLE))) { + pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT)); + pd_lmt1 = min(pd_lmt, + (j + 1) * I40IW_HMC_MAX_BP_COUNT); + for (i = pd_idx1; i < pd_lmt1; i++) { + /* update the pd table entry */ + ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info, + i, NULL); + if (ret_code) { + pd_error = true; + break; + } + } + if (pd_error) { + while (i && (i > pd_idx1)) { + i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1), + info->is_pf); + i--; + } + } + } + if (sd_entry->valid) + continue; + + info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j; + info->add_sd_cnt++; + sd_entry->valid = true; + } + return i40iw_hmc_finish_add_sd_reg(dev, info); + +exit_sd_error: + while (j && (j > sd_idx)) { + sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1]; + switch (sd_entry->entry_type) { + case I40IW_SD_TYPE_PAGED: + pd_idx1 = max(pd_idx, + (j - 1) * I40IW_HMC_MAX_BP_COUNT); + pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT)); + for (i = pd_idx1; i < pd_lmt1; i++) + i40iw_prep_remove_pd_page(info->hmc_info, i); + break; + case I40IW_SD_TYPE_DIRECT: + i40iw_prep_remove_pd_page(info->hmc_info, (j - 1)); + break; + default: + ret_code = I40IW_ERR_INVALID_SD_TYPE; + break; + } + 
j--; + } + + return ret_code; +} + +/** + * i40iw_finish_del_sd_reg - delete sd entries for objects + * @dev: pointer to the device structure + * @info: dele obj info + * @reset: true if called before reset + */ +static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *info, + bool reset) +{ + struct i40iw_hmc_sd_entry *sd_entry; + enum i40iw_status_code ret_code = 0; + u32 i, sd_idx; + struct i40iw_dma_mem *mem; + + if (dev->is_pf && !reset) + ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info, + info->hmc_info->sd_indexes[0], + info->del_sd_cnt, false); + + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__); + + for (i = 0; i < info->del_sd_cnt; i++) { + sd_idx = info->hmc_info->sd_indexes[i]; + sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx]; + if (!sd_entry) + continue; + mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ? + &sd_entry->u.pd_table.pd_page_addr : + &sd_entry->u.bp.addr; + + if (!mem || !mem->va) + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__); + else + i40iw_free_dma_mem(dev->hw, mem); + } + return ret_code; +} + +/** + * i40iw_del_iw_hmc_obj - remove pe hmc objects + * @dev: pointer to the device structure + * @info: pointer to i40iw_hmc_del_obj_info struct + * @reset: true if called before reset + * + * This will de-populate the SDs and PDs. It frees + * the memory for PDS and backing storage. After this function is returned, + * caller should deallocate memory allocated previously for + * book-keeping information about PDs and backing storage. + */ +enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *info, + bool reset) +{ + struct i40iw_hmc_pd_table *pd_table; + u32 sd_idx, sd_lmt; + u32 pd_idx, pd_lmt, rel_pd_idx; + u32 i, j; + enum i40iw_status_code ret_code = 0; + + if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n", + __func__, info->start_idx, info->rsrc_type, + info->hmc_info->hmc_obj[info->rsrc_type].cnt); + return I40IW_ERR_INVALID_HMC_OBJ_INDEX; + } + + if ((info->start_idx + info->count) > + info->hmc_info->hmc_obj[info->rsrc_type].cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, + "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n", + __func__, info->start_idx, info->count, + info->rsrc_type, + info->hmc_info->hmc_obj[info->rsrc_type].cnt); + return I40IW_ERR_INVALID_HMC_OBJ_COUNT; + } + if (!dev->is_pf) { + ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0, + info->count); + if (info->rsrc_type != I40IW_HMC_IW_PBLE) + return ret_code; + } + + i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &pd_idx, &pd_lmt); + + for (j = pd_idx; j < pd_lmt; j++) { + sd_idx = j / I40IW_HMC_PD_CNT_IN_SD; + + if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type != + I40IW_SD_TYPE_PAGED) + continue; + + rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD; + pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + if (pd_table->pd_entry[rel_pd_idx].valid) { + ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j, + info->is_pf); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__); + return ret_code; + } + } + } + + i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type, + info->start_idx, info->count, &sd_idx, &sd_lmt); + if (sd_idx >= info->hmc_info->sd_table.sd_cnt || + sd_lmt > 
info->hmc_info->sd_table.sd_cnt) { + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__); + return I40IW_ERR_INVALID_SD_INDEX; + } + + for (i = sd_idx; i < sd_lmt; i++) { + if (!info->hmc_info->sd_table.sd_entry[i].valid) + continue; + switch (info->hmc_info->sd_table.sd_entry[i].entry_type) { + case I40IW_SD_TYPE_DIRECT: + ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i); + if (!ret_code) { + info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i; + info->del_sd_cnt++; + } + break; + case I40IW_SD_TYPE_PAGED: + ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i); + if (!ret_code) { + info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i; + info->del_sd_cnt++; + } + break; + default: + break; + } + } + return i40iw_finish_del_sd_reg(dev, info, reset); +} + +/** + * i40iw_add_sd_table_entry - Adds a segment descriptor to the table + * @hw: pointer to our hw struct + * @hmc_info: pointer to the HMC configuration information struct + * @sd_index: segment descriptor index to manipulate + * @type: what type of segment descriptor we're manipulating + * @direct_mode_sz: size to alloc in direct mode + */ +enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, + u32 sd_index, + enum i40iw_sd_entry_type type, + u64 direct_mode_sz) +{ + enum i40iw_status_code ret_code = 0; + struct i40iw_hmc_sd_entry *sd_entry; + bool dma_mem_alloc_done = false; + struct i40iw_dma_mem mem; + u64 alloc_len; + + sd_entry = &hmc_info->sd_table.sd_entry[sd_index]; + if (!sd_entry->valid) { + if (type == I40IW_SD_TYPE_PAGED) + alloc_len = I40IW_HMC_PAGED_BP_SIZE; + else + alloc_len = direct_mode_sz; + + /* allocate a 4K pd page or 2M backing page */ + ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len, + I40IW_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + goto exit; + dma_mem_alloc_done = true; + if (type == I40IW_SD_TYPE_PAGED) { + ret_code = i40iw_allocate_virt_mem(hw, + &sd_entry->u.pd_table.pd_entry_virt_mem, + sizeof(struct i40iw_hmc_pd_entry) * 512); + if (ret_code) + goto exit; + sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *) + sd_entry->u.pd_table.pd_entry_virt_mem.va; + + memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem)); + } else { + memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem)); + sd_entry->u.bp.sd_pd_index = sd_index; + } + + hmc_info->sd_table.sd_entry[sd_index].entry_type = type; + + I40IW_INC_SD_REFCNT(&hmc_info->sd_table); + } + if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT) + I40IW_INC_BP_REFCNT(&sd_entry->u.bp); +exit: + if (ret_code) + if (dma_mem_alloc_done) + i40iw_free_dma_mem(hw, &mem); + + return ret_code; +} + +/** + * i40iw_add_pd_table_entry - Adds page descriptor to the specified table + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @pd_index: which page descriptor index to manipulate + * @rsrc_pg: if not NULL, use preallocated page instead of allocating new one. + * + * This function: + * 1. Initializes the pd entry + * 2. Adds pd_entry in the pd_table + * 3. Mark the entry valid in i40iw_hmc_pd_entry structure + * 4. Initializes the pd_entry's ref count to 1 + * assumptions: + * 1. The memory for pd should be pinned down, physically contiguous and + * aligned on 4K boundary and zeroed memory. + * 2. It should be 4K in size. 
+ */ +enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, + u32 pd_index, + struct i40iw_dma_mem *rsrc_pg) +{ + enum i40iw_status_code ret_code = 0; + struct i40iw_hmc_pd_table *pd_table; + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_dma_mem mem; + struct i40iw_dma_mem *page = &mem; + u32 sd_idx, rel_pd_idx; + u64 *pd_addr; + u64 page_desc; + + if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt) + return I40IW_ERR_INVALID_PAGE_DESC_INDEX; + + sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD); + if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED) + return 0; + + rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD); + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + if (!pd_entry->valid) { + if (rsrc_pg) { + pd_entry->rsrc_pg = true; + page = rsrc_pg; + } else { + ret_code = i40iw_allocate_dma_mem(hw, page, + I40IW_HMC_PAGED_BP_SIZE, + I40IW_HMC_PD_BP_BUF_ALIGNMENT); + if (ret_code) + return ret_code; + pd_entry->rsrc_pg = false; + } + + memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem)); + pd_entry->bp.sd_pd_index = pd_index; + pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED; + page_desc = page->pa | 0x1; + + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + + memcpy(pd_addr, &page_desc, sizeof(*pd_addr)); + + pd_entry->sd_index = sd_idx; + pd_entry->valid = true; + I40IW_INC_PD_REFCNT(pd_table); + if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID) + I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx); + else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id) + I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx, + hmc_info->hmc_fn_id); + } + I40IW_INC_BP_REFCNT(&pd_entry->bp); + + return 0; +} + +/** + * i40iw_remove_pd_bp - remove a backing page from a page descriptor + * @hw: pointer to our HW structure + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + * @is_pf: distinguishes a VF from a PF + * + * This function: + * 1. Marks the entry in pd table (for paged address mode) or in sd table + * (for direct address mode) invalid. + * 2. Write to register PMPDINV to invalidate the backing page in FV cache + * 3. Decrement the ref count for the pd _entry + * assumptions: + * 1. Caller can deallocate the memory used by backing storage after this + * function returns. 
+ */ +enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, + u32 idx, + bool is_pf) +{ + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_hmc_pd_table *pd_table; + struct i40iw_hmc_sd_entry *sd_entry; + u32 sd_idx, rel_pd_idx; + struct i40iw_dma_mem *mem; + u64 *pd_addr; + + sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD; + rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD; + if (sd_idx >= hmc_info->sd_table.sd_cnt) + return I40IW_ERR_INVALID_PAGE_DESC_INDEX; + + sd_entry = &hmc_info->sd_table.sd_entry[sd_idx]; + if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED) + return I40IW_ERR_INVALID_SD_TYPE; + + pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table; + pd_entry = &pd_table->pd_entry[rel_pd_idx]; + I40IW_DEC_BP_REFCNT(&pd_entry->bp); + if (pd_entry->bp.ref_cnt) + return 0; + + pd_entry->valid = false; + I40IW_DEC_PD_REFCNT(pd_table); + pd_addr = (u64 *)pd_table->pd_page_addr.va; + pd_addr += rel_pd_idx; + memset(pd_addr, 0, sizeof(u64)); + if (is_pf) + I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx); + else + I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, + hmc_info->hmc_fn_id); + + if (!pd_entry->rsrc_pg) { + mem = &pd_entry->bp.addr; + if (!mem || !mem->va) + return I40IW_ERR_PARAM; + i40iw_free_dma_mem(hw, mem); + } + if (!pd_table->ref_cnt) + i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem); + + return 0; +} + +/** + * i40iw_prep_remove_sd_bp - Prepares to remove a backing page from a sd entry + * @hmc_info: pointer to the HMC configuration information structure + * @idx: the page index + */ +enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx) +{ + struct i40iw_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + I40IW_DEC_BP_REFCNT(&sd_entry->u.bp); + if (sd_entry->u.bp.ref_cnt) + return I40IW_ERR_NOT_READY; + + I40IW_DEC_SD_REFCNT(&hmc_info->sd_table); + sd_entry->valid = false; + + return 0; +} + +/** + * i40iw_prep_remove_pd_page - Prepares to remove a PD page from sd entry. + * @hmc_info: pointer to the HMC configuration information structure + * @idx: segment descriptor index to find the relevant page descriptor + */ +enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, + u32 idx) +{ + struct i40iw_hmc_sd_entry *sd_entry; + + sd_entry = &hmc_info->sd_table.sd_entry[idx]; + + if (sd_entry->u.pd_table.ref_cnt) + return I40IW_ERR_NOT_READY; + + sd_entry->valid = false; + I40IW_DEC_SD_REFCNT(&hmc_info->sd_table); + + return 0; +} + +/** + * i40iw_pf_init_vfhmc - + * @vf_cnt_array: array of cnt values of iwarp hmc objects + * @vf_hmc_fn_id: hmc function id ofr vf driver + * @dev: pointer to i40iw_dev struct + * + * Called by pf driver to initialize hmc_info for vf driver instance. 
+ */ +enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, + u8 vf_hmc_fn_id, + u32 *vf_cnt_array) +{ + struct i40iw_hmc_info *hmc_info; + enum i40iw_status_code ret_code = 0; + u32 i; + + if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) || + (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID + + I40IW_MAX_PE_ENABLED_VF_COUNT)) { + i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n", + __func__, vf_hmc_fn_id); + return I40IW_ERR_INVALID_HMCFN_ID; + } + + ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id); + if (ret_code) + return ret_code; + + hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id); + + for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++) + if (vf_cnt_array) + hmc_info->hmc_obj[i].cnt = + vf_cnt_array[i - I40IW_HMC_IW_QP]; + else + hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt; + + return 0; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_hmc.h b/drivers/infiniband/hw/i40iw/i40iw_hmc.h new file mode 100644 index 000000000..4c3fdd875 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_hmc.h @@ -0,0 +1,241 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_HMC_H +#define I40IW_HMC_H + +#include "i40iw_d.h" + +struct i40iw_hw; +enum i40iw_status_code; + +#define I40IW_HMC_MAX_BP_COUNT 512 +#define I40IW_MAX_SD_ENTRIES 11 +#define I40IW_HW_DBG_HMC_INVALID_BP_MARK 0xCA + +#define I40IW_HMC_INFO_SIGNATURE 0x484D5347 +#define I40IW_HMC_PD_CNT_IN_SD 512 +#define I40IW_HMC_DIRECT_BP_SIZE 0x200000 +#define I40IW_HMC_MAX_SD_COUNT 4096 +#define I40IW_HMC_PAGED_BP_SIZE 4096 +#define I40IW_HMC_PD_BP_BUF_ALIGNMENT 4096 +#define I40IW_FIRST_VF_FPM_ID 16 +#define FPM_MULTIPLIER 1024 + +#define I40IW_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) +#define I40IW_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) +#define I40IW_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) + +#define I40IW_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) +#define I40IW_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) +#define I40IW_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) + +/** + * I40IW_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + */ +#define I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ + i40iw_wr32((hw), I40E_PFHMC_PDINV, \ + (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) | \ + ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +/** + * I40IW_INVALIDATE_VF_HMC_PD - Invalidates the pd cache in the hardware + * @hw: pointer to our hw struct + * @sd_idx: segment descriptor index + * @pd_idx: page descriptor index + * @hmc_fn_id: VF's function id + */ +#define I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \ + i40iw_wr32(hw, I40E_GLHMC_VFPDINV(hmc_fn_id - I40IW_FIRST_VF_FPM_ID), \ + ((sd_idx << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ + (pd_idx << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) + +struct i40iw_hmc_obj_info { + u64 base; + u32 max_cnt; + u32 cnt; + u64 size; +}; + +enum i40iw_sd_entry_type { + I40IW_SD_TYPE_INVALID = 0, + I40IW_SD_TYPE_PAGED = 1, + I40IW_SD_TYPE_DIRECT = 2 +}; + +struct i40iw_hmc_bp { + enum i40iw_sd_entry_type entry_type; + struct i40iw_dma_mem addr; + u32 sd_pd_index; + u32 ref_cnt; +}; + +struct i40iw_hmc_pd_entry { + struct i40iw_hmc_bp bp; + u32 sd_index; + bool rsrc_pg; + bool valid; +}; + +struct i40iw_hmc_pd_table { + struct i40iw_dma_mem pd_page_addr; + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_virt_mem pd_entry_virt_mem; + u32 ref_cnt; + u32 sd_index; +}; + +struct i40iw_hmc_sd_entry { + enum i40iw_sd_entry_type entry_type; + bool valid; + + union { + struct i40iw_hmc_pd_table pd_table; + struct i40iw_hmc_bp bp; + } u; +}; + +struct i40iw_hmc_sd_table { + struct i40iw_virt_mem addr; + u32 sd_cnt; + u32 ref_cnt; + struct i40iw_hmc_sd_entry *sd_entry; +}; + +struct i40iw_hmc_info { + u32 signature; + u8 hmc_fn_id; + u16 first_sd_index; + + struct i40iw_hmc_obj_info *hmc_obj; + struct i40iw_virt_mem hmc_obj_virt_mem; + struct i40iw_hmc_sd_table sd_table; + u16 sd_indexes[I40IW_HMC_MAX_SD_COUNT]; +}; + +struct update_sd_entry { + u64 cmd; + u64 data; +}; + +struct i40iw_update_sds_info { + u32 cnt; + u8 hmc_fn_id; + struct update_sd_entry entry[I40IW_MAX_SD_ENTRIES]; +}; + +struct i40iw_ccq_cqe_info; +struct i40iw_hmc_fcn_info { + void (*callback_fcn)(struct i40iw_sc_dev *, void *, + struct i40iw_ccq_cqe_info *); + void *cqp_callback_param; + u32 vf_id; + u16 iw_vf_idx; + bool free_fcn; +}; + +enum i40iw_hmc_rsrc_type { + I40IW_HMC_IW_QP = 0, + I40IW_HMC_IW_CQ = 1, + 
I40IW_HMC_IW_SRQ = 2, + I40IW_HMC_IW_HTE = 3, + I40IW_HMC_IW_ARP = 4, + I40IW_HMC_IW_APBVT_ENTRY = 5, + I40IW_HMC_IW_MR = 6, + I40IW_HMC_IW_XF = 7, + I40IW_HMC_IW_XFFL = 8, + I40IW_HMC_IW_Q1 = 9, + I40IW_HMC_IW_Q1FL = 10, + I40IW_HMC_IW_TIMER = 11, + I40IW_HMC_IW_FSIMC = 12, + I40IW_HMC_IW_FSIAV = 13, + I40IW_HMC_IW_PBLE = 14, + I40IW_HMC_IW_MAX = 15, +}; + +struct i40iw_hmc_create_obj_info { + struct i40iw_hmc_info *hmc_info; + struct i40iw_virt_mem add_sd_virt_mem; + u32 rsrc_type; + u32 start_idx; + u32 count; + u32 add_sd_cnt; + enum i40iw_sd_entry_type entry_type; + bool is_pf; +}; + +struct i40iw_hmc_del_obj_info { + struct i40iw_hmc_info *hmc_info; + struct i40iw_virt_mem del_sd_virt_mem; + u32 rsrc_type; + u32 start_idx; + u32 count; + u32 del_sd_cnt; + bool is_pf; +}; + +enum i40iw_status_code i40iw_copy_dma_mem(struct i40iw_hw *hw, void *dest_buf, + struct i40iw_dma_mem *src_mem, u64 src_offset, u64 size); +enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info); +enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *info, + bool reset); +enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev, u8 hmc_fn_id, + u64 pa, u32 sd_idx, enum i40iw_sd_entry_type type, + bool setsd); +enum i40iw_status_code i40iw_update_sds_noccq(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info); +struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id); +struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev, + u8 hmc_fn_id); +enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, u32 sd_index, + enum i40iw_sd_entry_type type, u64 direct_mode_sz); +enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, u32 pd_index, + struct i40iw_dma_mem *rsrc_pg); +enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw, + struct i40iw_hmc_info *hmc_info, u32 idx, bool is_pf); +enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx); +enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info, u32 idx); + +#define ENTER_SHARED_FUNCTION() +#define EXIT_SHARED_FUNCTION() + +#endif /* I40IW_HMC_H */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c new file mode 100644 index 000000000..ae8b97c30 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c @@ -0,0 +1,852 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> + +#include "i40iw.h" + +/** + * i40iw_initialize_hw_resources - initialize hw resource during open + * @iwdev: iwarp device + */ +u32 i40iw_initialize_hw_resources(struct i40iw_device *iwdev) +{ + unsigned long num_pds; + u32 resources_size; + u32 max_mr; + u32 max_qp; + u32 max_cq; + u32 arp_table_size; + u32 mrdrvbits; + void *resource_ptr; + + max_qp = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt; + max_cq = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + max_mr = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_MR].cnt; + arp_table_size = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_ARP].cnt; + iwdev->max_cqe = 0xFFFFF; + num_pds = I40IW_MAX_PDS; + resources_size = sizeof(struct i40iw_arp_entry) * arp_table_size; + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_qp); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_mr); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(max_cq); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(num_pds); + resources_size += sizeof(unsigned long) * BITS_TO_LONGS(arp_table_size); + resources_size += sizeof(struct i40iw_qp **) * max_qp; + iwdev->mem_resources = kzalloc(resources_size, GFP_KERNEL); + + if (!iwdev->mem_resources) + return -ENOMEM; + + iwdev->max_qp = max_qp; + iwdev->max_mr = max_mr; + iwdev->max_cq = max_cq; + iwdev->max_pd = num_pds; + iwdev->arp_table_size = arp_table_size; + iwdev->arp_table = (struct i40iw_arp_entry *)iwdev->mem_resources; + resource_ptr = iwdev->mem_resources + (sizeof(struct i40iw_arp_entry) * arp_table_size); + + iwdev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | + IB_DEVICE_MEM_WINDOW | IB_DEVICE_MEM_MGT_EXTENSIONS; + + iwdev->allocated_qps = resource_ptr; + iwdev->allocated_cqs = &iwdev->allocated_qps[BITS_TO_LONGS(max_qp)]; + iwdev->allocated_mrs = &iwdev->allocated_cqs[BITS_TO_LONGS(max_cq)]; + iwdev->allocated_pds = &iwdev->allocated_mrs[BITS_TO_LONGS(max_mr)]; + iwdev->allocated_arps = &iwdev->allocated_pds[BITS_TO_LONGS(num_pds)]; + iwdev->qp_table = (struct i40iw_qp **)(&iwdev->allocated_arps[BITS_TO_LONGS(arp_table_size)]); + set_bit(0, iwdev->allocated_mrs); + set_bit(0, iwdev->allocated_qps); + set_bit(0, iwdev->allocated_cqs); + set_bit(0, iwdev->allocated_pds); + set_bit(0, iwdev->allocated_arps); + + /* Following for ILQ/IEQ */ + set_bit(1, iwdev->allocated_qps); + set_bit(1, iwdev->allocated_cqs); + set_bit(1, iwdev->allocated_pds); + set_bit(2, iwdev->allocated_cqs); + set_bit(2, iwdev->allocated_pds); + + spin_lock_init(&iwdev->resource_lock); + spin_lock_init(&iwdev->qptable_lock); + /* stag index mask has a minimum of 14 bits */ + mrdrvbits = 24 - max(get_count_order(iwdev->max_mr), 14); + iwdev->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); + return 0; +} + +/** + * 
i40iw_cqp_ce_handler - handle cqp completions + * @iwdev: iwarp device + * @arm: flag to arm after completions + * @cq: cq for cqp completions + */ +static void i40iw_cqp_ce_handler(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq, bool arm) +{ + struct i40iw_cqp_request *cqp_request; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + u32 cqe_count = 0; + struct i40iw_ccq_cqe_info info; + int ret; + + do { + memset(&info, 0, sizeof(info)); + ret = dev->ccq_ops->ccq_get_cqe_info(cq, &info); + if (ret) + break; + cqp_request = (struct i40iw_cqp_request *)(unsigned long)info.scratch; + if (info.error) + i40iw_pr_err("opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", + info.op_code, info.maj_err_code, info.min_err_code); + if (cqp_request) { + cqp_request->compl_info.maj_err_code = info.maj_err_code; + cqp_request->compl_info.min_err_code = info.min_err_code; + cqp_request->compl_info.op_ret_val = info.op_ret_val; + cqp_request->compl_info.error = info.error; + + if (cqp_request->waiting) { + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); + } else { + if (cqp_request->callback_fcn) + cqp_request->callback_fcn(cqp_request, 1); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); + } + } + + cqe_count++; + } while (1); + + if (arm && cqe_count) { + i40iw_process_bh(dev); + dev->ccq_ops->ccq_arm(cq); + } +} + +/** + * i40iw_iwarp_ce_handler - handle iwarp completions + * @iwdev: iwarp device + * @iwcp: iwarp cq receiving event + */ +static void i40iw_iwarp_ce_handler(struct i40iw_device *iwdev, + struct i40iw_sc_cq *iwcq) +{ + struct i40iw_cq *i40iwcq = iwcq->back_cq; + + if (i40iwcq->ibcq.comp_handler) + i40iwcq->ibcq.comp_handler(&i40iwcq->ibcq, + i40iwcq->ibcq.cq_context); +} + +/** + * i40iw_puda_ce_handler - handle puda completion events + * @iwdev: iwarp device + * @cq: puda completion q for event + */ +static void i40iw_puda_ce_handler(struct i40iw_device *iwdev, + struct i40iw_sc_cq *cq) +{ + struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)&iwdev->sc_dev; + enum i40iw_status_code status; + u32 compl_error; + + do { + status = i40iw_puda_poll_completion(dev, cq, &compl_error); + if (status == I40IW_ERR_QUEUE_EMPTY) + break; + if (status) { + i40iw_pr_err("puda status = %d\n", status); + break; + } + if (compl_error) { + i40iw_pr_err("puda compl_err =0x%x\n", compl_error); + break; + } + } while (1); + + dev->ccq_ops->ccq_arm(cq); +} + +/** + * i40iw_process_ceq - handle ceq for completions + * @iwdev: iwarp device + * @ceq: ceq having cq for completion + */ +void i40iw_process_ceq(struct i40iw_device *iwdev, struct i40iw_ceq *ceq) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_sc_ceq *sc_ceq; + struct i40iw_sc_cq *cq; + bool arm = true; + + sc_ceq = &ceq->sc_ceq; + do { + cq = dev->ceq_ops->process_ceq(dev, sc_ceq); + if (!cq) + break; + + if (cq->cq_type == I40IW_CQ_TYPE_CQP) + i40iw_cqp_ce_handler(iwdev, cq, arm); + else if (cq->cq_type == I40IW_CQ_TYPE_IWARP) + i40iw_iwarp_ce_handler(iwdev, cq); + else if ((cq->cq_type == I40IW_CQ_TYPE_ILQ) || + (cq->cq_type == I40IW_CQ_TYPE_IEQ)) + i40iw_puda_ce_handler(iwdev, cq); + } while (1); +} + +/** + * i40iw_next_iw_state - modify qp state + * @iwqp: iwarp qp to modify + * @state: next state for qp + * @del_hash: del hash + * @term: term message + * @termlen: length of term message + */ +void i40iw_next_iw_state(struct i40iw_qp *iwqp, + u8 state, + u8 del_hash, + u8 term, + u8 termlen) +{ + struct i40iw_modify_qp_info info; + + memset(&info, 0, 
sizeof(info)); + info.next_iwarp_state = state; + info.remove_hash_idx = del_hash; + info.cq_num_valid = true; + info.arp_cache_idx_valid = true; + info.dont_send_term = true; + info.dont_send_fin = true; + info.termlen = termlen; + + if (term & I40IWQP_TERM_SEND_TERM_ONLY) + info.dont_send_term = false; + if (term & I40IWQP_TERM_SEND_FIN_ONLY) + info.dont_send_fin = false; + if (iwqp->sc_qp.term_flags && (state == I40IW_QP_STATE_ERROR)) + info.reset_tcp_conn = true; + iwqp->hw_iwarp_state = state; + i40iw_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); +} + +/** + * i40iw_process_aeq - handle aeq events + * @iwdev: iwarp device + */ +void i40iw_process_aeq(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_aeq *aeq = &iwdev->aeq; + struct i40iw_sc_aeq *sc_aeq = &aeq->sc_aeq; + struct i40iw_aeqe_info aeinfo; + struct i40iw_aeqe_info *info = &aeinfo; + int ret; + struct i40iw_qp *iwqp = NULL; + struct i40iw_sc_cq *cq = NULL; + struct i40iw_cq *iwcq = NULL; + struct i40iw_sc_qp *qp = NULL; + struct i40iw_qp_host_ctx_info *ctx_info = NULL; + unsigned long flags; + + u32 aeqcnt = 0; + + if (!sc_aeq->size) + return; + + do { + memset(info, 0, sizeof(*info)); + ret = dev->aeq_ops->get_next_aeqe(sc_aeq, info); + if (ret) + break; + + aeqcnt++; + i40iw_debug(dev, I40IW_DEBUG_AEQ, + "%s ae_id = 0x%x bool qp=%d qp_id = %d\n", + __func__, info->ae_id, info->qp, info->qp_cq_id); + if (info->qp) { + spin_lock_irqsave(&iwdev->qptable_lock, flags); + iwqp = iwdev->qp_table[info->qp_cq_id]; + if (!iwqp) { + spin_unlock_irqrestore(&iwdev->qptable_lock, flags); + i40iw_debug(dev, I40IW_DEBUG_AEQ, + "%s qp_id %d is already freed\n", + __func__, info->qp_cq_id); + continue; + } + i40iw_add_ref(&iwqp->ibqp); + spin_unlock_irqrestore(&iwdev->qptable_lock, flags); + qp = &iwqp->sc_qp; + spin_lock_irqsave(&iwqp->lock, flags); + iwqp->hw_tcp_state = info->tcp_state; + iwqp->hw_iwarp_state = info->iwarp_state; + iwqp->last_aeq = info->ae_id; + spin_unlock_irqrestore(&iwqp->lock, flags); + ctx_info = &iwqp->ctx_info; + ctx_info->err_rq_idx_valid = true; + } else { + if (info->ae_id != I40IW_AE_CQ_OPERATION_ERROR) + continue; + } + + switch (info->ae_id) { + case I40IW_AE_LLP_FIN_RECEIVED: + if (qp->term_flags) + break; + if (atomic_inc_return(&iwqp->close_timer_started) == 1) { + iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSE_WAIT; + if ((iwqp->hw_tcp_state == I40IW_TCP_STATE_CLOSE_WAIT) && + (iwqp->ibqp_state == IB_QPS_RTS)) { + i40iw_next_iw_state(iwqp, + I40IW_QP_STATE_CLOSING, 0, 0, 0); + i40iw_cm_disconn(iwqp); + } + iwqp->cm_id->add_ref(iwqp->cm_id); + i40iw_schedule_cm_timer(iwqp->cm_node, + (struct i40iw_puda_buf *)iwqp, + I40IW_TIMER_TYPE_CLOSE, 1, 0); + } + break; + case I40IW_AE_LLP_CLOSE_COMPLETE: + if (qp->term_flags) + i40iw_terminate_done(qp, 0); + else + i40iw_cm_disconn(iwqp); + break; + case I40IW_AE_BAD_CLOSE: + /* fall through */ + case I40IW_AE_RESET_SENT: + i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 1, 0, 0); + i40iw_cm_disconn(iwqp); + break; + case I40IW_AE_LLP_CONNECTION_RESET: + if (atomic_read(&iwqp->close_timer_started)) + break; + i40iw_cm_disconn(iwqp); + break; + case I40IW_AE_QP_SUSPEND_COMPLETE: + i40iw_qp_suspend_resume(dev, &iwqp->sc_qp, false); + break; + case I40IW_AE_TERMINATE_SENT: + i40iw_terminate_send_fin(qp); + break; + case I40IW_AE_LLP_TERMINATE_RECEIVED: + i40iw_terminate_received(qp, info); + break; + case I40IW_AE_CQ_OPERATION_ERROR: + i40iw_pr_err("Processing an iWARP related AE for CQ misc = 0x%04X\n", + info->ae_id); + cq = 
(struct i40iw_sc_cq *)(unsigned long)info->compl_ctx; + iwcq = (struct i40iw_cq *)cq->back_cq; + + if (iwcq->ibcq.event_handler) { + struct ib_event ibevent; + + ibevent.device = iwcq->ibcq.device; + ibevent.event = IB_EVENT_CQ_ERR; + ibevent.element.cq = &iwcq->ibcq; + iwcq->ibcq.event_handler(&ibevent, iwcq->ibcq.cq_context); + } + break; + case I40IW_AE_LLP_DOUBT_REACHABILITY: + break; + case I40IW_AE_PRIV_OPERATION_DENIED: + case I40IW_AE_STAG_ZERO_INVALID: + case I40IW_AE_IB_RREQ_AND_Q1_FULL: + case I40IW_AE_DDP_UBE_INVALID_DDP_VERSION: + case I40IW_AE_DDP_UBE_INVALID_MO: + case I40IW_AE_DDP_UBE_INVALID_QN: + case I40IW_AE_DDP_NO_L_BIT: + case I40IW_AE_RDMAP_ROE_INVALID_RDMAP_VERSION: + case I40IW_AE_RDMAP_ROE_UNEXPECTED_OPCODE: + case I40IW_AE_ROE_INVALID_RDMA_READ_REQUEST: + case I40IW_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP: + case I40IW_AE_INVALID_ARP_ENTRY: + case I40IW_AE_INVALID_TCP_OPTION_RCVD: + case I40IW_AE_STALE_ARP_ENTRY: + case I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR: + case I40IW_AE_LLP_SEGMENT_TOO_SMALL: + case I40IW_AE_LLP_SYN_RECEIVED: + case I40IW_AE_LLP_TOO_MANY_RETRIES: + case I40IW_AE_LCE_QP_CATASTROPHIC: + case I40IW_AE_LCE_FUNCTION_CATASTROPHIC: + case I40IW_AE_LCE_CQ_CATASTROPHIC: + case I40IW_AE_UDA_XMIT_DGRAM_TOO_LONG: + case I40IW_AE_UDA_XMIT_DGRAM_TOO_SHORT: + ctx_info->err_rq_idx_valid = false; + /* fall through */ + default: + if (!info->sq && ctx_info->err_rq_idx_valid) { + ctx_info->err_rq_idx = info->wqe_idx; + ctx_info->tcp_info_valid = false; + ctx_info->iwarp_info_valid = false; + ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, + iwqp->host_ctx.va, + ctx_info); + } + i40iw_terminate_connection(qp, info); + break; + } + if (info->qp) + i40iw_rem_ref(&iwqp->ibqp); + } while (1); + + if (aeqcnt) + dev->aeq_ops->repost_aeq_entries(dev, aeqcnt); +} + +/** + * i40iw_cqp_manage_abvpt_cmd - send cqp command manage abpvt + * @iwdev: iwarp device + * @accel_local_port: port for apbvt + * @add_port: add or delete port + */ +static enum i40iw_status_code +i40iw_cqp_manage_abvpt_cmd(struct i40iw_device *iwdev, + u16 accel_local_port, + bool add_port) +{ + struct i40iw_apbvt_info *info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, add_port); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.manage_apbvt_entry.info; + + memset(info, 0, sizeof(*info)); + info->add = add_port; + info->port = cpu_to_le16(accel_local_port); + + cqp_info->cqp_cmd = OP_MANAGE_APBVT_ENTRY; + cqp_info->post_sq = 1; + cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_apbvt_entry.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage APBVT entry fail"); + + return status; +} + +/** + * i40iw_manage_apbvt - add or delete tcp port + * @iwdev: iwarp device + * @accel_local_port: port for apbvt + * @add_port: add or delete port + */ +enum i40iw_status_code i40iw_manage_apbvt(struct i40iw_device *iwdev, + u16 accel_local_port, + bool add_port) +{ + struct i40iw_cm_core *cm_core = &iwdev->cm_core; + enum i40iw_status_code status; + unsigned long flags; + bool in_use; + + /* apbvt_lock is held across CQP delete APBVT OP (non-waiting) to + * protect against race where add APBVT CQP can race ahead of the delete + * APBVT for same port. 
+ */
+ if (add_port) {
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ in_use = __test_and_set_bit(accel_local_port,
+ cm_core->ports_in_use);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ if (in_use)
+ return 0;
+ return i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
+ true);
+ } else {
+ spin_lock_irqsave(&cm_core->apbvt_lock, flags);
+ in_use = i40iw_port_in_use(cm_core, accel_local_port);
+ if (in_use) {
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return 0;
+ }
+ __clear_bit(accel_local_port, cm_core->ports_in_use);
+ status = i40iw_cqp_manage_abvpt_cmd(iwdev, accel_local_port,
+ false);
+ spin_unlock_irqrestore(&cm_core->apbvt_lock, flags);
+ return status;
+ }
+}
+
+/**
+ * i40iw_manage_arp_cache - manage hw arp cache
+ * @iwdev: iwarp device
+ * @mac_addr: mac address ptr
+ * @ip_addr: ip addr for arp cache
+ * @ipv4: flag indicating IPv4 when true, IPv6 when false
+ * @action: add, delete or modify
+ */
+void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+ unsigned char *mac_addr,
+ u32 *ip_addr,
+ bool ipv4,
+ u32 action)
+{
+ struct i40iw_add_arp_cache_entry_info *info;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ int arp_index;
+
+ arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
+ if (arp_index < 0)
+ return;
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ if (action == I40IW_ARP_ADD) {
+ cqp_info->cqp_cmd = OP_ADD_ARP_CACHE_ENTRY;
+ info = &cqp_info->in.u.add_arp_cache_entry.info;
+ memset(info, 0, sizeof(*info));
+ info->arp_index = cpu_to_le16((u16)arp_index);
+ info->permanent = true;
+ ether_addr_copy(info->mac_addr, mac_addr);
+ cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ } else {
+ cqp_info->cqp_cmd = OP_DELETE_ARP_CACHE_ENTRY;
+ cqp_info->in.u.del_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.del_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.del_arp_cache_entry.arp_index = arp_index;
+ }
+
+ cqp_info->in.u.add_arp_cache_entry.cqp = &iwdev->cqp.sc_cqp;
+ cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;
+ cqp_info->post_sq = 1;
+ if (i40iw_handle_cqp_op(iwdev, cqp_request))
+ i40iw_pr_err("CQP-OP Add/Del Arp Cache entry fail");
+}
+
+/**
+ * i40iw_send_syn_cqp_callback - do syn/ack after qhash
+ * @cqp_request: qhash cqp completion
+ * @send_ack: flag send ack
+ */
+static void i40iw_send_syn_cqp_callback(struct i40iw_cqp_request *cqp_request, u32 send_ack)
+{
+ i40iw_send_syn(cqp_request->param, send_ack);
+}
+
+/**
+ * i40iw_manage_qhash - add or modify qhash
+ * @iwdev: iwarp device
+ * @cminfo: cm info for qhash
+ * @etype: type (syn or quad)
+ * @mtype: type of qhash
+ * @cmnode: cmnode associated with connection
+ * @wait: wait for completion
+ */
+enum i40iw_status_code i40iw_manage_qhash(struct i40iw_device *iwdev,
+ struct i40iw_cm_info *cminfo,
+ enum i40iw_quad_entry_type etype,
+ enum i40iw_quad_hash_manage_type mtype,
+ void *cmnode,
+ bool wait)
+{
+ struct i40iw_qhash_table_info *info;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_sc_vsi *vsi = &iwdev->vsi;
+ enum i40iw_status_code status;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, wait);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+ cqp_info = &cqp_request->info;
+ info = 
&cqp_info->in.u.manage_qhash_table_entry.info; + memset(info, 0, sizeof(*info)); + + info->vsi = &iwdev->vsi; + info->manage = mtype; + info->entry_type = etype; + if (cminfo->vlan_id != 0xFFFF) { + info->vlan_valid = true; + info->vlan_id = cpu_to_le16(cminfo->vlan_id); + } else { + info->vlan_valid = false; + } + + info->ipv4_valid = cminfo->ipv4; + info->user_pri = cminfo->user_pri; + ether_addr_copy(info->mac_addr, iwdev->netdev->dev_addr); + info->qp_num = cpu_to_le32(vsi->ilq->qp_id); + info->dest_port = cpu_to_le16(cminfo->loc_port); + info->dest_ip[0] = cpu_to_le32(cminfo->loc_addr[0]); + info->dest_ip[1] = cpu_to_le32(cminfo->loc_addr[1]); + info->dest_ip[2] = cpu_to_le32(cminfo->loc_addr[2]); + info->dest_ip[3] = cpu_to_le32(cminfo->loc_addr[3]); + if (etype == I40IW_QHASH_TYPE_TCP_ESTABLISHED) { + info->src_port = cpu_to_le16(cminfo->rem_port); + info->src_ip[0] = cpu_to_le32(cminfo->rem_addr[0]); + info->src_ip[1] = cpu_to_le32(cminfo->rem_addr[1]); + info->src_ip[2] = cpu_to_le32(cminfo->rem_addr[2]); + info->src_ip[3] = cpu_to_le32(cminfo->rem_addr[3]); + } + if (cmnode) { + cqp_request->callback_fcn = i40iw_send_syn_cqp_callback; + cqp_request->param = (void *)cmnode; + } + + if (info->ipv4_valid) + i40iw_debug(dev, I40IW_DEBUG_CM, + "%s:%s IP=%pI4, port=%d, mac=%pM, vlan_id=%d\n", + __func__, (!mtype) ? "DELETE" : "ADD", + info->dest_ip, + info->dest_port, info->mac_addr, cminfo->vlan_id); + else + i40iw_debug(dev, I40IW_DEBUG_CM, + "%s:%s IP=%pI6, port=%d, mac=%pM, vlan_id=%d\n", + __func__, (!mtype) ? "DELETE" : "ADD", + info->dest_ip, + info->dest_port, info->mac_addr, cminfo->vlan_id); + cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_qhash_table_entry.scratch = (uintptr_t)cqp_request; + cqp_info->cqp_cmd = OP_MANAGE_QHASH_TABLE_ENTRY; + cqp_info->post_sq = 1; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage Qhash Entry fail"); + return status; +} + +/** + * i40iw_hw_flush_wqes - flush qp's wqe + * @iwdev: iwarp device + * @qp: hardware control qp + * @info: info for flush + * @wait: flag wait for completion + */ +enum i40iw_status_code i40iw_hw_flush_wqes(struct i40iw_device *iwdev, + struct i40iw_sc_qp *qp, + struct i40iw_qp_flush_info *info, + bool wait) +{ + enum i40iw_status_code status; + struct i40iw_qp_flush_info *hw_info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + + cqp_info = &cqp_request->info; + hw_info = &cqp_request->info.in.u.qp_flush_wqes.info; + memcpy(hw_info, info, sizeof(*hw_info)); + + cqp_info->cqp_cmd = OP_QP_FLUSH_WQES; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_flush_wqes.qp = qp; + cqp_info->in.u.qp_flush_wqes.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) { + i40iw_pr_err("CQP-OP Flush WQE's fail"); + complete(&iwqp->sq_drained); + complete(&iwqp->rq_drained); + return status; + } + if (!cqp_request->compl_info.maj_err_code) { + switch (cqp_request->compl_info.min_err_code) { + case I40IW_CQP_COMPL_RQ_WQE_FLUSHED: + complete(&iwqp->sq_drained); + break; + case I40IW_CQP_COMPL_SQ_WQE_FLUSHED: + complete(&iwqp->rq_drained); + break; + case I40IW_CQP_COMPL_RQ_SQ_WQE_FLUSHED: + break; + default: + complete(&iwqp->sq_drained); + complete(&iwqp->rq_drained); + break; + } + } + + return 0; +} + +/** + * 
i40iw_gen_ae - generate AE + * @iwdev: iwarp device + * @qp: qp associated with AE + * @info: info for ae + * @wait: wait for completion + */ +void i40iw_gen_ae(struct i40iw_device *iwdev, + struct i40iw_sc_qp *qp, + struct i40iw_gen_ae_info *info, + bool wait) +{ + struct i40iw_gen_ae_info *ae_info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + ae_info = &cqp_request->info.in.u.gen_ae.info; + memcpy(ae_info, info, sizeof(*ae_info)); + + cqp_info->cqp_cmd = OP_GEN_AE; + cqp_info->post_sq = 1; + cqp_info->in.u.gen_ae.qp = qp; + cqp_info->in.u.gen_ae.scratch = (uintptr_t)cqp_request; + if (i40iw_handle_cqp_op(iwdev, cqp_request)) + i40iw_pr_err("CQP OP failed attempting to generate ae_code=0x%x\n", + info->ae_code); +} + +/** + * i40iw_hw_manage_vf_pble_bp - manage vf pbles + * @iwdev: iwarp device + * @info: info for managing pble + * @wait: flag wait for completion + */ +enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev, + struct i40iw_manage_vf_pble_info *info, + bool wait) +{ + enum i40iw_status_code status; + struct i40iw_manage_vf_pble_info *hw_info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + if ((iwdev->init_state < CCQ_CREATED) && wait) + wait = false; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + + cqp_info = &cqp_request->info; + hw_info = &cqp_request->info.in.u.manage_vf_pble_bp.info; + memcpy(hw_info, info, sizeof(*hw_info)); + + cqp_info->cqp_cmd = OP_MANAGE_VF_PBLE_BP; + cqp_info->post_sq = 1; + cqp_info->in.u.manage_vf_pble_bp.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_vf_pble_bp.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage VF pble_bp fail"); + return status; +} + +/** + * i40iw_get_ib_wc - return change flush code to IB's + * @opcode: iwarp flush code + */ +static enum ib_wc_status i40iw_get_ib_wc(enum i40iw_flush_opcode opcode) +{ + switch (opcode) { + case FLUSH_PROT_ERR: + return IB_WC_LOC_PROT_ERR; + case FLUSH_REM_ACCESS_ERR: + return IB_WC_REM_ACCESS_ERR; + case FLUSH_LOC_QP_OP_ERR: + return IB_WC_LOC_QP_OP_ERR; + case FLUSH_REM_OP_ERR: + return IB_WC_REM_OP_ERR; + case FLUSH_LOC_LEN_ERR: + return IB_WC_LOC_LEN_ERR; + case FLUSH_GENERAL_ERR: + return IB_WC_GENERAL_ERR; + case FLUSH_FATAL_ERR: + default: + return IB_WC_FATAL_ERR; + } +} + +/** + * i40iw_set_flush_info - set flush info + * @pinfo: set flush info + * @min: minor err + * @maj: major err + * @opcode: flush error code + */ +static void i40iw_set_flush_info(struct i40iw_qp_flush_info *pinfo, + u16 *min, + u16 *maj, + enum i40iw_flush_opcode opcode) +{ + *min = (u16)i40iw_get_ib_wc(opcode); + *maj = CQE_MAJOR_DRV; + pinfo->userflushcode = true; +} + +/** + * i40iw_flush_wqes - flush wqe for qp + * @iwdev: iwarp device + * @iwqp: qp to flush wqes + */ +void i40iw_flush_wqes(struct i40iw_device *iwdev, struct i40iw_qp *iwqp) +{ + struct i40iw_qp_flush_info info; + struct i40iw_qp_flush_info *pinfo = &info; + + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + + memset(pinfo, 0, sizeof(*pinfo)); + info.sq = true; + info.rq = true; + if (qp->term_flags) { + i40iw_set_flush_info(pinfo, &pinfo->sq_minor_code, + &pinfo->sq_major_code, qp->flush_code); + i40iw_set_flush_info(pinfo, &pinfo->rq_minor_code, + &pinfo->rq_major_code, qp->flush_code); + } 
+ (void)i40iw_hw_flush_wqes(iwdev, &iwqp->sc_qp, &info, true); +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c new file mode 100644 index 000000000..41227d956 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -0,0 +1,2058 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/if_vlan.h> +#include <net/addrconf.h> + +#include "i40iw.h" +#include "i40iw_register.h" +#include <net/netevent.h> +#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0 +#define CLIENT_IW_INTERFACE_VERSION_MINOR 01 +#define CLIENT_IW_INTERFACE_VERSION_BUILD 00 + +#define DRV_VERSION_MAJOR 0 +#define DRV_VERSION_MINOR 5 +#define DRV_VERSION_BUILD 123 +#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ + __stringify(DRV_VERSION_MINOR) "." 
__stringify(DRV_VERSION_BUILD) + +static int debug; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all"); + +static int resource_profile; +module_param(resource_profile, int, 0644); +MODULE_PARM_DESC(resource_profile, + "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution"); + +static int max_rdma_vfs = 32; +module_param(max_rdma_vfs, int, 0644); +MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default"); +static int mpa_version = 2; +module_param(mpa_version, int, 0644); +MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2"); + +MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>"); +MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver"); +MODULE_LICENSE("Dual BSD/GPL"); + +static struct i40e_client i40iw_client; +static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw"; + +static LIST_HEAD(i40iw_handlers); +static spinlock_t i40iw_handler_lock; + +static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev, + u32 vf_id, u8 *msg, u16 len); + +static struct notifier_block i40iw_inetaddr_notifier = { + .notifier_call = i40iw_inetaddr_event +}; + +static struct notifier_block i40iw_inetaddr6_notifier = { + .notifier_call = i40iw_inet6addr_event +}; + +static struct notifier_block i40iw_net_notifier = { + .notifier_call = i40iw_net_event +}; + +static struct notifier_block i40iw_netdevice_notifier = { + .notifier_call = i40iw_netdevice_event +}; + +/** + * i40iw_find_i40e_handler - find a handler given a client info + * @ldev: pointer to a client info + */ +static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev) +{ + struct i40iw_handler *hdl; + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_for_each_entry(hdl, &i40iw_handlers, list) { + if (hdl->ldev.netdev == ldev->netdev) { + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return hdl; + } + } + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return NULL; +} + +/** + * i40iw_find_netdev - find a handler given a netdev + * @netdev: pointer to net_device + */ +struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev) +{ + struct i40iw_handler *hdl; + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_for_each_entry(hdl, &i40iw_handlers, list) { + if (hdl->ldev.netdev == netdev) { + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return hdl; + } + } + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return NULL; +} + +/** + * i40iw_add_handler - add a handler to the list + * @hdl: handler to be added to the handler list + */ +static void i40iw_add_handler(struct i40iw_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_add(&hdl->list, &i40iw_handlers); + spin_unlock_irqrestore(&i40iw_handler_lock, flags); +} + +/** + * i40iw_del_handler - delete a handler from the list + * @hdl: handler to be deleted from the handler list + */ +static int i40iw_del_handler(struct i40iw_handler *hdl) +{ + unsigned long flags; + + spin_lock_irqsave(&i40iw_handler_lock, flags); + list_del(&hdl->list); + spin_unlock_irqrestore(&i40iw_handler_lock, flags); + return 0; +} + +/** + * i40iw_enable_intr - set up device interrupts + * @dev: hardware control device structure + * @msix_id: id of the interrupt to be enabled + */ +static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id) +{ + u32 val; + + val = 
I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
+ else
+ i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
+}
+
+/**
+ * i40iw_dpc - tasklet for aeq and ceq 0
+ * @data: iwarp device
+ */
+static void i40iw_dpc(unsigned long data)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+ if (iwdev->msix_shared)
+ i40iw_process_ceq(iwdev, iwdev->ceqlist);
+ i40iw_process_aeq(iwdev);
+ i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
+}
+
+/**
+ * i40iw_ceq_dpc - dpc handler for CEQ
+ * @data: data points to CEQ
+ */
+static void i40iw_ceq_dpc(unsigned long data)
+{
+ struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;
+ struct i40iw_device *iwdev = iwceq->iwdev;
+
+ i40iw_process_ceq(iwdev, iwceq);
+ i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
+}
+
+/**
+ * i40iw_irq_handler - interrupt handler for aeq and ceq0
+ * @irq: Interrupt request number
+ * @data: iwarp device
+ */
+static irqreturn_t i40iw_irq_handler(int irq, void *data)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)data;
+
+ tasklet_schedule(&iwdev->dpc_tasklet);
+ return IRQ_HANDLED;
+}
+
+/**
+ * i40iw_destroy_cqp - destroy control qp
+ * @iwdev: iwarp device
+ * @free_hwcqp: true if the cqp is to be destroyed in the hardware
+ *
+ * Issue destroy cqp request and
+ * free the resources associated with the cqp
+ */
+static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cqp *cqp = &iwdev->cqp;
+
+ if (free_hwcqp)
+ dev->cqp_ops->cqp_destroy(dev->cqp);
+
+ i40iw_cleanup_pending_cqp_op(iwdev);
+
+ i40iw_free_dma_mem(dev->hw, &cqp->sq);
+ kfree(cqp->scratch_array);
+ iwdev->cqp.scratch_array = NULL;
+
+ kfree(cqp->cqp_requests);
+ cqp->cqp_requests = NULL;
+}
+
+/**
+ * i40iw_disable_irq - disable device interrupts
+ * @dev: hardware control device structure
+ * @msix_vec: msix vector to disable irq
+ * @dev_id: parameter to pass to free_irq (used during irq setup)
+ *
+ * The function is called when destroying aeq/ceq
+ */
+static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
+ struct i40iw_msix_vector *msix_vec,
+ void *dev_id)
+{
+ if (dev->is_pf)
+ i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
+ else
+ i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
+ irq_set_affinity_hint(msix_vec->irq, NULL);
+ free_irq(msix_vec->irq, dev_id);
+}
+
+/**
+ * i40iw_destroy_aeq - destroy aeq
+ * @iwdev: iwarp device
+ *
+ * Issue a destroy aeq request and
+ * free the resources associated with the aeq
+ * The function is called during driver unload
+ */
+static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
+{
+ enum i40iw_status_code status = I40IW_ERR_NOT_READY;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_aeq *aeq = &iwdev->aeq;
+
+ if (!iwdev->msix_shared)
+ i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
+ if (iwdev->reset)
+ goto exit;
+
+ if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
+ status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
+ if (status)
+ i40iw_pr_err("destroy aeq failed %d\n", status);
+
+exit:
+ i40iw_free_dma_mem(dev->hw, &aeq->mem);
+}
+
+/**
+ * i40iw_destroy_ceq - destroy ceq
+ * @iwdev: iwarp device
+ * @iwceq: ceq to be destroyed
+ *
+ * Issue a destroy ceq request and
+ * free the resources associated with the ceq
+ */
+static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
+ 
struct i40iw_ceq *iwceq)
+{
+ enum i40iw_status_code status;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+
+ if (iwdev->reset)
+ goto exit;
+
+ status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
+ if (status) {
+ i40iw_pr_err("ceq destroy command failed %d\n", status);
+ goto exit;
+ }
+
+ status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
+ if (status)
+ i40iw_pr_err("ceq destroy completion failed %d\n", status);
+exit:
+ i40iw_free_dma_mem(dev->hw, &iwceq->mem);
+}
+
+/**
+ * i40iw_dele_ceqs - destroy all ceq's
+ * @iwdev: iwarp device
+ *
+ * Go through all of the device ceq's and for each ceq
+ * disable the ceq interrupt and destroy the ceq
+ */
+static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
+{
+ u32 i = 0;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_ceq *iwceq = iwdev->ceqlist;
+ struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
+
+ if (iwdev->msix_shared) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
+ i40iw_destroy_ceq(iwdev, iwceq);
+ iwceq++;
+ i++;
+ }
+
+ for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
+ i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
+ i40iw_destroy_ceq(iwdev, iwceq);
+ }
+
+ iwdev->sc_dev.ceq_valid = false;
+}
+
+/**
+ * i40iw_destroy_ccq - destroy control cq
+ * @iwdev: iwarp device
+ *
+ * Issue destroy ccq request and
+ * free the resources associated with the ccq
+ */
+static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
+{
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_ccq *ccq = &iwdev->ccq;
+ enum i40iw_status_code status = 0;
+
+ if (!iwdev->reset)
+ status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
+ if (status)
+ i40iw_pr_err("ccq destroy failed %d\n", status);
+ i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
+}
+
+/* types of hmc objects */
+static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
+ I40IW_HMC_IW_QP,
+ I40IW_HMC_IW_CQ,
+ I40IW_HMC_IW_HTE,
+ I40IW_HMC_IW_ARP,
+ I40IW_HMC_IW_APBVT_ENTRY,
+ I40IW_HMC_IW_MR,
+ I40IW_HMC_IW_XF,
+ I40IW_HMC_IW_XFFL,
+ I40IW_HMC_IW_Q1,
+ I40IW_HMC_IW_Q1FL,
+ I40IW_HMC_IW_TIMER,
+};
+
+/**
+ * i40iw_close_hmc_objects_type - delete hmc objects of a given type
+ * @dev: hardware control device structure
+ * @obj_type: the hmc object type to be deleted
+ * @hmc_info: hmc info holding the objects to be deleted
+ * @is_pf: true if the function is PF otherwise false
+ * @reset: true if called before reset
+ */
+static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
+ enum i40iw_hmc_rsrc_type obj_type,
+ struct i40iw_hmc_info *hmc_info,
+ bool is_pf,
+ bool reset)
+{
+ struct i40iw_hmc_del_obj_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.hmc_info = hmc_info;
+ info.rsrc_type = obj_type;
+ info.count = hmc_info->hmc_obj[obj_type].cnt;
+ info.is_pf = is_pf;
+ if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
+ i40iw_pr_err("del obj of type %d failed\n", obj_type);
+}
+
+/**
+ * i40iw_del_hmc_objects - remove all device hmc objects
+ * @dev: iwarp device
+ * @hmc_info: hmc_info to free
+ * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
+ * by PF on behalf of VF
+ * @reset: true if called before reset
+ */
+static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
+ struct i40iw_hmc_info *hmc_info,
+ bool is_pf,
+ bool reset)
+{
+ unsigned int i;
+
+ for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
+ i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
+}
+
+/**
+ * i40iw_ceq_handler - interrupt handler for ceq
+ * @irq: interrupt request number
+ * @data: ceq pointer
+ */
+static irqreturn_t i40iw_ceq_handler(int irq, void *data)
+{
+ struct i40iw_ceq *iwceq = (struct i40iw_ceq 
*)data; + + if (iwceq->irq != irq) + i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq); + tasklet_schedule(&iwceq->dpc_tasklet); + return IRQ_HANDLED; +} + +/** + * i40iw_create_hmc_obj_type - create hmc object of a given type + * @dev: hardware control device structure + * @info: information for the hmc object to create + */ +static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *info) +{ + return dev->hmc_ops->create_hmc_object(dev, info); +} + +/** + * i40iw_create_hmc_objs - create all hmc objects for the device + * @iwdev: iwarp device + * @is_pf: true if the function is PF otherwise false + * + * Create the device hmc objects and allocate hmc pages + * Return 0 if successful, otherwise clean up and return error + */ +static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev, + bool is_pf) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_hmc_create_obj_info info; + enum i40iw_status_code status; + int i; + + memset(&info, 0, sizeof(info)); + info.hmc_info = dev->hmc_info; + info.is_pf = is_pf; + info.entry_type = iwdev->sd_type; + for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) { + info.rsrc_type = iw_hmc_obj_types[i]; + info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt; + info.add_sd_cnt = 0; + status = i40iw_create_hmc_obj_type(dev, &info); + if (status) { + i40iw_pr_err("create obj type %d status = %d\n", + iw_hmc_obj_types[i], status); + break; + } + } + if (!status) + return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0, + dev->hmc_fn_id, + true, true)); + + while (i) { + i--; + /* destroy the hmc objects of a given type */ + i40iw_close_hmc_objects_type(dev, + iw_hmc_obj_types[i], + dev->hmc_info, + is_pf, + false); + } + return status; +} + +/** + * i40iw_obj_aligned_mem - get aligned memory from device allocated memory + * @iwdev: iwarp device + * @memptr: points to the memory addresses + * @size: size of memory needed + * @mask: mask for the aligned memory + * + * Get aligned memory of the requested size and + * update the memptr to point to the new aligned memory + * Return 0 if successful, otherwise return no memory error + */ +enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev, + struct i40iw_dma_mem *memptr, + u32 size, + u32 mask) +{ + unsigned long va, newva; + unsigned long extra; + + va = (unsigned long)iwdev->obj_next.va; + newva = va; + if (mask) + newva = ALIGN(va, (mask + 1)); + extra = newva - va; + memptr->va = (u8 *)va + extra; + memptr->pa = iwdev->obj_next.pa + extra; + memptr->size = size; + if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size)) + return I40IW_ERR_NO_MEMORY; + + iwdev->obj_next.va = memptr->va + size; + iwdev->obj_next.pa = memptr->pa + size; + return 0; +} + +/** + * i40iw_create_cqp - create control qp + * @iwdev: iwarp device + * + * Return 0, if the cqp and all the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev) +{ + enum i40iw_status_code status; + u32 sqsize = I40IW_CQP_SW_SQSIZE_2048; + struct i40iw_dma_mem mem; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_cqp_init_info cqp_init_info; + struct i40iw_cqp *cqp = &iwdev->cqp; + u16 maj_err, min_err; + int i; + + cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL); + if (!cqp->cqp_requests) + return I40IW_ERR_NO_MEMORY; + cqp->scratch_array = kcalloc(sqsize, 
sizeof(*cqp->scratch_array), GFP_KERNEL); + if (!cqp->scratch_array) { + kfree(cqp->cqp_requests); + return I40IW_ERR_NO_MEMORY; + } + dev->cqp = &cqp->sc_cqp; + dev->cqp->dev = dev; + memset(&cqp_init_info, 0, sizeof(cqp_init_info)); + status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq, + (sizeof(struct i40iw_cqp_sq_wqe) * sqsize), + I40IW_CQP_ALIGNMENT); + if (status) + goto exit; + status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx), + I40IW_HOST_CTX_ALIGNMENT_MASK); + if (status) + goto exit; + dev->cqp->host_ctx_pa = mem.pa; + dev->cqp->host_ctx = mem.va; + /* populate the cqp init info */ + cqp_init_info.dev = dev; + cqp_init_info.sq_size = sqsize; + cqp_init_info.sq = cqp->sq.va; + cqp_init_info.sq_pa = cqp->sq.pa; + cqp_init_info.host_ctx_pa = mem.pa; + cqp_init_info.host_ctx = mem.va; + cqp_init_info.hmc_profile = iwdev->resource_profile; + cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs; + cqp_init_info.scratch_array = cqp->scratch_array; + status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info); + if (status) { + i40iw_pr_err("cqp init status %d\n", status); + goto exit; + } + status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err); + if (status) { + i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n", + status, maj_err, min_err); + goto exit; + } + spin_lock_init(&cqp->req_lock); + INIT_LIST_HEAD(&cqp->cqp_avail_reqs); + INIT_LIST_HEAD(&cqp->cqp_pending_reqs); + /* init the waitq of the cqp_requests and add them to the list */ + for (i = 0; i < sqsize; i++) { + init_waitqueue_head(&cqp->cqp_requests[i].waitq); + list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs); + } + return 0; +exit: + /* clean up the created resources */ + i40iw_destroy_cqp(iwdev, false); + return status; +} + +/** + * i40iw_create_ccq - create control cq + * @iwdev: iwarp device + * + * Return 0, if the ccq and the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_dma_mem mem; + enum i40iw_status_code status; + struct i40iw_ccq_init_info info; + struct i40iw_ccq *ccq = &iwdev->ccq; + + memset(&info, 0, sizeof(info)); + dev->ccq = &ccq->sc_cq; + dev->ccq->dev = dev; + info.dev = dev; + ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area); + ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE; + status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq, + ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT); + if (status) + goto exit; + status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size, + I40IW_SHADOWAREA_MASK); + if (status) + goto exit; + ccq->sc_cq.back_cq = (void *)ccq; + /* populate the ccq init info */ + info.cq_base = ccq->mem_cq.va; + info.cq_pa = ccq->mem_cq.pa; + info.num_elem = IW_CCQ_SIZE; + info.shadow_area = mem.va; + info.shadow_area_pa = mem.pa; + info.ceqe_mask = false; + info.ceq_id_valid = true; + info.shadow_read_threshold = 16; + status = dev->ccq_ops->ccq_init(dev->ccq, &info); + if (!status) + status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true); +exit: + if (status) + i40iw_free_dma_mem(dev->hw, &ccq->mem_cq); + return status; +} + +/** + * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq + * @iwdev: iwarp device + * @msix_vec: interrupt vector information + * @iwceq: ceq associated with the vector + * @ceq_id: the id number of the iwceq + * + * Allocate interrupt resources and enable irq handling + * Return 0 if 
successful, otherwise return error + */ +static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev, + struct i40iw_ceq *iwceq, + u32 ceq_id, + struct i40iw_msix_vector *msix_vec) +{ + enum i40iw_status_code status; + + if (iwdev->msix_shared && !ceq_id) { + tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev); + status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev); + } else { + tasklet_init(&iwceq->dpc_tasklet, i40iw_ceq_dpc, (unsigned long)iwceq); + status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq); + } + + cpumask_clear(&msix_vec->mask); + cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask); + irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask); + + if (status) { + i40iw_pr_err("ceq irq config fail\n"); + return I40IW_ERR_CONFIG; + } + msix_vec->ceq_id = ceq_id; + + return 0; +} + +/** + * i40iw_create_ceq - create completion event queue + * @iwdev: iwarp device + * @iwceq: pointer to the ceq resources to be created + * @ceq_id: the id number of the iwceq + * + * Return 0, if the ceq and the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev, + struct i40iw_ceq *iwceq, + u32 ceq_id) +{ + enum i40iw_status_code status; + struct i40iw_ceq_init_info info; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + u64 scratch; + + memset(&info, 0, sizeof(info)); + info.ceq_id = ceq_id; + iwceq->iwdev = iwdev; + iwceq->mem.size = sizeof(struct i40iw_ceqe) * + iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size, + I40IW_CEQ_ALIGNMENT); + if (status) + goto exit; + info.ceq_id = ceq_id; + info.ceqe_base = iwceq->mem.va; + info.ceqe_pa = iwceq->mem.pa; + + info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + iwceq->sc_ceq.ceq_id = ceq_id; + info.dev = dev; + scratch = (uintptr_t)&iwdev->cqp.sc_cqp; + status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info); + if (!status) + status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch); + +exit: + if (status) + i40iw_free_dma_mem(dev->hw, &iwceq->mem); + return status; +} + +void i40iw_request_reset(struct i40iw_device *iwdev) +{ + struct i40e_info *ldev = iwdev->ldev; + + ldev->ops->request_reset(ldev, iwdev->client, 1); +} + +/** + * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources + * @iwdev: iwarp device + * @ldev: i40e lan device + * + * Allocate a list for all device completion event queues + * Create the ceq's and configure their msix interrupt vectors + * Return 0, if at least one ceq is successfully set up, otherwise return error + */ +static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev, + struct i40e_info *ldev) +{ + u32 i; + u32 ceq_id; + struct i40iw_ceq *iwceq; + struct i40iw_msix_vector *msix_vec; + enum i40iw_status_code status = 0; + u32 num_ceqs; + + if (ldev && ldev->ops && ldev->ops->setup_qvlist) { + status = ldev->ops->setup_qvlist(ldev, &i40iw_client, + iwdev->iw_qvlist); + if (status) + goto exit; + } else { + status = I40IW_ERR_BAD_PTR; + goto exit; + } + + num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs); + iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL); + if (!iwdev->ceqlist) { + status = I40IW_ERR_NO_MEMORY; + goto exit; + } + i = (iwdev->msix_shared) ? 
0 : 1; + for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) { + iwceq = &iwdev->ceqlist[ceq_id]; + status = i40iw_create_ceq(iwdev, iwceq, ceq_id); + if (status) { + i40iw_pr_err("create ceq status = %d\n", status); + break; + } + + msix_vec = &iwdev->iw_msixtbl[i]; + iwceq->irq = msix_vec->irq; + iwceq->msix_idx = msix_vec->idx; + status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec); + if (status) { + i40iw_destroy_ceq(iwdev, iwceq); + break; + } + i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx); + iwdev->ceqs_count++; + } +exit: + if (status && !iwdev->ceqs_count) { + kfree(iwdev->ceqlist); + iwdev->ceqlist = NULL; + return status; + } else { + iwdev->sc_dev.ceq_valid = true; + return 0; + } + +} + +/** + * i40iw_configure_aeq_vector - set up the msix vector for aeq + * @iwdev: iwarp device + * + * Allocate interrupt resources and enable irq handling + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev) +{ + struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl; + u32 ret = 0; + + if (!iwdev->msix_shared) { + tasklet_init(&iwdev->dpc_tasklet, i40iw_dpc, (unsigned long)iwdev); + ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev); + } + if (ret) { + i40iw_pr_err("aeq irq config fail\n"); + return I40IW_ERR_CONFIG; + } + + return 0; +} + +/** + * i40iw_create_aeq - create async event queue + * @iwdev: iwarp device + * + * Return 0, if the aeq and the resources associated with it + * are successfully created, otherwise return error + */ +static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev) +{ + enum i40iw_status_code status; + struct i40iw_aeq_init_info info; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_aeq *aeq = &iwdev->aeq; + u64 scratch = 0; + u32 aeq_size; + + aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt + + iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt; + memset(&info, 0, sizeof(info)); + aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size; + status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size, + I40IW_AEQ_ALIGNMENT); + if (status) + goto exit; + + info.aeqe_base = aeq->mem.va; + info.aeq_elem_pa = aeq->mem.pa; + info.elem_cnt = aeq_size; + info.dev = dev; + status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info); + if (status) + goto exit; + status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1); + if (!status) + status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq); +exit: + if (status) + i40iw_free_dma_mem(dev->hw, &aeq->mem); + return status; +} + +/** + * i40iw_setup_aeq - set up the device aeq + * @iwdev: iwarp device + * + * Create the aeq and configure its msix interrupt vector + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + + status = i40iw_create_aeq(iwdev); + if (status) + return status; + + status = i40iw_configure_aeq_vector(iwdev); + if (status) { + i40iw_destroy_aeq(iwdev); + return status; + } + + if (!iwdev->msix_shared) + i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx); + return 0; +} + +/** + * i40iw_initialize_ilq - create iwarp local queue for cm + * @iwdev: iwarp device + * + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev) +{ + struct i40iw_puda_rsrc_info info; + enum i40iw_status_code status; + + memset(&info, 0, 
sizeof(info)); + info.type = I40IW_PUDA_RSRC_TYPE_ILQ; + info.cq_id = 1; + info.qp_id = 0; + info.count = 1; + info.pd_id = 1; + info.sq_size = 8192; + info.rq_size = 8192; + info.buf_size = 1024; + info.tx_buf_cnt = 16384; + info.receive = i40iw_receive_ilq; + info.xmit_complete = i40iw_free_sqbuf; + status = i40iw_puda_create_rsrc(&iwdev->vsi, &info); + if (status) + i40iw_pr_err("ilq create fail\n"); + return status; +} + +/** + * i40iw_initialize_ieq - create iwarp exception queue + * @iwdev: iwarp device + * + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev) +{ + struct i40iw_puda_rsrc_info info; + enum i40iw_status_code status; + + memset(&info, 0, sizeof(info)); + info.type = I40IW_PUDA_RSRC_TYPE_IEQ; + info.cq_id = 2; + info.qp_id = iwdev->vsi.exception_lan_queue; + info.count = 1; + info.pd_id = 2; + info.sq_size = 8192; + info.rq_size = 8192; + info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN; + info.tx_buf_cnt = 4096; + status = i40iw_puda_create_rsrc(&iwdev->vsi, &info); + if (status) + i40iw_pr_err("ieq create fail\n"); + return status; +} + +/** + * i40iw_reinitialize_ieq - destroy and re-create ieq + * @dev: iwarp device + */ +void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev) +{ + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false); + if (i40iw_initialize_ieq(iwdev)) { + iwdev->reset = true; + i40iw_request_reset(iwdev); + } +} + +/** + * i40iw_hmc_setup - create hmc objects for the device + * @iwdev: iwarp device + * + * Set up the device private memory space for the number and size of + * the hmc objects and create the objects + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev) +{ + enum i40iw_status_code status; + + iwdev->sd_type = I40IW_SD_TYPE_DIRECT; + status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT); + if (status) + goto exit; + status = i40iw_create_hmc_objs(iwdev, true); + if (status) + goto exit; + iwdev->init_state = HMC_OBJS_CREATED; +exit: + return status; +} + +/** + * i40iw_del_init_mem - deallocate memory resources + * @iwdev: iwarp device + */ +static void i40iw_del_init_mem(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + + i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem); + kfree(dev->hmc_info->sd_table.sd_entry); + dev->hmc_info->sd_table.sd_entry = NULL; + kfree(iwdev->mem_resources); + iwdev->mem_resources = NULL; + kfree(iwdev->ceqlist); + iwdev->ceqlist = NULL; + kfree(iwdev->iw_msixtbl); + iwdev->iw_msixtbl = NULL; + kfree(iwdev->hmc_info_mem); + iwdev->hmc_info_mem = NULL; +} + +/** + * i40iw_del_macip_entry - remove a mac ip address entry from the hw table + * @iwdev: iwarp device + * @idx: the index of the mac ip address to delete + */ +static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx) +{ + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status = 0; + + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + i40iw_pr_err("cqp_request memory failed\n"); + return; + } + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY; + cqp_info->post_sq = 1; + cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp; + cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = 
(uintptr_t)cqp_request; + cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx; + cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Del MAC Ip entry fail"); +} + +/** + * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table + * @iwdev: iwarp device + * @mac_addr: pointer to mac address + * @idx: the index of the mac ip address to add + */ +static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev, + u8 *mac_addr, + u8 idx) +{ + struct i40iw_local_mac_ipaddr_entry_info *info; + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status = 0; + + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + i40iw_pr_err("cqp_request memory failed\n"); + return I40IW_ERR_NO_MEMORY; + } + + cqp_info = &cqp_request->info; + + cqp_info->post_sq = 1; + info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info; + ether_addr_copy(info->mac_addr, mac_addr); + info->entry_idx = idx; + cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; + cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY; + cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp; + cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Add MAC Ip entry fail"); + return status; +} + +/** + * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry + * @iwdev: iwarp device + * @mac_ip_tbl_idx: the index of the new mac ip address + * + * Allocate a mac ip address entry and update the mac_ip_tbl_idx + * to hold the index of the newly created mac ip address + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev, + u16 *mac_ip_tbl_idx) +{ + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status = 0; + + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + i40iw_pr_err("cqp_request memory failed\n"); + return I40IW_ERR_NO_MEMORY; + } + + /* increment refcount, because we need the cqp request ret value */ + atomic_inc(&cqp_request->refcount); + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY; + cqp_info->post_sq = 1; + cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp; + cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (!status) + *mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val; + else + i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail"); + /* decrement refcount and free the cqp request, if no longer used */ + i40iw_put_cqp_request(iwcqp, cqp_request); + return status; +} + +/** + * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry + * @iwdev: iwarp device + * @macaddr: pointer to mac address + * + * Allocate a mac ip address entry and add it to the hw table + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev, + u8 *macaddr) +{ + enum i40iw_status_code status; + + status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx); + if (!status) { + status = 
i40iw_add_mac_ipaddr_entry(iwdev, macaddr, + (u8)iwdev->mac_ip_table_idx); + if (status) + i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); + } + return status; +} + +/** + * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table + * @iwdev: iwarp device + */ +static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev) +{ + struct net_device *ip_dev; + struct inet6_dev *idev; + struct inet6_ifaddr *ifp, *tmp; + u32 local_ipaddr6[4]; + + rcu_read_lock(); + for_each_netdev_rcu(&init_net, ip_dev) { + if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) && + (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) || + (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) { + idev = __in6_dev_get(ip_dev); + if (!idev) { + i40iw_pr_err("ipv6 inet device not found\n"); + break; + } + list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) { + i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr, + rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr); + i40iw_copy_ip_ntohl(local_ipaddr6, + ifp->addr.in6_u.u6_addr32); + i40iw_manage_arp_cache(iwdev, + ip_dev->dev_addr, + local_ipaddr6, + false, + I40IW_ARP_ADD); + } + } + } + rcu_read_unlock(); +} + +/** + * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table + * @iwdev: iwarp device + */ +static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev) +{ + struct net_device *dev; + struct in_device *idev; + bool got_lock = true; + u32 ip_addr; + + if (!rtnl_trylock()) + got_lock = false; + + for_each_netdev(&init_net, dev) { + if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) && + (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) || + (dev == iwdev->netdev)) && (dev->flags & IFF_UP)) { + idev = in_dev_get(dev); + for_ifa(idev) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM, + "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address, + rdma_vlan_dev_vlan_id(dev), dev->dev_addr); + + ip_addr = ntohl(ifa->ifa_address); + i40iw_manage_arp_cache(iwdev, + dev->dev_addr, + &ip_addr, + true, + I40IW_ARP_ADD); + } + endfor_ifa(idev); + in_dev_put(idev); + } + } + if (got_lock) + rtnl_unlock(); +} + +/** + * i40iw_add_mac_ip - add mac and ip addresses + * @iwdev: iwarp device + * + * Create and add a mac ip address entry to the hw table and + * ipv4/ipv6 addresses to the arp cache + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev) +{ + struct net_device *netdev = iwdev->netdev; + enum i40iw_status_code status; + + status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr); + if (status) + return status; + i40iw_add_ipv4_addr(iwdev); + i40iw_add_ipv6_addr(iwdev); + return 0; +} + +/** + * i40iw_wait_pe_ready - Check if firmware is ready + * @hw: provides access to registers + */ +static void i40iw_wait_pe_ready(struct i40iw_hw *hw) +{ + u32 statusfw; + u32 statuscpu0; + u32 statuscpu1; + u32 statuscpu2; + u32 retrycount = 0; + + do { + statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS); + i40iw_pr_info("[%04d] fm load status[x%04X]\n", __LINE__, statusfw); + statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0); + i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0); + statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1); + i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n", + __LINE__, statuscpu1); + statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2); + i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n", + __LINE__, statuscpu2); + if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80)) + break; /* SUCCESS */ + msleep(1000); + 
retrycount++; + } while (retrycount < 14); + i40iw_wr32(hw, 0xb4040, 0x4C104C5); +} + +/** + * i40iw_initialize_dev - initialize device + * @iwdev: iwarp device + * @ldev: lan device information + * + * Allocate memory for the hmc objects and initialize iwdev + * Return 0 if successful, otherwise clean up the resources + * and return error + */ +static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, + struct i40e_info *ldev) +{ + enum i40iw_status_code status; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_device_init_info info; + struct i40iw_vsi_init_info vsi_info; + struct i40iw_dma_mem mem; + struct i40iw_l2params l2params; + u32 size; + struct i40iw_vsi_stats_info stats_info; + u16 last_qset = I40IW_NO_QSET; + u16 qset; + u32 i; + + memset(&l2params, 0, sizeof(l2params)); + memset(&info, 0, sizeof(info)); + size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) + + (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX); + iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL); + if (!iwdev->hmc_info_mem) + return I40IW_ERR_NO_MEMORY; + + iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem; + dev->hmc_info = &iwdev->hw.hmc; + dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1); + status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); + if (status) + goto error; + info.fpm_query_buf_pa = mem.pa; + info.fpm_query_buf = mem.va; + status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, + I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); + if (status) + goto error; + info.fpm_commit_buf_pa = mem.pa; + info.fpm_commit_buf = mem.va; + info.hmc_fn_id = ldev->fid; + info.is_pf = (ldev->ftype) ? false : true; + info.bar0 = ldev->hw_addr; + info.hw = &iwdev->hw; + info.debug_mask = debug; + l2params.mtu = + (ldev->params.mtu) ? 
ldev->params.mtu : I40IW_DEFAULT_MTU; + for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) { + qset = ldev->params.qos.prio_qos[i].qs_handle; + l2params.qs_handle_list[i] = qset; + if (last_qset == I40IW_NO_QSET) + last_qset = qset; + else if ((qset != last_qset) && (qset != I40IW_NO_QSET)) + iwdev->dcb = true; + } + i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb); + info.vchnl_send = i40iw_virtchnl_send; + status = i40iw_device_init(&iwdev->sc_dev, &info); + + if (status) + goto error; + memset(&vsi_info, 0, sizeof(vsi_info)); + vsi_info.dev = &iwdev->sc_dev; + vsi_info.back_vsi = (void *)iwdev; + vsi_info.params = &l2params; + vsi_info.exception_lan_queue = 1; + i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info); + + if (dev->is_pf) { + memset(&stats_info, 0, sizeof(stats_info)); + stats_info.fcn_id = ldev->fid; + stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); + if (!stats_info.pestat) { + status = I40IW_ERR_NO_MEMORY; + goto error; + } + stats_info.stats_initialize = true; + if (stats_info.pestat) + i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); + } + return status; +error: + kfree(iwdev->hmc_info_mem); + iwdev->hmc_info_mem = NULL; + return status; +} + +/** + * i40iw_register_notifiers - register tcp ip notifiers + */ +static void i40iw_register_notifiers(void) +{ + register_inetaddr_notifier(&i40iw_inetaddr_notifier); + register_inet6addr_notifier(&i40iw_inetaddr6_notifier); + register_netevent_notifier(&i40iw_net_notifier); + register_netdevice_notifier(&i40iw_netdevice_notifier); +} + +/** + * i40iw_unregister_notifiers - unregister tcp ip notifiers + */ + +static void i40iw_unregister_notifiers(void) +{ + unregister_netevent_notifier(&i40iw_net_notifier); + unregister_inetaddr_notifier(&i40iw_inetaddr_notifier); + unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); + unregister_netdevice_notifier(&i40iw_netdevice_notifier); +} + +/** + * i40iw_save_msix_info - copy msix vector information to iwarp device + * @iwdev: iwarp device + * @ldev: lan device information + * + * Allocate iwdev msix table and copy the ldev msix info to the table + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, + struct i40e_info *ldev) +{ + struct i40e_qvlist_info *iw_qvlist; + struct i40e_qv_info *iw_qvinfo; + u32 ceq_idx; + u32 i; + u32 size; + + if (!ldev->msix_count) { + i40iw_pr_err("No MSI-X vectors\n"); + return I40IW_ERR_CONFIG; + } + + iwdev->msix_count = ldev->msix_count; + + size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; + size += sizeof(struct i40e_qvlist_info); + size += sizeof(struct i40e_qv_info) * iwdev->msix_count - 1; + iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL); + + if (!iwdev->iw_msixtbl) + return I40IW_ERR_NO_MEMORY; + iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]); + iw_qvlist = iwdev->iw_qvlist; + iw_qvinfo = iw_qvlist->qv_info; + iw_qvlist->num_vectors = iwdev->msix_count; + if (iwdev->msix_count <= num_online_cpus()) + iwdev->msix_shared = true; + for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) { + iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry; + iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector; + iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx; + if (i == 0) { + iw_qvinfo->aeq_idx = 0; + if (iwdev->msix_shared) + iw_qvinfo->ceq_idx = ceq_idx++; + else + iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX; + } else { + iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX; + iw_qvinfo->ceq_idx = 
ceq_idx++; + } + iw_qvinfo->itr_idx = 3; + iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx; + } + return 0; +} + +/** + * i40iw_deinit_device - clean up the device resources + * @iwdev: iwarp device + * + * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses, + * destroy the device queues and free the pble and the hmc objects + */ +static void i40iw_deinit_device(struct i40iw_device *iwdev) +{ + struct i40e_info *ldev = iwdev->ldev; + + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + + i40iw_pr_info("state = %d\n", iwdev->init_state); + if (iwdev->param_wq) + destroy_workqueue(iwdev->param_wq); + + switch (iwdev->init_state) { + case RDMA_DEV_REGISTERED: + iwdev->iw_status = 0; + i40iw_port_ibevent(iwdev); + i40iw_destroy_rdma_device(iwdev->iwibdev); + /* fallthrough */ + case IP_ADDR_REGISTERED: + if (!iwdev->reset) + i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); + /* fallthrough */ + /* fallthrough */ + case PBLE_CHUNK_MEM: + i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); + /* fallthrough */ + case CEQ_CREATED: + i40iw_dele_ceqs(iwdev); + /* fallthrough */ + case AEQ_CREATED: + i40iw_destroy_aeq(iwdev); + /* fallthrough */ + case IEQ_CREATED: + i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); + /* fallthrough */ + case ILQ_CREATED: + i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); + /* fallthrough */ + case CCQ_CREATED: + i40iw_destroy_ccq(iwdev); + /* fallthrough */ + case HMC_OBJS_CREATED: + i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); + /* fallthrough */ + case CQP_CREATED: + i40iw_destroy_cqp(iwdev, true); + /* fallthrough */ + case INITIAL_STATE: + i40iw_cleanup_cm_core(&iwdev->cm_core); + if (iwdev->vsi.pestat) { + i40iw_vsi_stats_free(&iwdev->vsi); + kfree(iwdev->vsi.pestat); + } + i40iw_del_init_mem(iwdev); + break; + case INVALID_STATE: + /* fallthrough */ + default: + i40iw_pr_err("bad init_state = %d\n", iwdev->init_state); + break; + } + + i40iw_del_handler(i40iw_find_i40e_handler(ldev)); + kfree(iwdev->hdl); +} + +/** + * i40iw_setup_init_state - set up the initial device struct + * @hdl: handler for iwarp device - one per instance + * @ldev: lan device information + * @client: iwarp client information, provided during registration + * + * Initialize the iwarp device and its hdl information + * using the ldev and client information + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl, + struct i40e_info *ldev, + struct i40e_client *client) +{ + struct i40iw_device *iwdev = &hdl->device; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + + memcpy(&hdl->ldev, ldev, sizeof(*ldev)); + + iwdev->mpa_version = mpa_version; + iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ? + (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT : + I40IW_HMC_PROFILE_DEFAULT; + iwdev->max_rdma_vfs = + (iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? 
max_rdma_vfs : 0; + iwdev->max_enabled_vfs = iwdev->max_rdma_vfs; + iwdev->netdev = ldev->netdev; + hdl->client = client; + if (!ldev->ftype) + iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET; + else + iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET; + + status = i40iw_save_msix_info(iwdev, ldev); + if (status) + return status; + iwdev->hw.dev_context = (void *)ldev->pcidev; + iwdev->hw.hw_addr = ldev->hw_addr; + status = i40iw_allocate_dma_mem(&iwdev->hw, + &iwdev->obj_mem, 8192, 4096); + if (status) + goto exit; + iwdev->obj_next = iwdev->obj_mem; + + init_waitqueue_head(&iwdev->vchnl_waitq); + init_waitqueue_head(&dev->vf_reqs); + init_waitqueue_head(&iwdev->close_wq); + + status = i40iw_initialize_dev(iwdev, ldev); +exit: + if (status) { + kfree(iwdev->iw_msixtbl); + i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem); + iwdev->iw_msixtbl = NULL; + } + return status; +} + +/** + * i40iw_get_used_rsrc - determine resources used internally + * @iwdev: iwarp device + * + * Called after internal allocations + */ +static void i40iw_get_used_rsrc(struct i40iw_device *iwdev) +{ + iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0); + iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0); + iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0); + iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0); +} + +/** + * i40iw_open - client interface operation open for iwarp/uda device + * @ldev: lan device information + * @client: iwarp client information, provided during registration + * + * Called by the lan driver during the processing of client register + * Create device resources, set up queues, pble and hmc objects and + * register the device with the ib verbs interface + * Return 0 if successful, otherwise return error + */ +static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) +{ + struct i40iw_device *iwdev; + struct i40iw_sc_dev *dev; + enum i40iw_status_code status; + struct i40iw_handler *hdl; + + hdl = i40iw_find_netdev(ldev->netdev); + if (hdl) + return 0; + + hdl = kzalloc(sizeof(*hdl), GFP_KERNEL); + if (!hdl) + return -ENOMEM; + iwdev = &hdl->device; + iwdev->hdl = hdl; + dev = &iwdev->sc_dev; + i40iw_setup_cm_core(iwdev); + + dev->back_dev = (void *)iwdev; + iwdev->ldev = &hdl->ldev; + iwdev->client = client; + mutex_init(&iwdev->pbl_mutex); + i40iw_add_handler(hdl); + + do { + status = i40iw_setup_init_state(hdl, ldev, client); + if (status) + break; + iwdev->init_state = INITIAL_STATE; + if (dev->is_pf) + i40iw_wait_pe_ready(dev->hw); + status = i40iw_create_cqp(iwdev); + if (status) + break; + iwdev->init_state = CQP_CREATED; + status = i40iw_hmc_setup(iwdev); + if (status) + break; + status = i40iw_create_ccq(iwdev); + if (status) + break; + iwdev->init_state = CCQ_CREATED; + status = i40iw_initialize_ilq(iwdev); + if (status) + break; + iwdev->init_state = ILQ_CREATED; + status = i40iw_initialize_ieq(iwdev); + if (status) + break; + iwdev->init_state = IEQ_CREATED; + status = i40iw_setup_aeq(iwdev); + if (status) + break; + iwdev->init_state = AEQ_CREATED; + status = i40iw_setup_ceqs(iwdev, ldev); + if (status) + break; + iwdev->init_state = CEQ_CREATED; + status = i40iw_initialize_hw_resources(iwdev); + if (status) + break; + i40iw_get_used_rsrc(iwdev); + dev->ccq_ops->ccq_arm(dev->ccq); + status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc); + if (status) + break; + iwdev->init_state = 
PBLE_CHUNK_MEM; + iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); + status = i40iw_add_mac_ip(iwdev); + if (status) + break; + iwdev->init_state = IP_ADDR_REGISTERED; + if (i40iw_register_rdma_device(iwdev)) { + i40iw_pr_err("register rdma device fail\n"); + break; + }; + + iwdev->init_state = RDMA_DEV_REGISTERED; + iwdev->iw_status = 1; + i40iw_port_ibevent(iwdev); + iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM); + if(iwdev->param_wq == NULL) + break; + i40iw_pr_info("i40iw_open completed\n"); + return 0; + } while (0); + + i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state); + i40iw_deinit_device(iwdev); + return -ERESTART; +} + +/** + * i40iw_l2params_worker - worker for l2 params change + * @work: work pointer for l2 params + */ +static void i40iw_l2params_worker(struct work_struct *work) +{ + struct l2params_work *dwork = + container_of(work, struct l2params_work, work); + struct i40iw_device *iwdev = dwork->iwdev; + + i40iw_change_l2params(&iwdev->vsi, &dwork->l2params); + atomic_dec(&iwdev->params_busy); + kfree(work); +} + +/** + * i40iw_l2param_change - handle qs handles for qos and mss change + * @ldev: lan device information + * @client: client for paramater change + * @params: new parameters from L2 + */ +static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client, + struct i40e_params *params) +{ + struct i40iw_handler *hdl; + struct i40iw_l2params *l2params; + struct l2params_work *work; + struct i40iw_device *iwdev; + int i; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + iwdev = &hdl->device; + + if (atomic_read(&iwdev->params_busy)) + return; + + + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return; + + atomic_inc(&iwdev->params_busy); + + work->iwdev = iwdev; + l2params = &work->l2params; + for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) + l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle; + + l2params->mtu = (params->mtu) ? 
params->mtu : iwdev->vsi.mtu; + + INIT_WORK(&work->work, i40iw_l2params_worker); + queue_work(iwdev->param_wq, &work->work); +} + +/** + * i40iw_close - client interface operation close for iwarp/uda device + * @ldev: lan device information + * @client: client to close + * + * Called by the lan driver during the processing of client unregister + * Destroy and clean up the driver resources + */ +static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset) +{ + struct i40iw_device *iwdev; + struct i40iw_handler *hdl; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + iwdev = &hdl->device; + iwdev->closing = true; + + if (reset) + iwdev->reset = true; + + i40iw_cm_teardown_connections(iwdev, NULL, NULL, true); + destroy_workqueue(iwdev->virtchnl_wq); + i40iw_deinit_device(iwdev); +} + +/** + * i40iw_vf_reset - process VF reset + * @ldev: lan device information + * @client: client interface instance + * @vf_id: virtual function id + * + * Called when a VF is reset by the PF + * Destroy and clean up the VF resources + */ +static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id) +{ + struct i40iw_handler *hdl; + struct i40iw_sc_dev *dev; + struct i40iw_hmc_fcn_info hmc_fcn_info; + struct i40iw_virt_mem vf_dev_mem; + struct i40iw_vfdev *tmp_vfdev; + unsigned int i; + unsigned long flags; + struct i40iw_device *iwdev; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + dev = &hdl->device.sc_dev; + iwdev = (struct i40iw_device *)dev->back_dev; + + for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) { + if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id)) + continue; + /* free all resources allocated on behalf of vf */ + tmp_vfdev = dev->vf_dev[i]; + spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags); + dev->vf_dev[i] = NULL; + spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags); + i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false); + /* remove vf hmc function */ + memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info)); + hmc_fcn_info.vf_id = vf_id; + hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx; + hmc_fcn_info.free_fcn = true; + i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info); + /* free vf_dev */ + vf_dev_mem.va = tmp_vfdev; + vf_dev_mem.size = sizeof(struct i40iw_vfdev) + + sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX; + i40iw_free_virt_mem(dev->hw, &vf_dev_mem); + break; + } +} + +/** + * i40iw_vf_enable - enable a number of VFs + * @ldev: lan device information + * @client: client interface instance + * @num_vfs: number of VFs for the PF + * + * Called when the number of VFs changes + */ +static void i40iw_vf_enable(struct i40e_info *ldev, + struct i40e_client *client, + u32 num_vfs) +{ + struct i40iw_handler *hdl; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return; + + if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT) + hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT; + else + hdl->device.max_enabled_vfs = num_vfs; +} + +/** + * i40iw_vf_capable - check if VF capable + * @ldev: lan device information + * @client: client interface instance + * @vf_id: virtual function id + * + * Return 1 if a VF slot is available or if VF is already RDMA enabled + * Return 0 otherwise + */ +static int i40iw_vf_capable(struct i40e_info *ldev, + struct i40e_client *client, + u32 vf_id) +{ + struct i40iw_handler *hdl; + struct i40iw_sc_dev *dev; + unsigned int i; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return 0; + + dev = &hdl->device.sc_dev; + + for (i = 0; i < 
hdl->device.max_enabled_vfs; i++) { + if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id)) + return 1; + } + + return 0; +} + +/** + * i40iw_virtchnl_receive - receive a message through the virtual channel + * @ldev: lan device information + * @client: client interface instance + * @vf_id: virtual function id associated with the message + * @msg: message buffer pointer + * @len: length of the message + * + * Invoke virtual channel receive operation for the given msg + * Return 0 if successful, otherwise return error + */ +static int i40iw_virtchnl_receive(struct i40e_info *ldev, + struct i40e_client *client, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_handler *hdl; + struct i40iw_sc_dev *dev; + struct i40iw_device *iwdev; + int ret_code = I40IW_NOT_SUPPORTED; + + if (!len || !msg) + return I40IW_ERR_PARAM; + + hdl = i40iw_find_i40e_handler(ldev); + if (!hdl) + return I40IW_ERR_PARAM; + + dev = &hdl->device.sc_dev; + iwdev = dev->back_dev; + + if (dev->vchnl_if.vchnl_recv) { + ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len); + if (!dev->is_pf) { + atomic_dec(&iwdev->vchnl_msgs); + wake_up(&iwdev->vchnl_waitq); + } + } + return ret_code; +} + +/** + * i40iw_vf_clear_to_send - wait to send virtual channel message + * @dev: iwarp device * + * Wait for until virtual channel is clear + * before sending the next message + * + * Returns false if error + * Returns true if clear to send + */ +bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev) +{ + struct i40iw_device *iwdev; + wait_queue_entry_t wait; + + iwdev = dev->back_dev; + + if (!wq_has_sleeper(&dev->vf_reqs) && + (atomic_read(&iwdev->vchnl_msgs) == 0)) + return true; /* virtual channel is clear */ + + init_wait(&wait); + add_wait_queue_exclusive(&dev->vf_reqs, &wait); + + if (!wait_event_timeout(dev->vf_reqs, + (atomic_read(&iwdev->vchnl_msgs) == 0), + I40IW_VCHNL_EVENT_TIMEOUT)) + dev->vchnl_up = false; + + remove_wait_queue(&dev->vf_reqs, &wait); + + return dev->vchnl_up; +} + +/** + * i40iw_virtchnl_send - send a message through the virtual channel + * @dev: iwarp device + * @vf_id: virtual function id associated with the message + * @msg: virtual channel message buffer pointer + * @len: length of the message + * + * Invoke virtual channel send operation for the given msg + * Return 0 if successful, otherwise return error + */ +static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_device *iwdev; + struct i40e_info *ldev; + + if (!dev || !dev->back_dev) + return I40IW_ERR_BAD_PTR; + + iwdev = dev->back_dev; + ldev = iwdev->ldev; + + if (ldev && ldev->ops && ldev->ops->virtchnl_send) + return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len); + return I40IW_ERR_BAD_PTR; +} + +/* client interface functions */ +static const struct i40e_client_ops i40e_ops = { + .open = i40iw_open, + .close = i40iw_close, + .l2_param_change = i40iw_l2param_change, + .virtchnl_receive = i40iw_virtchnl_receive, + .vf_reset = i40iw_vf_reset, + .vf_enable = i40iw_vf_enable, + .vf_capable = i40iw_vf_capable +}; + +/** + * i40iw_init_module - driver initialization function + * + * First function to call when the driver is loaded + * Register the driver as i40e client and port mapper client + */ +static int __init i40iw_init_module(void) +{ + int ret; + + memset(&i40iw_client, 0, sizeof(i40iw_client)); + i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR; + i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR; + 
i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD; + i40iw_client.ops = &i40e_ops; + memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH); + i40iw_client.type = I40E_CLIENT_IWARP; + spin_lock_init(&i40iw_handler_lock); + ret = i40e_register_client(&i40iw_client); + i40iw_register_notifiers(); + + return ret; +} + +/** + * i40iw_exit_module - driver exit clean up function + * + * The function is called just before the driver is unloaded + * Unregister the driver as i40e client and port mapper client + */ +static void __exit i40iw_exit_module(void) +{ + i40iw_unregister_notifiers(); + i40e_unregister_client(&i40iw_client); +} + +module_init(i40iw_init_module); +module_exit(i40iw_exit_module); diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h new file mode 100644 index 000000000..f27be3e78 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h @@ -0,0 +1,217 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_OSDEP_H +#define I40IW_OSDEP_H + +#include <linux/kernel.h> +#include <linux/string.h> +#include <linux/bitops.h> +#include <net/tcp.h> +#include <crypto/hash.h> +/* get readq/writeq support for 32 bit kernels, use the low-first version */ +#include <linux/io-64-nonatomic-lo-hi.h> + +#define STATS_TIMER_DELAY 1000 + +static inline void set_64bit_val(u64 *wqe_words, u32 byte_index, u64 value) +{ + wqe_words[byte_index >> 3] = value; +} + +/** + * set_32bit_val - set 32 value to hw wqe + * @wqe_words: wqe addr to write + * @byte_index: index in wqe + * @value: value to write + **/ +static inline void set_32bit_val(u32 *wqe_words, u32 byte_index, u32 value) +{ + wqe_words[byte_index >> 2] = value; +} + +/** + * get_64bit_val - read 64 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to read from + * @value: read value + **/ +static inline void get_64bit_val(u64 *wqe_words, u32 byte_index, u64 *value) +{ + *value = wqe_words[byte_index >> 3]; +} + +/** + * get_32bit_val - read 32 bit value from wqe + * @wqe_words: wqe addr + * @byte_index: index to reaad from + * @value: return 32 bit value + **/ +static inline void get_32bit_val(u32 *wqe_words, u32 byte_index, u32 *value) +{ + *value = wqe_words[byte_index >> 2]; +} + +struct i40iw_dma_mem { + void *va; + dma_addr_t pa; + u32 size; +} __packed; + +struct i40iw_virt_mem { + void *va; + u32 size; +} __packed; + +#define i40iw_debug(h, m, s, ...) \ +do { \ + if (((m) & (h)->debug_mask)) \ + pr_info("i40iw " s, ##__VA_ARGS__); \ +} while (0) + +#define i40iw_flush(a) readl((a)->hw_addr + I40E_GLGEN_STAT) + +#define I40E_GLHMC_VFSDCMD(_i) (0x000C8000 + ((_i) * 4)) \ + /* _i=0...31 */ +#define I40E_GLHMC_VFSDCMD_MAX_INDEX 31 +#define I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT 0 +#define I40E_GLHMC_VFSDCMD_PMSDIDX_MASK (0xFFF \ + << I40E_GLHMC_VFSDCMD_PMSDIDX_SHIFT) +#define I40E_GLHMC_VFSDCMD_PF_SHIFT 16 +#define I40E_GLHMC_VFSDCMD_PF_MASK (0xF << I40E_GLHMC_VFSDCMD_PF_SHIFT) +#define I40E_GLHMC_VFSDCMD_VF_SHIFT 20 +#define I40E_GLHMC_VFSDCMD_VF_MASK (0x1FF << I40E_GLHMC_VFSDCMD_VF_SHIFT) +#define I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT 29 +#define I40E_GLHMC_VFSDCMD_PMF_TYPE_MASK (0x3 \ + << I40E_GLHMC_VFSDCMD_PMF_TYPE_SHIFT) +#define I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT 31 +#define I40E_GLHMC_VFSDCMD_PMSDWR_MASK (0x1 << I40E_GLHMC_VFSDCMD_PMSDWR_SHIFT) + +#define I40E_GLHMC_VFSDDATAHIGH(_i) (0x000C8200 + ((_i) * 4)) \ + /* _i=0...31 */ +#define I40E_GLHMC_VFSDDATAHIGH_MAX_INDEX 31 +#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT 0 +#define I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF \ + << I40E_GLHMC_VFSDDATAHIGH_PMSDDATAHIGH_SHIFT) + +#define I40E_GLHMC_VFSDDATALOW(_i) (0x000C8100 + ((_i) * 4)) \ + /* _i=0...31 */ +#define I40E_GLHMC_VFSDDATALOW_MAX_INDEX 31 +#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_GLHMC_VFSDDATALOW_PMSDVALID_MASK (0x1 \ + << I40E_GLHMC_VFSDDATALOW_PMSDVALID_SHIFT) +#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_GLHMC_VFSDDATALOW_PMSDTYPE_MASK (0x1 \ + << I40E_GLHMC_VFSDDATALOW_PMSDTYPE_SHIFT) +#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_MASK (0x3FF \ + << I40E_GLHMC_VFSDDATALOW_PMSDBPCOUNT_SHIFT) +#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT 12 +#define I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_MASK (0xFFFFF \ + << I40E_GLHMC_VFSDDATALOW_PMSDDATALOW_SHIFT) + +#define I40E_GLPE_FWLDSTATUS 0x0000D200 +#define 
I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT 0 +#define I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_LOAD_REQUESTED_SHIFT) +#define I40E_GLPE_FWLDSTATUS_DONE_SHIFT 1 +#define I40E_GLPE_FWLDSTATUS_DONE_MASK (0x1 << I40E_GLPE_FWLDSTATUS_DONE_SHIFT) +#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT 2 +#define I40E_GLPE_FWLDSTATUS_CQP_FAIL_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_CQP_FAIL_SHIFT) +#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT 3 +#define I40E_GLPE_FWLDSTATUS_TEP_FAIL_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_TEP_FAIL_SHIFT) +#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT 4 +#define I40E_GLPE_FWLDSTATUS_OOP_FAIL_MASK (0x1 \ + << I40E_GLPE_FWLDSTATUS_OOP_FAIL_SHIFT) + +struct i40iw_sc_dev; +struct i40iw_sc_qp; +struct i40iw_puda_buf; +struct i40iw_puda_completion_info; +struct i40iw_update_sds_info; +struct i40iw_hmc_fcn_info; +struct i40iw_virtchnl_work_info; +struct i40iw_manage_vf_pble_info; +struct i40iw_device; +struct i40iw_hmc_info; +struct i40iw_hw; + +u8 __iomem *i40iw_get_hw_addr(void *dev); +void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); +enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev); +bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev); +enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, void *addr, + u32 length, u32 value); +struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, struct i40iw_puda_buf *buf); +void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum); +void i40iw_free_hash_desc(struct shash_desc *); +enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **); +enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *info); +enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev, + struct i40iw_hmc_fcn_info *hmcfcninfo); +enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id); +enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id); +enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *mem); +enum i40iw_status_code i40iw_cqp_manage_vf_pble_bp(struct i40iw_sc_dev *dev, + struct i40iw_manage_vf_pble_info *info); +void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); +void *i40iw_remove_head(struct list_head *list); +void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); + +void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); +void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); +void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp); +void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp); + +enum i40iw_status_code i40iw_hw_manage_vf_pble_bp(struct i40iw_device *iwdev, + struct i40iw_manage_vf_pble_info *info, + bool wait); +struct i40iw_sc_vsi; +void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi); +void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi); +#define i40iw_mmiowb() mmiowb() +void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value); +u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg); +#endif /* _I40IW_OSDEP_H_ */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h 
b/drivers/infiniband/hw/i40iw/i40iw_p.h new file mode 100644 index 000000000..11d3a2a72 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_p.h @@ -0,0 +1,128 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_P_H +#define I40IW_P_H + +#define PAUSE_TIMER_VALUE 0xFFFF +#define REFRESH_THRESHOLD 0x7FFF +#define HIGH_THRESHOLD 0x800 +#define LOW_THRESHOLD 0x200 +#define ALL_TC2PFC 0xFF +#define CQP_COMPL_WAIT_TIME 0x3E8 +#define CQP_TIMEOUT_THRESHOLD 5 + +void i40iw_debug_buf(struct i40iw_sc_dev *dev, enum i40iw_debug_flag mask, + char *desc, u64 *buf, u32 size); +/* init operations */ +enum i40iw_status_code i40iw_device_init(struct i40iw_sc_dev *dev, + struct i40iw_device_init_info *info); + +void i40iw_sc_cqp_post_sq(struct i40iw_sc_cqp *cqp); + +u64 *i40iw_sc_cqp_get_next_send_wqe(struct i40iw_sc_cqp *cqp, u64 scratch); + +void i40iw_check_cqp_progress(struct i40iw_cqp_timeout *cqp_timeout, struct i40iw_sc_dev *dev); + +enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp, + struct i40iw_fast_reg_stag_info *info, + bool post_sq); + +void i40iw_insert_wqe_hdr(u64 *wqe, u64 header); + +/* HMC/FPM functions */ +enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, + u8 hmc_fn_id); + +enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev, u8 vf_hmc_fn_id, + u32 *vf_cnt_array); + +/* stats functions */ +void i40iw_hw_stats_refresh_all(struct i40iw_vsi_pestat *stats); +void i40iw_hw_stats_read_all(struct i40iw_vsi_pestat *stats, struct i40iw_dev_hw_stats *stats_values); +void i40iw_hw_stats_read_32(struct i40iw_vsi_pestat *stats, + enum i40iw_hw_stats_index_32b index, + u64 *value); +void i40iw_hw_stats_read_64(struct i40iw_vsi_pestat *stats, + enum i40iw_hw_stats_index_64b index, + u64 *value); +void i40iw_hw_stats_init(struct i40iw_vsi_pestat *stats, u8 index, bool is_pf); + +/* vsi misc functions */ +enum i40iw_status_code i40iw_vsi_stats_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_stats_info *info); +void i40iw_vsi_stats_free(struct i40iw_sc_vsi *vsi); 
+void i40iw_sc_vsi_init(struct i40iw_sc_vsi *vsi, struct i40iw_vsi_init_info *info); + +void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2params); +void i40iw_qp_add_qos(struct i40iw_sc_qp *qp); +void i40iw_qp_rem_qos(struct i40iw_sc_qp *qp); +void i40iw_terminate_send_fin(struct i40iw_sc_qp *qp); + +void i40iw_terminate_connection(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info); + +void i40iw_terminate_received(struct i40iw_sc_qp *qp, struct i40iw_aeqe_info *info); + +enum i40iw_status_code i40iw_sc_suspend_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, u64 scratch); + +enum i40iw_status_code i40iw_sc_resume_qp(struct i40iw_sc_cqp *cqp, + struct i40iw_sc_qp *qp, u64 scratch); + +enum i40iw_status_code i40iw_sc_static_hmc_pages_allocated(struct i40iw_sc_cqp *cqp, + u64 scratch, u8 hmc_fn_id, + bool post_sq, + bool poll_registers); + +enum i40iw_status_code i40iw_config_fpm_values(struct i40iw_sc_dev *dev, u32 qp_count); + +void free_sd_mem(struct i40iw_sc_dev *dev); + +enum i40iw_status_code i40iw_process_cqp_cmd(struct i40iw_sc_dev *dev, + struct cqp_commands_info *pcmdinfo); + +enum i40iw_status_code i40iw_process_bh(struct i40iw_sc_dev *dev); + +/* prototype for functions used for dynamic memory allocation */ +enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw, + struct i40iw_dma_mem *mem, u64 size, + u32 alignment); +void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem); +enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem, u32 size); +enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem); +u8 i40iw_get_encoded_wqe_size(u32 wqsize, bool cqpsq); +void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev); + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c new file mode 100644 index 000000000..3fafc5424 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c @@ -0,0 +1,610 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#include "i40iw_status.h" +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_hmc.h" + +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" + +#include <linux/pci.h> +#include <linux/genalloc.h> +#include <linux/vmalloc.h> +#include "i40iw_pble.h" +#include "i40iw.h" + +struct i40iw_device; +static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc); +static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk); + +/** + * i40iw_destroy_pble_pool - destroy pool during module unload + * @pble_rsrc: pble resources + */ +void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc) +{ + struct list_head *clist; + struct list_head *tlist; + struct i40iw_chunk *chunk; + struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo; + + if (pinfo->pool) { + list_for_each_safe(clist, tlist, &pinfo->clist) { + chunk = list_entry(clist, struct i40iw_chunk, list); + if (chunk->type == I40IW_VMALLOC) + i40iw_free_vmalloc_mem(dev->hw, chunk); + kfree(chunk); + } + gen_pool_destroy(pinfo->pool); + } +} + +/** + * i40iw_hmc_init_pble - Initialize pble resources during module load + * @dev: i40iw_sc_dev struct + * @pble_rsrc: pble resources + */ +enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc) +{ + struct i40iw_hmc_info *hmc_info; + u32 fpm_idx = 0; + + hmc_info = dev->hmc_info; + pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base; + /* Now start the pble' on 4k boundary */ + if (pble_rsrc->fpm_base_addr & 0xfff) + fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3; + + pble_rsrc->unallocated_pble = + hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx; + pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3); + + pble_rsrc->pinfo.pool_shift = POOL_SHIFT; + pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1); + INIT_LIST_HEAD(&pble_rsrc->pinfo.clist); + if (!pble_rsrc->pinfo.pool) + goto error; + + if (add_pble_pool(dev, pble_rsrc)) + goto error; + + return 0; + + error:i40iw_destroy_pble_pool(dev, pble_rsrc); + return I40IW_ERR_NO_MEMORY; +} + +/** + * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address + * @ pble_rsrc: structure containing fpm address + * @ idx: where to return indexes + */ +static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct sd_pd_idx *idx) +{ + idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE; + idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE; + idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD); +} + +/** + * add_sd_direct - add sd direct for pble + * @dev: hardware control device structure + * @pble_rsrc: pble resource ptr + * @info: page info for sd + */ +static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_add_page_info *info) +{ + enum i40iw_status_code ret_code = 0; + struct sd_pd_idx *idx = &info->idx; + struct i40iw_chunk *chunk = info->chunk; + struct i40iw_hmc_info *hmc_info = info->hmc_info; + struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry; + u32 offset = 0; + + if (!sd_entry->valid) { + if (dev->is_pf) { + ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info, + info->idx.sd_idx, + I40IW_SD_TYPE_DIRECT, + I40IW_HMC_DIRECT_BP_SIZE); + if (ret_code) + 
return ret_code; + chunk->type = I40IW_DMA_COHERENT; + } + } + offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT; + chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT; + chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset); + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n", + chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr); + return 0; +} + +/** + * i40iw_free_vmalloc_mem - free vmalloc during close + * @hw: hw struct + * @chunk: chunk information for vmalloc + */ +static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + int i; + + if (!chunk->pg_cnt) + goto done; + for (i = 0; i < chunk->pg_cnt; i++) + dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL); + + done: + kfree(chunk->dmaaddrs); + chunk->dmaaddrs = NULL; + vfree(chunk->vaddr); + chunk->vaddr = NULL; + chunk->type = 0; +} + +/** + * i40iw_get_vmalloc_mem - get 2M page for sd + * @hw: hardware address + * @chunk: chunk to adf + * @pg_cnt: #of 4 K pages + */ +static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw, + struct i40iw_chunk *chunk, + int pg_cnt) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + struct page *page; + u8 *addr; + u32 size; + int i; + + chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL); + if (!chunk->dmaaddrs) + return I40IW_ERR_NO_MEMORY; + size = PAGE_SIZE * pg_cnt; + chunk->vaddr = vmalloc(size); + if (!chunk->vaddr) { + kfree(chunk->dmaaddrs); + chunk->dmaaddrs = NULL; + return I40IW_ERR_NO_MEMORY; + } + chunk->size = size; + addr = (u8 *)chunk->vaddr; + for (i = 0; i < pg_cnt; i++) { + page = vmalloc_to_page((void *)addr); + if (!page) + break; + chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i])) + break; + addr += PAGE_SIZE; + } + + chunk->pg_cnt = i; + chunk->type = I40IW_VMALLOC; + if (i == pg_cnt) + return 0; + + i40iw_free_vmalloc_mem(hw, chunk); + return I40IW_ERR_NO_MEMORY; +} + +/** + * fpm_to_idx - given fpm address, get pble index + * @pble_rsrc: pble resource management + * @addr: fpm address for index + */ +static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr) +{ + return (addr - (pble_rsrc->fpm_base_addr)) >> 3; +} + +/** + * add_bp_pages - add backing pages for sd + * @dev: hardware control device structure + * @pble_rsrc: pble resource management + * @info: page info for sd + */ +static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_add_page_info *info) +{ + u8 *addr; + struct i40iw_dma_mem mem; + struct i40iw_hmc_pd_entry *pd_entry; + struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry; + struct i40iw_hmc_info *hmc_info = info->hmc_info; + struct i40iw_chunk *chunk = info->chunk; + struct i40iw_manage_vf_pble_info vf_pble_info; + enum i40iw_status_code status = 0; + u32 rel_pd_idx = info->idx.rel_pd_idx; + u32 pd_idx = info->idx.pd_idx; + u32 i; + + status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages); + if (status) + return I40IW_ERR_NO_MEMORY; + status = i40iw_add_sd_table_entry(dev->hw, hmc_info, + info->idx.sd_idx, I40IW_SD_TYPE_PAGED, + I40IW_HMC_DIRECT_BP_SIZE); + if (status) + goto error; + if (!dev->is_pf) { + status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE, + fpm_to_idx(pble_rsrc, + pble_rsrc->next_fpm_addr), + 
(info->pages << PBLE_512_SHIFT)); + if (status) { + i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status); + goto error; + } + } + addr = chunk->vaddr; + for (i = 0; i < info->pages; i++) { + mem.pa = chunk->dmaaddrs[i]; + mem.size = PAGE_SIZE; + mem.va = (void *)(addr); + pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++]; + if (!pd_entry->valid) { + status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem); + if (status) + goto error; + addr += PAGE_SIZE; + } else { + i40iw_pr_err("pd entry is valid expecting to be invalid\n"); + } + } + if (!dev->is_pf) { + vf_pble_info.first_pd_index = info->idx.rel_pd_idx; + vf_pble_info.inv_pd_ent = false; + vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE; + vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa; + vf_pble_info.sd_index = info->idx.sd_idx; + status = i40iw_hw_manage_vf_pble_bp(dev->back_dev, + &vf_pble_info, true); + if (status) { + i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status); + goto error; + } + } + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + return 0; +error: + i40iw_free_vmalloc_mem(dev->hw, chunk); + return status; +} + +/** + * add_pble_pool - add a sd entry for pble resoure + * @dev: hardware control device structure + * @pble_rsrc: pble resource management + */ +static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc) +{ + struct i40iw_hmc_sd_entry *sd_entry; + struct i40iw_hmc_info *hmc_info; + struct i40iw_chunk *chunk; + struct i40iw_add_page_info info; + struct sd_pd_idx *idx = &info.idx; + enum i40iw_status_code ret_code = 0; + enum i40iw_sd_entry_type sd_entry_type; + u64 sd_reg_val = 0; + u32 pages; + + if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE) + return I40IW_ERR_NO_MEMORY; + if (pble_rsrc->next_fpm_addr & 0xfff) { + i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr); + return I40IW_ERR_INVALID_PAGE_DESC_INDEX; + } + chunk = kzalloc(sizeof(*chunk), GFP_KERNEL); + if (!chunk) + return I40IW_ERR_NO_MEMORY; + hmc_info = dev->hmc_info; + chunk->fpm_addr = pble_rsrc->next_fpm_addr; + get_sd_pd_idx(pble_rsrc, idx); + sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx]; + pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD - + idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD; + pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT); + info.chunk = chunk; + info.hmc_info = hmc_info; + info.pages = pages; + info.sd_entry = sd_entry; + if (!sd_entry->valid) { + sd_entry_type = (!idx->rel_pd_idx && + (pages == I40IW_HMC_PD_CNT_IN_SD) && + dev->is_pf) ? 
I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED; + } else { + sd_entry_type = sd_entry->entry_type; + } + i40iw_debug(dev, I40IW_DEBUG_PBLE, + "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n", + pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr); + i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n", + sd_entry_type, sd_entry->valid); + + if (sd_entry_type == I40IW_SD_TYPE_DIRECT) + ret_code = add_sd_direct(dev, pble_rsrc, &info); + if (ret_code) + sd_entry_type = I40IW_SD_TYPE_PAGED; + else + pble_rsrc->stats_direct_sds++; + + if (sd_entry_type == I40IW_SD_TYPE_PAGED) { + ret_code = add_bp_pages(dev, pble_rsrc, &info); + if (ret_code) + goto error; + else + pble_rsrc->stats_paged_sds++; + } + + if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr, + (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) { + i40iw_pr_err("could not allocate memory by gen_pool_addr_virt()\n"); + ret_code = I40IW_ERR_NO_MEMORY; + goto error; + } + pble_rsrc->next_fpm_addr += chunk->size; + i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n", + pble_rsrc->next_fpm_addr, chunk->size, chunk->size); + pble_rsrc->unallocated_pble -= (chunk->size >> 3); + sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ? + sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa; + if (dev->is_pf && !sd_entry->valid) { + ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id, + sd_reg_val, idx->sd_idx, + sd_entry->entry_type, true); + if (ret_code) { + i40iw_pr_err("cqp cmd failed for sd (pbles)\n"); + goto error; + } + } + + sd_entry->valid = true; + list_add(&chunk->list, &pble_rsrc->pinfo.clist); + return 0; + error: + kfree(chunk); + return ret_code; +} + +/** + * free_lvl2 - fee level 2 pble + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + */ +static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc) +{ + u32 i; + struct gen_pool *pool; + struct i40iw_pble_level2 *lvl2 = &palloc->level2; + struct i40iw_pble_info *root = &lvl2->root; + struct i40iw_pble_info *leaf = lvl2->leaf; + + pool = pble_rsrc->pinfo.pool; + + for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { + if (leaf->addr) + gen_pool_free(pool, leaf->addr, (leaf->cnt << 3)); + else + break; + } + + if (root->addr) + gen_pool_free(pool, root->addr, (root->cnt << 3)); + + kfree(lvl2->leaf); + lvl2->leaf = NULL; +} + +/** + * get_lvl2_pble - get level 2 pble resource + * @pble_rsrc: pble resource management + * @palloc: level 2 pble allocation + * @pool: pool pointer + */ +static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + struct gen_pool *pool) +{ + u32 lf4k, lflast, total, i; + u32 pblcnt = PBLE_PER_PAGE; + u64 *addr; + struct i40iw_pble_level2 *lvl2 = &palloc->level2; + struct i40iw_pble_info *root = &lvl2->root; + struct i40iw_pble_info *leaf; + + /* number of full 512 (4K) leafs) */ + lf4k = palloc->total_cnt >> 9; + lflast = palloc->total_cnt % PBLE_PER_PAGE; + total = (lflast == 0) ? 
lf4k : lf4k + 1; + lvl2->leaf_cnt = total; + + leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC); + if (!leaf) + return I40IW_ERR_NO_MEMORY; + lvl2->leaf = leaf; + /* allocate pbles for the root */ + root->addr = gen_pool_alloc(pool, (total << 3)); + if (!root->addr) { + kfree(lvl2->leaf); + lvl2->leaf = NULL; + return I40IW_ERR_NO_MEMORY; + } + root->idx = fpm_to_idx(pble_rsrc, + (u64)gen_pool_virt_to_phys(pool, root->addr)); + root->cnt = total; + addr = (u64 *)root->addr; + for (i = 0; i < total; i++, leaf++) { + pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE; + leaf->addr = gen_pool_alloc(pool, (pblcnt << 3)); + if (!leaf->addr) + goto error; + leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr)); + + leaf->cnt = pblcnt; + *addr = (u64)leaf->idx; + addr++; + } + palloc->level = I40IW_LEVEL_2; + pble_rsrc->stats_lvl2++; + return 0; + error: + free_lvl2(pble_rsrc, palloc); + return I40IW_ERR_NO_MEMORY; +} + +/** + * get_lvl1_pble - get level 1 pble resource + * @dev: hardware control device structure + * @pble_rsrc: pble resource management + * @palloc: level 1 pble allocation + */ +static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc) +{ + u64 *addr; + struct gen_pool *pool; + struct i40iw_pble_info *lvl1 = &palloc->level1; + + pool = pble_rsrc->pinfo.pool; + addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3)); + + if (!addr) + return I40IW_ERR_NO_MEMORY; + + palloc->level = I40IW_LEVEL_1; + lvl1->addr = (unsigned long)addr; + lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, + (unsigned long)addr)); + lvl1->cnt = palloc->total_cnt; + pble_rsrc->stats_lvl1++; + return 0; +} + +/** + * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine + * @dev: i40iw_sc_dev struct + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble (idx + pble addr) + * @pool: pointer to general purpose special memory pool descriptor + */ +static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + struct gen_pool *pool) +{ + enum i40iw_status_code status = 0; + + status = get_lvl1_pble(dev, pble_rsrc, palloc); + if (status && (palloc->total_cnt > PBLE_PER_PAGE)) + status = get_lvl2_pble(pble_rsrc, palloc, pool); + return status; +} + +/** + * i40iw_get_pble - allocate pbles from the pool + * @dev: i40iw_sc_dev struct + * @pble_rsrc: pble resources + * @palloc: contains all inforamtion regarding pble (idx + pble addr) + * @pble_cnt: #of pbles requested + */ +enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + u32 pble_cnt) +{ + struct gen_pool *pool; + enum i40iw_status_code status = 0; + u32 max_sds = 0; + int i; + + pool = pble_rsrc->pinfo.pool; + palloc->total_cnt = pble_cnt; + palloc->level = I40IW_LEVEL_0; + /*check first to see if we can get pble's without acquiring additional sd's */ + status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool); + if (!status) + goto exit; + max_sds = (palloc->total_cnt >> 18) + 1; + for (i = 0; i < max_sds; i++) { + status = add_pble_pool(dev, pble_rsrc); + if (status) + break; + status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool); + if (!status) + break; + } +exit: + if (!status) + pble_rsrc->stats_alloc_ok++; + else + pble_rsrc->stats_alloc_fail++; + + return status; +} + +/** + * 
i40iw_free_pble - put pbles back into pool
+ * @pble_rsrc: pble resources
+ * @palloc: contains all information regarding pble resource being freed
+ */
+void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
+		     struct i40iw_pble_alloc *palloc)
+{
+	struct gen_pool *pool;
+
+	pool = pble_rsrc->pinfo.pool;
+	if (palloc->level == I40IW_LEVEL_2)
+		free_lvl2(pble_rsrc, palloc);
+	else
+		gen_pool_free(pool, palloc->level1.addr,
+			      (palloc->level1.cnt << 3));
+	pble_rsrc->stats_alloc_freed++;
+}
diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.h b/drivers/infiniband/hw/i40iw/i40iw_pble.h
new file mode 100644
index 000000000..7b1851d21
--- /dev/null
+++ b/drivers/infiniband/hw/i40iw/i40iw_pble.h
@@ -0,0 +1,131 @@
+/*******************************************************************************
+*
+* Copyright (c) 2015-2016 Intel Corporation. All rights reserved.
+*
+* This software is available to you under a choice of one of two
+* licenses. You may choose to be licensed under the terms of the GNU
+* General Public License (GPL) Version 2, available from the file
+* COPYING in the main directory of this source tree, or the
+* OpenFabrics.org BSD license below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_PBLE_H +#define I40IW_PBLE_H + +#define POOL_SHIFT 6 +#define PBLE_PER_PAGE 512 +#define I40IW_HMC_PAGED_BP_SHIFT 12 +#define PBLE_512_SHIFT 9 + +enum i40iw_pble_level { + I40IW_LEVEL_0 = 0, + I40IW_LEVEL_1 = 1, + I40IW_LEVEL_2 = 2 +}; + +enum i40iw_alloc_type { + I40IW_NO_ALLOC = 0, + I40IW_DMA_COHERENT = 1, + I40IW_VMALLOC = 2 +}; + +struct i40iw_pble_info { + unsigned long addr; + u32 idx; + u32 cnt; +}; + +struct i40iw_pble_level2 { + struct i40iw_pble_info root; + struct i40iw_pble_info *leaf; + u32 leaf_cnt; +}; + +struct i40iw_pble_alloc { + u32 total_cnt; + enum i40iw_pble_level level; + union { + struct i40iw_pble_info level1; + struct i40iw_pble_level2 level2; + }; +}; + +struct sd_pd_idx { + u32 sd_idx; + u32 pd_idx; + u32 rel_pd_idx; +}; + +struct i40iw_add_page_info { + struct i40iw_chunk *chunk; + struct i40iw_hmc_sd_entry *sd_entry; + struct i40iw_hmc_info *hmc_info; + struct sd_pd_idx idx; + u32 pages; +}; + +struct i40iw_chunk { + struct list_head list; + u32 size; + void *vaddr; + u64 fpm_addr; + u32 pg_cnt; + dma_addr_t *dmaaddrs; + enum i40iw_alloc_type type; +}; + +struct i40iw_pble_pool { + struct gen_pool *pool; + struct list_head clist; + u32 total_pble_alloc; + u32 free_pble_cnt; + u32 pool_shift; +}; + +struct i40iw_hmc_pble_rsrc { + u32 unallocated_pble; + u64 fpm_base_addr; + u64 next_fpm_addr; + struct i40iw_pble_pool pinfo; + + u32 stats_direct_sds; + u32 stats_paged_sds; + u64 stats_alloc_ok; + u64 stats_alloc_fail; + u64 stats_alloc_freed; + u64 stats_lvl1; + u64 stats_lvl2; +}; + +void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc); +enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc); +void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc, struct i40iw_pble_alloc *palloc); +enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev, + struct i40iw_hmc_pble_rsrc *pble_rsrc, + struct i40iw_pble_alloc *palloc, + u32 pble_cnt); +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c new file mode 100644 index 000000000..d9c7ae6a7 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -0,0 +1,1493 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" + +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_puda.h" + +static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi, + struct i40iw_puda_buf *buf); +static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid); +static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx); +static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc + *rsrc, bool initial); +/** + * i40iw_puda_get_listbuf - get buffer from puda list + * @list: list to use for buffers (ILQ or IEQ) + */ +static struct i40iw_puda_buf *i40iw_puda_get_listbuf(struct list_head *list) +{ + struct i40iw_puda_buf *buf = NULL; + + if (!list_empty(list)) { + buf = (struct i40iw_puda_buf *)list->next; + list_del((struct list_head *)&buf->list); + } + return buf; +} + +/** + * i40iw_puda_get_bufpool - return buffer from resource + * @rsrc: resource to use for buffer + */ +struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_puda_buf *buf = NULL; + struct list_head *list = &rsrc->bufpool; + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + buf = i40iw_puda_get_listbuf(list); + if (buf) + rsrc->avail_buf_count--; + else + rsrc->stats_buf_alloc_fail++; + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + return buf; +} + +/** + * i40iw_puda_ret_bufpool - return buffer to rsrc list + * @rsrc: resource to use for buffer + * @buf: buffe to return to resouce + */ +void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc, + struct i40iw_puda_buf *buf) +{ + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + list_add(&buf->list, &rsrc->bufpool); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->avail_buf_count++; +} + +/** + * i40iw_puda_post_recvbuf - set wqe for rcv buffer + * @rsrc: resource ptr + * @wqe_idx: wqe index to use + * @buf: puda buffer for rcv q + * @initial: flag if during init time + */ +static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx, + struct i40iw_puda_buf *buf, bool initial) +{ + u64 *wqe; + struct i40iw_sc_qp *qp = &rsrc->qp; + u64 offset24 = 0; + + qp->qp_uk.rq_wrid_array[wqe_idx] = (uintptr_t)buf; + wqe = qp->qp_uk.rq_base[wqe_idx].elem; + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s: wqe_idx= %d buf = %p wqe = %p\n", __func__, + wqe_idx, buf, wqe); + if (!initial) + get_64bit_val(wqe, 24, &offset24); + + offset24 = (offset24) ? 
0 : LS_64(1, I40IWQPSQ_VALID); + + set_64bit_val(wqe, 0, buf->mem.pa); + set_64bit_val(wqe, 8, + LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); + i40iw_insert_wqe_hdr(wqe, offset24); +} + +/** + * i40iw_puda_replenish_rq - post rcv buffers + * @rsrc: resource to use for buffer + * @initial: flag if during init time + */ +static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc *rsrc, + bool initial) +{ + u32 i; + u32 invalid_cnt = rsrc->rxq_invalid_cnt; + struct i40iw_puda_buf *buf = NULL; + + for (i = 0; i < invalid_cnt; i++) { + buf = i40iw_puda_get_bufpool(rsrc); + if (!buf) + return I40IW_ERR_list_empty; + i40iw_puda_post_recvbuf(rsrc, rsrc->rx_wqe_idx, buf, + initial); + rsrc->rx_wqe_idx = + ((rsrc->rx_wqe_idx + 1) % rsrc->rq_size); + rsrc->rxq_invalid_cnt--; + } + return 0; +} + +/** + * i40iw_puda_alloc_buf - allocate mem for buffer + * @dev: iwarp device + * @length: length of buffer + */ +static struct i40iw_puda_buf *i40iw_puda_alloc_buf(struct i40iw_sc_dev *dev, + u32 length) +{ + struct i40iw_puda_buf *buf = NULL; + struct i40iw_virt_mem buf_mem; + enum i40iw_status_code ret; + + ret = i40iw_allocate_virt_mem(dev->hw, &buf_mem, + sizeof(struct i40iw_puda_buf)); + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s: error mem for buf\n", __func__); + return NULL; + } + buf = (struct i40iw_puda_buf *)buf_mem.va; + ret = i40iw_allocate_dma_mem(dev->hw, &buf->mem, length, 1); + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s: error dma mem for buf\n", __func__); + i40iw_free_virt_mem(dev->hw, &buf_mem); + return NULL; + } + buf->buf_mem.va = buf_mem.va; + buf->buf_mem.size = buf_mem.size; + return buf; +} + +/** + * i40iw_puda_dele_buf - delete buffer back to system + * @dev: iwarp device + * @buf: buffer to free + */ +static void i40iw_puda_dele_buf(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf) +{ + i40iw_free_dma_mem(dev->hw, &buf->mem); + i40iw_free_virt_mem(dev->hw, &buf->buf_mem); +} + +/** + * i40iw_puda_get_next_send_wqe - return next wqe for processing + * @qp: puda qp for wqe + * @wqe_idx: wqe index for caller + */ +static u64 *i40iw_puda_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx) +{ + u64 *wqe = NULL; + enum i40iw_status_code ret_code = 0; + + *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return wqe; + wqe = qp->sq_base[*wqe_idx].elem; + + return wqe; +} + +/** + * i40iw_puda_poll_info - poll cq for completion + * @cq: cq for poll + * @info: info return for successful completion + */ +static enum i40iw_status_code i40iw_puda_poll_info(struct i40iw_sc_cq *cq, + struct i40iw_puda_completion_info *info) +{ + u64 qword0, qword2, qword3; + u64 *cqe; + u64 comp_ctx; + bool valid_bit; + u32 major_err, minor_err; + bool error; + + cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk); + get_64bit_val(cqe, 24, &qword3); + valid_bit = (bool)RS_64(qword3, I40IW_CQ_VALID); + + if (valid_bit != cq->cq_uk.polarity) + return I40IW_ERR_QUEUE_EMPTY; + + i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32); + error = (bool)RS_64(qword3, I40IW_CQ_ERROR); + if (error) { + i40iw_debug(cq->dev, I40IW_DEBUG_PUDA, "%s receive error\n", __func__); + major_err = (u32)(RS_64(qword3, I40IW_CQ_MAJERR)); + minor_err = (u32)(RS_64(qword3, I40IW_CQ_MINERR)); + info->compl_error = major_err << 16 | minor_err; + return I40IW_ERR_CQ_COMPL_ERROR; + } + + get_64bit_val(cqe, 0, &qword0); + get_64bit_val(cqe, 16, &qword2); + 
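+ /* CQE is valid and error-free: decode queue type, qp id/context and wqe index below; RQ completions also carry vlan validity, L3/L4 protocol and payload length */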
+ info->q_type = (u8)RS_64(qword3, I40IW_CQ_SQ); + info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); + + get_64bit_val(cqe, 8, &comp_ctx); + info->qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx; + info->wqe_idx = (u32)RS_64(qword3, I40IW_CQ_WQEIDX); + + if (info->q_type == I40IW_CQE_QTYPE_RQ) { + info->vlan_valid = (bool)RS_64(qword3, I40IW_VLAN_TAG_VALID); + info->l4proto = (u8)RS_64(qword2, I40IW_UDA_L4PROTO); + info->l3proto = (u8)RS_64(qword2, I40IW_UDA_L3PROTO); + info->payload_len = (u16)RS_64(qword0, I40IW_UDA_PAYLOADLEN); + } + + return 0; +} + +/** + * i40iw_puda_poll_completion - processes completion for cq + * @dev: iwarp device + * @cq: cq getting interrupt + * @compl_err: return any completion err + */ +enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev, + struct i40iw_sc_cq *cq, u32 *compl_err) +{ + struct i40iw_qp_uk *qp; + struct i40iw_cq_uk *cq_uk = &cq->cq_uk; + struct i40iw_puda_completion_info info; + enum i40iw_status_code ret = 0; + struct i40iw_puda_buf *buf; + struct i40iw_puda_rsrc *rsrc; + void *sqwrid; + u8 cq_type = cq->cq_type; + unsigned long flags; + + if ((cq_type == I40IW_CQ_TYPE_ILQ) || (cq_type == I40IW_CQ_TYPE_IEQ)) { + rsrc = (cq_type == I40IW_CQ_TYPE_ILQ) ? cq->vsi->ilq : cq->vsi->ieq; + } else { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s qp_type error\n", __func__); + return I40IW_ERR_BAD_PTR; + } + memset(&info, 0, sizeof(info)); + ret = i40iw_puda_poll_info(cq, &info); + *compl_err = info.compl_error; + if (ret == I40IW_ERR_QUEUE_EMPTY) + return ret; + if (ret) + goto done; + + qp = info.qp; + if (!qp || !rsrc) { + ret = I40IW_ERR_BAD_PTR; + goto done; + } + + if (qp->qp_id != rsrc->qp_id) { + ret = I40IW_ERR_BAD_PTR; + goto done; + } + + if (info.q_type == I40IW_CQE_QTYPE_RQ) { + buf = (struct i40iw_puda_buf *)(uintptr_t)qp->rq_wrid_array[info.wqe_idx]; + /* Get all the tcpip information in the buf header */ + ret = i40iw_puda_get_tcpip_info(&info, buf); + if (ret) { + rsrc->stats_rcvd_pkt_err++; + if (cq_type == I40IW_CQ_TYPE_ILQ) { + i40iw_ilq_putback_rcvbuf(&rsrc->qp, + info.wqe_idx); + } else { + i40iw_puda_ret_bufpool(rsrc, buf); + i40iw_puda_replenish_rq(rsrc, false); + } + goto done; + } + + rsrc->stats_pkt_rcvd++; + rsrc->compl_rxwqe_idx = info.wqe_idx; + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s RQ completion\n", __func__); + rsrc->receive(rsrc->vsi, buf); + if (cq_type == I40IW_CQ_TYPE_ILQ) + i40iw_ilq_putback_rcvbuf(&rsrc->qp, info.wqe_idx); + else + i40iw_puda_replenish_rq(rsrc, false); + + } else { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s SQ completion\n", __func__); + sqwrid = (void *)(uintptr_t)qp->sq_wrtrk_array[info.wqe_idx].wrid; + I40IW_RING_SET_TAIL(qp->sq_ring, info.wqe_idx); + rsrc->xmit_complete(rsrc->vsi, sqwrid); + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + rsrc->tx_wqe_avail_cnt++; + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + if (!list_empty(&rsrc->txpend)) + i40iw_puda_send_buf(rsrc, NULL); + } + +done: + I40IW_RING_MOVE_HEAD(cq_uk->cq_ring, ret); + if (I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring) == 0) + cq_uk->polarity = !cq_uk->polarity; + /* update cq tail in cq shadow memory also */ + I40IW_RING_MOVE_TAIL(cq_uk->cq_ring); + set_64bit_val(cq_uk->shadow_area, 0, + I40IW_RING_GETCURRENT_HEAD(cq_uk->cq_ring)); + return 0; +} + +/** + * i40iw_puda_send - complete send wqe for transmit + * @qp: puda qp for send + * @info: buffer information for transmit + */ +enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, + struct i40iw_puda_send_info *info) +{ + u64 *wqe; + u32 iplen, 
l4len; + u64 header[2]; + u32 wqe_idx; + u8 iipt; + + /* number of 32 bits DWORDS in header */ + l4len = info->tcplen >> 2; + if (info->ipv4) { + iipt = 3; + iplen = 5; + } else { + iipt = 1; + iplen = 10; + } + + wqe = i40iw_puda_get_next_send_wqe(&qp->qp_uk, &wqe_idx); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + qp->qp_uk.sq_wrtrk_array[wqe_idx].wrid = (uintptr_t)info->scratch; + /* Third line of WQE descriptor */ + /* maclen is in words */ + header[0] = LS_64((info->maclen >> 1), I40IW_UDA_QPSQ_MACLEN) | + LS_64(iplen, I40IW_UDA_QPSQ_IPLEN) | LS_64(1, I40IW_UDA_QPSQ_L4T) | + LS_64(iipt, I40IW_UDA_QPSQ_IIPT) | + LS_64(l4len, I40IW_UDA_QPSQ_L4LEN); + /* Forth line of WQE descriptor */ + header[1] = LS_64(I40IW_OP_TYPE_SEND, I40IW_UDA_QPSQ_OPCODE) | + LS_64(1, I40IW_UDA_QPSQ_SIGCOMPL) | + LS_64(info->doloopback, I40IW_UDA_QPSQ_DOLOOPBACK) | + LS_64(qp->qp_uk.swqe_polarity, I40IW_UDA_QPSQ_VALID); + + set_64bit_val(wqe, 0, info->paddr); + set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); + set_64bit_val(wqe, 16, header[0]); + + i40iw_insert_wqe_hdr(wqe, header[1]); + + i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); + i40iw_qp_post_wr(&qp->qp_uk); + return 0; +} + +/** + * i40iw_puda_send_buf - transmit puda buffer + * @rsrc: resource to use for buffer + * @buf: puda buffer to transmit + */ +void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, struct i40iw_puda_buf *buf) +{ + struct i40iw_puda_send_info info; + enum i40iw_status_code ret = 0; + unsigned long flags; + + spin_lock_irqsave(&rsrc->bufpool_lock, flags); + /* if no wqe available or not from a completion and we have + * pending buffers, we must queue new buffer + */ + if (!rsrc->tx_wqe_avail_cnt || (buf && !list_empty(&rsrc->txpend))) { + list_add_tail(&buf->list, &rsrc->txpend); + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); + rsrc->stats_sent_pkt_q++; + if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s: adding to txpend\n", __func__); + return; + } + rsrc->tx_wqe_avail_cnt--; + /* if we are coming from a completion and have pending buffers + * then Get one from pending list + */ + if (!buf) { + buf = i40iw_puda_get_listbuf(&rsrc->txpend); + if (!buf) + goto done; + } + + info.scratch = (void *)buf; + info.paddr = buf->mem.pa; + info.len = buf->totallen; + info.tcplen = buf->tcphlen; + info.maclen = buf->maclen; + info.ipv4 = buf->ipv4; + info.doloopback = (rsrc->type == I40IW_PUDA_RSRC_TYPE_IEQ); + + ret = i40iw_puda_send(&rsrc->qp, &info); + if (ret) { + rsrc->tx_wqe_avail_cnt++; + rsrc->stats_sent_pkt_q++; + list_add(&buf->list, &rsrc->txpend); + if (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, + "%s: adding to puda_send\n", __func__); + } else { + rsrc->stats_pkt_sent++; + } +done: + spin_unlock_irqrestore(&rsrc->bufpool_lock, flags); +} + +/** + * i40iw_puda_qp_setctx - during init, set qp's context + * @rsrc: qp's resource + */ +static void i40iw_puda_qp_setctx(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_sc_qp *qp = &rsrc->qp; + u64 *qp_ctx = qp->hw_host_ctx; + + set_64bit_val(qp_ctx, 8, qp->sq_pa); + set_64bit_val(qp_ctx, 16, qp->rq_pa); + + set_64bit_val(qp_ctx, 24, + LS_64(qp->hw_rq_size, I40IWQPC_RQSIZE) | + LS_64(qp->hw_sq_size, I40IWQPC_SQSIZE)); + + set_64bit_val(qp_ctx, 48, LS_64(rsrc->buf_size, I40IW_UDA_QPC_MAXFRAMESIZE)); + set_64bit_val(qp_ctx, 56, 0); + set_64bit_val(qp_ctx, 64, 1); + + set_64bit_val(qp_ctx, 136, + LS_64(rsrc->cq_id, I40IWQPC_TXCQNUM) | + LS_64(rsrc->cq_id, 
I40IWQPC_RXCQNUM)); + + set_64bit_val(qp_ctx, 160, LS_64(1, I40IWQPC_PRIVEN)); + + set_64bit_val(qp_ctx, 168, + LS_64((uintptr_t)qp, I40IWQPC_QPCOMPCTX)); + + set_64bit_val(qp_ctx, 176, + LS_64(qp->sq_tph_val, I40IWQPC_SQTPHVAL) | + LS_64(qp->rq_tph_val, I40IWQPC_RQTPHVAL) | + LS_64(qp->qs_handle, I40IWQPC_QSHANDLE)); + + i40iw_debug_buf(rsrc->dev, I40IW_DEBUG_PUDA, "PUDA QP CONTEXT", + qp_ctx, I40IW_QP_CTX_SIZE); +} + +/** + * i40iw_puda_qp_wqe - setup wqe for qp create + * @rsrc: resource for qp + */ +static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) +{ + struct i40iw_sc_cqp *cqp; + u64 *wqe; + u64 header; + struct i40iw_ccq_cqe_info compl_info; + enum i40iw_status_code status = 0; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); + set_64bit_val(wqe, 40, qp->shadow_area_pa); + header = qp->qp_uk.qp_id | + LS_64(I40IW_CQP_OP_CREATE_QP, I40IW_CQPSQ_OPCODE) | + LS_64(I40IW_QP_TYPE_UDA, I40IW_CQPSQ_QP_QPTYPE) | + LS_64(1, I40IW_CQPSQ_QP_CQNUMVALID) | + LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); + i40iw_sc_cqp_post_sq(cqp); + status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_CREATE_QP, + &compl_info); + return status; +} + +/** + * i40iw_puda_qp_create - create qp for resource + * @rsrc: resource to use for buffer + */ +static enum i40iw_status_code i40iw_puda_qp_create(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_sc_qp *qp = &rsrc->qp; + struct i40iw_qp_uk *ukqp = &qp->qp_uk; + enum i40iw_status_code ret = 0; + u32 sq_size, rq_size, t_size; + struct i40iw_dma_mem *mem; + + sq_size = rsrc->sq_size * I40IW_QP_WQE_MIN_SIZE; + rq_size = rsrc->rq_size * I40IW_QP_WQE_MIN_SIZE; + t_size = (sq_size + rq_size + (I40IW_SHADOW_AREA_SIZE << 3) + + I40IW_QP_CTX_SIZE); + /* Get page aligned memory */ + ret = + i40iw_allocate_dma_mem(rsrc->dev->hw, &rsrc->qpmem, t_size, + I40IW_HW_PAGE_SIZE); + if (ret) { + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s: error dma mem\n", __func__); + return ret; + } + + mem = &rsrc->qpmem; + memset(mem->va, 0, t_size); + qp->hw_sq_size = i40iw_get_encoded_wqe_size(rsrc->sq_size, false); + qp->hw_rq_size = i40iw_get_encoded_wqe_size(rsrc->rq_size, false); + qp->pd = &rsrc->sc_pd; + qp->qp_type = I40IW_QP_TYPE_UDA; + qp->dev = rsrc->dev; + qp->back_qp = (void *)rsrc; + qp->sq_pa = mem->pa; + qp->rq_pa = qp->sq_pa + sq_size; + qp->vsi = rsrc->vsi; + ukqp->sq_base = mem->va; + ukqp->rq_base = &ukqp->sq_base[rsrc->sq_size]; + ukqp->shadow_area = ukqp->rq_base[rsrc->rq_size].elem; + qp->shadow_area_pa = qp->rq_pa + rq_size; + qp->hw_host_ctx = ukqp->shadow_area + I40IW_SHADOW_AREA_SIZE; + qp->hw_host_ctx_pa = + qp->shadow_area_pa + (I40IW_SHADOW_AREA_SIZE << 3); + ukqp->qp_id = rsrc->qp_id; + ukqp->sq_wrtrk_array = rsrc->sq_wrtrk_array; + ukqp->rq_wrid_array = rsrc->rq_wrid_array; + + ukqp->qp_id = rsrc->qp_id; + ukqp->sq_size = rsrc->sq_size; + ukqp->rq_size = rsrc->rq_size; + + I40IW_RING_INIT(ukqp->sq_ring, ukqp->sq_size); + I40IW_RING_INIT(ukqp->initial_ring, ukqp->sq_size); + I40IW_RING_INIT(ukqp->rq_ring, ukqp->rq_size); + + if (qp->pd->dev->is_pf) + ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) + + I40E_PFPE_WQEALLOC); + else + ukqp->wqe_alloc_reg = (u32 __iomem *)(i40iw_get_hw_addr(qp->pd->dev) + + I40E_VFPE_WQEALLOC1); + + 
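/* rings and doorbell are set up above; program QoS and the qp context, then create the qp through the cqp command interface when ceq_valid, or by posting the create-qp wqe directly */ +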
qp->user_pri = 0; + i40iw_qp_add_qos(qp); + i40iw_puda_qp_setctx(rsrc); + if (rsrc->dev->ceq_valid) + ret = i40iw_cqp_qp_create_cmd(rsrc->dev, qp); + else + ret = i40iw_puda_qp_wqe(rsrc->dev, qp); + if (ret) { + i40iw_qp_rem_qos(qp); + i40iw_free_dma_mem(rsrc->dev->hw, &rsrc->qpmem); + } + return ret; +} + +/** + * i40iw_puda_cq_wqe - setup wqe for cq create + * @rsrc: resource for cq + */ +static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq) +{ + u64 *wqe; + struct i40iw_sc_cqp *cqp; + u64 header; + struct i40iw_ccq_cqe_info compl_info; + enum i40iw_status_code status = 0; + + cqp = dev->cqp; + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, 0); + if (!wqe) + return I40IW_ERR_RING_FULL; + + set_64bit_val(wqe, 0, cq->cq_uk.cq_size); + set_64bit_val(wqe, 8, RS_64_1(cq, 1)); + set_64bit_val(wqe, 16, + LS_64(cq->shadow_read_threshold, + I40IW_CQPSQ_CQ_SHADOW_READ_THRESHOLD)); + set_64bit_val(wqe, 32, cq->cq_pa); + + set_64bit_val(wqe, 40, cq->shadow_area_pa); + + header = cq->cq_uk.cq_id | + LS_64(I40IW_CQP_OP_CREATE_CQ, I40IW_CQPSQ_OPCODE) | + LS_64(1, I40IW_CQPSQ_CQ_CHKOVERFLOW) | + LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | + LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + i40iw_insert_wqe_hdr(wqe, header); + + i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", + wqe, I40IW_CQP_WQE_SIZE * 8); + + i40iw_sc_cqp_post_sq(dev->cqp); + status = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_CREATE_CQ, + &compl_info); + return status; +} + +/** + * i40iw_puda_cq_create - create cq for resource + * @rsrc: resource for which cq to create + */ +static enum i40iw_status_code i40iw_puda_cq_create(struct i40iw_puda_rsrc *rsrc) +{ + struct i40iw_sc_dev *dev = rsrc->dev; + struct i40iw_sc_cq *cq = &rsrc->cq; + enum i40iw_status_code ret = 0; + u32 tsize, cqsize; + struct i40iw_dma_mem *mem; + struct i40iw_cq_init_info info; + struct i40iw_cq_uk_init_info *init_info = &info.cq_uk_init_info; + + cq->vsi = rsrc->vsi; + cqsize = rsrc->cq_size * (sizeof(struct i40iw_cqe)); + tsize = cqsize + sizeof(struct i40iw_cq_shadow_area); + ret = i40iw_allocate_dma_mem(dev->hw, &rsrc->cqmem, tsize, + I40IW_CQ0_ALIGNMENT); + if (ret) + return ret; + + mem = &rsrc->cqmem; + memset(&info, 0, sizeof(info)); + info.dev = dev; + info.type = (rsrc->type == I40IW_PUDA_RSRC_TYPE_ILQ) ? 
+ I40IW_CQ_TYPE_ILQ : I40IW_CQ_TYPE_IEQ; + info.shadow_read_threshold = rsrc->cq_size >> 2; + info.ceq_id_valid = true; + info.cq_base_pa = mem->pa; + info.shadow_area_pa = mem->pa + cqsize; + init_info->cq_base = mem->va; + init_info->shadow_area = (u64 *)((u8 *)mem->va + cqsize); + init_info->cq_size = rsrc->cq_size; + init_info->cq_id = rsrc->cq_id; + info.ceqe_mask = true; + info.ceq_id_valid = true; + ret = dev->iw_priv_cq_ops->cq_init(cq, &info); + if (ret) + goto error; + if (rsrc->dev->ceq_valid) + ret = i40iw_cqp_cq_create_cmd(dev, cq); + else + ret = i40iw_puda_cq_wqe(dev, cq); +error: + if (ret) + i40iw_free_dma_mem(dev->hw, &rsrc->cqmem); + return ret; +} + +/** + * i40iw_puda_free_qp - free qp for resource + * @rsrc: resource for which qp to free + */ +static void i40iw_puda_free_qp(struct i40iw_puda_rsrc *rsrc) +{ + enum i40iw_status_code ret; + struct i40iw_ccq_cqe_info compl_info; + struct i40iw_sc_dev *dev = rsrc->dev; + + if (rsrc->dev->ceq_valid) { + i40iw_cqp_qp_destroy_cmd(dev, &rsrc->qp); + return; + } + + ret = dev->iw_priv_qp_ops->qp_destroy(&rsrc->qp, + 0, false, true, true); + if (ret) + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s error puda qp destroy wqe\n", + __func__); + + if (!ret) { + ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_DESTROY_QP, + &compl_info); + if (ret) + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s error puda qp destroy failed\n", + __func__); + } +} + +/** + * i40iw_puda_free_cq - free cq for resource + * @rsrc: resource for which cq to free + */ +static void i40iw_puda_free_cq(struct i40iw_puda_rsrc *rsrc) +{ + enum i40iw_status_code ret; + struct i40iw_ccq_cqe_info compl_info; + struct i40iw_sc_dev *dev = rsrc->dev; + + if (rsrc->dev->ceq_valid) { + i40iw_cqp_cq_destroy_cmd(dev, &rsrc->cq); + return; + } + ret = dev->iw_priv_cq_ops->cq_destroy(&rsrc->cq, 0, true); + + if (ret) + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s error ieq cq destroy\n", + __func__); + + if (!ret) { + ret = dev->cqp_ops->poll_for_cqp_op_done(dev->cqp, + I40IW_CQP_OP_DESTROY_CQ, + &compl_info); + if (ret) + i40iw_debug(dev, I40IW_DEBUG_PUDA, + "%s error ieq qp destroy done\n", + __func__); + } +} + +/** + * i40iw_puda_dele_resources - delete all resources during close + * @dev: iwarp device + * @type: type of resource to dele + * @reset: true if reset chip + */ +void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi, + enum puda_resource_type type, + bool reset) +{ + struct i40iw_sc_dev *dev = vsi->dev; + struct i40iw_puda_rsrc *rsrc; + struct i40iw_puda_buf *buf = NULL; + struct i40iw_puda_buf *nextbuf = NULL; + struct i40iw_virt_mem *vmem; + + switch (type) { + case I40IW_PUDA_RSRC_TYPE_ILQ: + rsrc = vsi->ilq; + vmem = &vsi->ilq_mem; + break; + case I40IW_PUDA_RSRC_TYPE_IEQ: + rsrc = vsi->ieq; + vmem = &vsi->ieq_mem; + break; + default: + i40iw_debug(dev, I40IW_DEBUG_PUDA, "%s: error resource type = 0x%x\n", + __func__, type); + return; + } + + switch (rsrc->completion) { + case PUDA_HASH_CRC_COMPLETE: + i40iw_free_hash_desc(rsrc->hash_desc); + /* fall through */ + case PUDA_QP_CREATED: + if (!reset) + i40iw_puda_free_qp(rsrc); + + i40iw_free_dma_mem(dev->hw, &rsrc->qpmem); + /* fallthrough */ + case PUDA_CQ_CREATED: + if (!reset) + i40iw_puda_free_cq(rsrc); + + i40iw_free_dma_mem(dev->hw, &rsrc->cqmem); + break; + default: + i40iw_debug(rsrc->dev, I40IW_DEBUG_PUDA, "%s error no resources\n", __func__); + break; + } + /* Free all allocated puda buffers for both tx and rx */ + buf = rsrc->alloclist; + while (buf) { + nextbuf = buf->next; + 
i40iw_puda_dele_buf(dev, buf); + buf = nextbuf; + rsrc->alloc_buf_count--; + } + i40iw_free_virt_mem(dev->hw, vmem); +} + +/** + * i40iw_puda_allocbufs - allocate buffers for resource + * @rsrc: resource for buffer allocation + * @count: number of buffers to create + */ +static enum i40iw_status_code i40iw_puda_allocbufs(struct i40iw_puda_rsrc *rsrc, + u32 count) +{ + u32 i; + struct i40iw_puda_buf *buf; + struct i40iw_puda_buf *nextbuf; + + for (i = 0; i < count; i++) { + buf = i40iw_puda_alloc_buf(rsrc->dev, rsrc->buf_size); + if (!buf) { + rsrc->stats_buf_alloc_fail++; + return I40IW_ERR_NO_MEMORY; + } + i40iw_puda_ret_bufpool(rsrc, buf); + rsrc->alloc_buf_count++; + if (!rsrc->alloclist) { + rsrc->alloclist = buf; + } else { + nextbuf = rsrc->alloclist; + rsrc->alloclist = buf; + buf->next = nextbuf; + } + } + rsrc->avail_buf_count = rsrc->alloc_buf_count; + return 0; +} + +/** + * i40iw_puda_create_rsrc - create resouce (ilq or ieq) + * @dev: iwarp device + * @info: resource information + */ +enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi, + struct i40iw_puda_rsrc_info *info) +{ + struct i40iw_sc_dev *dev = vsi->dev; + enum i40iw_status_code ret = 0; + struct i40iw_puda_rsrc *rsrc; + u32 pudasize; + u32 sqwridsize, rqwridsize; + struct i40iw_virt_mem *vmem; + + info->count = 1; + pudasize = sizeof(struct i40iw_puda_rsrc); + sqwridsize = info->sq_size * sizeof(struct i40iw_sq_uk_wr_trk_info); + rqwridsize = info->rq_size * 8; + switch (info->type) { + case I40IW_PUDA_RSRC_TYPE_ILQ: + vmem = &vsi->ilq_mem; + break; + case I40IW_PUDA_RSRC_TYPE_IEQ: + vmem = &vsi->ieq_mem; + break; + default: + return I40IW_NOT_SUPPORTED; + } + ret = + i40iw_allocate_virt_mem(dev->hw, vmem, + pudasize + sqwridsize + rqwridsize); + if (ret) + return ret; + rsrc = (struct i40iw_puda_rsrc *)vmem->va; + spin_lock_init(&rsrc->bufpool_lock); + if (info->type == I40IW_PUDA_RSRC_TYPE_ILQ) { + vsi->ilq = (struct i40iw_puda_rsrc *)vmem->va; + vsi->ilq_count = info->count; + rsrc->receive = info->receive; + rsrc->xmit_complete = info->xmit_complete; + } else { + vmem = &vsi->ieq_mem; + vsi->ieq_count = info->count; + vsi->ieq = (struct i40iw_puda_rsrc *)vmem->va; + rsrc->receive = i40iw_ieq_receive; + rsrc->xmit_complete = i40iw_ieq_tx_compl; + } + + rsrc->type = info->type; + rsrc->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)((u8 *)vmem->va + pudasize); + rsrc->rq_wrid_array = (u64 *)((u8 *)vmem->va + pudasize + sqwridsize); + /* Initialize all ieq lists */ + INIT_LIST_HEAD(&rsrc->bufpool); + INIT_LIST_HEAD(&rsrc->txpend); + + rsrc->tx_wqe_avail_cnt = info->sq_size - 1; + dev->iw_pd_ops->pd_init(dev, &rsrc->sc_pd, info->pd_id, -1); + rsrc->qp_id = info->qp_id; + rsrc->cq_id = info->cq_id; + rsrc->sq_size = info->sq_size; + rsrc->rq_size = info->rq_size; + rsrc->cq_size = info->rq_size + info->sq_size; + rsrc->buf_size = info->buf_size; + rsrc->dev = dev; + rsrc->vsi = vsi; + + ret = i40iw_puda_cq_create(rsrc); + if (!ret) { + rsrc->completion = PUDA_CQ_CREATED; + ret = i40iw_puda_qp_create(rsrc); + } + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error qp_create\n", + __func__); + goto error; + } + rsrc->completion = PUDA_QP_CREATED; + + ret = i40iw_puda_allocbufs(rsrc, info->tx_buf_cnt + info->rq_size); + if (ret) { + i40iw_debug(dev, I40IW_DEBUG_PUDA, "[%s] error alloc_buf\n", + __func__); + goto error; + } + + rsrc->rxq_invalid_cnt = info->rq_size; + ret = i40iw_puda_replenish_rq(rsrc, true); + if (ret) + goto error; + + if (info->type == I40IW_PUDA_RSRC_TYPE_IEQ) { + if 
(!i40iw_init_hash_desc(&rsrc->hash_desc)) { + rsrc->check_crc = true; + rsrc->completion = PUDA_HASH_CRC_COMPLETE; + ret = 0; + } + } + + dev->ccq_ops->ccq_arm(&rsrc->cq); + return ret; + error: + i40iw_puda_dele_resources(vsi, info->type, false); + + return ret; +} + +/** + * i40iw_ilq_putback_rcvbuf - ilq buffer to put back on rq + * @qp: ilq's qp resource + * @wqe_idx: wqe index of completed rcvbuf + */ +static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx) +{ + u64 *wqe; + u64 offset24; + + wqe = qp->qp_uk.rq_base[wqe_idx].elem; + get_64bit_val(wqe, 24, &offset24); + offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); + set_64bit_val(wqe, 24, offset24); +} + +/** + * i40iw_ieq_get_fpdu_length - given length return fpdu length + * @length: length of fpdu + */ +static u16 i40iw_ieq_get_fpdu_length(u16 length) +{ + u16 fpdu_len; + + fpdu_len = length + I40IW_IEQ_MPA_FRAMING; + fpdu_len = (fpdu_len + 3) & 0xfffffffc; + return fpdu_len; +} + +/** + * i40iw_ieq_copy_to_txbuf - copy data from rcv buf to tx buf + * @buf: rcv buffer with partial + * @txbuf: tx buffer for sending back + * @buf_offset: rcv buffer offset to copy from + * @txbuf_offset: at offset in tx buf to copy + * @length: length of data to copy + */ +static void i40iw_ieq_copy_to_txbuf(struct i40iw_puda_buf *buf, + struct i40iw_puda_buf *txbuf, + u16 buf_offset, u32 txbuf_offset, + u32 length) +{ + void *mem1 = (u8 *)buf->mem.va + buf_offset; + void *mem2 = (u8 *)txbuf->mem.va + txbuf_offset; + + memcpy(mem2, mem1, length); +} + +/** + * i40iw_ieq_setup_tx_buf - setup tx buffer for partial handling + * @buf: receive buffer with partial + * @txbuf: buffer to prepare + */ +static void i40iw_ieq_setup_tx_buf(struct i40iw_puda_buf *buf, + struct i40iw_puda_buf *txbuf) +{ + txbuf->maclen = buf->maclen; + txbuf->tcphlen = buf->tcphlen; + txbuf->ipv4 = buf->ipv4; + txbuf->hdrlen = buf->hdrlen; + i40iw_ieq_copy_to_txbuf(buf, txbuf, 0, 0, buf->hdrlen); +} + +/** + * i40iw_ieq_check_first_buf - check if rcv buffer's seq is in range + * @buf: receive exception buffer + * @fps: first partial sequence number + */ +static void i40iw_ieq_check_first_buf(struct i40iw_puda_buf *buf, u32 fps) +{ + u32 offset; + + if (buf->seqnum < fps) { + offset = fps - buf->seqnum; + if (offset > buf->datalen) + return; + buf->data += offset; + buf->datalen -= (u16)offset; + buf->seqnum = fps; + } +} + +/** + * i40iw_ieq_compl_pfpdu - write txbuf with full fpdu + * @ieq: ieq resource + * @rxlist: ieq's received buffer list + * @pbufl: temporary list for buffers for fpdu + * @txbuf: tx buffer for fpdu + * @fpdu_len: total length of fpdu + */ +static void i40iw_ieq_compl_pfpdu(struct i40iw_puda_rsrc *ieq, + struct list_head *rxlist, + struct list_head *pbufl, + struct i40iw_puda_buf *txbuf, + u16 fpdu_len) +{ + struct i40iw_puda_buf *buf; + u32 nextseqnum; + u16 txoffset, bufoffset; + + buf = i40iw_puda_get_listbuf(pbufl); + if (!buf) + return; + nextseqnum = buf->seqnum + fpdu_len; + txbuf->totallen = buf->hdrlen + fpdu_len; + txbuf->data = (u8 *)txbuf->mem.va + buf->hdrlen; + i40iw_ieq_setup_tx_buf(buf, txbuf); + + txoffset = buf->hdrlen; + bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); + + do { + if (buf->datalen >= fpdu_len) { + /* copied full fpdu */ + i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, fpdu_len); + buf->datalen -= fpdu_len; + buf->data += fpdu_len; + buf->seqnum = nextseqnum; + break; + } + /* copy partial fpdu */ + i40iw_ieq_copy_to_txbuf(buf, txbuf, bufoffset, txoffset, buf->datalen); + txoffset += buf->datalen; + 
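/* partial copy: this buffer is consumed; the remaining fpdu bytes are gathered from the next buffer(s) on pbufl */ +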
fpdu_len -= buf->datalen; + i40iw_puda_ret_bufpool(ieq, buf); + buf = i40iw_puda_get_listbuf(pbufl); + if (!buf) + return; + bufoffset = (u16)(buf->data - (u8 *)buf->mem.va); + } while (1); + + /* last buffer on the list*/ + if (buf->datalen) + list_add(&buf->list, rxlist); + else + i40iw_puda_ret_bufpool(ieq, buf); +} + +/** + * i40iw_ieq_create_pbufl - create buffer list for single fpdu + * @rxlist: resource list for receive ieq buffes + * @pbufl: temp. list for buffers for fpddu + * @buf: first receive buffer + * @fpdu_len: total length of fpdu + */ +static enum i40iw_status_code i40iw_ieq_create_pbufl( + struct i40iw_pfpdu *pfpdu, + struct list_head *rxlist, + struct list_head *pbufl, + struct i40iw_puda_buf *buf, + u16 fpdu_len) +{ + enum i40iw_status_code status = 0; + struct i40iw_puda_buf *nextbuf; + u32 nextseqnum; + u16 plen = fpdu_len - buf->datalen; + bool done = false; + + nextseqnum = buf->seqnum + buf->datalen; + do { + nextbuf = i40iw_puda_get_listbuf(rxlist); + if (!nextbuf) { + status = I40IW_ERR_list_empty; + break; + } + list_add_tail(&nextbuf->list, pbufl); + if (nextbuf->seqnum != nextseqnum) { + pfpdu->bad_seq_num++; + status = I40IW_ERR_SEQ_NUM; + break; + } + if (nextbuf->datalen >= plen) { + done = true; + } else { + plen -= nextbuf->datalen; + nextseqnum = nextbuf->seqnum + nextbuf->datalen; + } + + } while (!done); + + return status; +} + +/** + * i40iw_ieq_handle_partial - process partial fpdu buffer + * @ieq: ieq resource + * @pfpdu: partial management per user qp + * @buf: receive buffer + * @fpdu_len: fpdu len in the buffer + */ +static enum i40iw_status_code i40iw_ieq_handle_partial(struct i40iw_puda_rsrc *ieq, + struct i40iw_pfpdu *pfpdu, + struct i40iw_puda_buf *buf, + u16 fpdu_len) +{ + enum i40iw_status_code status = 0; + u8 *crcptr; + u32 mpacrc; + u32 seqnum = buf->seqnum; + struct list_head pbufl; /* partial buffer list */ + struct i40iw_puda_buf *txbuf = NULL; + struct list_head *rxlist = &pfpdu->rxlist; + + INIT_LIST_HEAD(&pbufl); + list_add(&buf->list, &pbufl); + + status = i40iw_ieq_create_pbufl(pfpdu, rxlist, &pbufl, buf, fpdu_len); + if (status) + goto error; + + txbuf = i40iw_puda_get_bufpool(ieq); + if (!txbuf) { + pfpdu->no_tx_bufs++; + status = I40IW_ERR_NO_TXBUFS; + goto error; + } + + i40iw_ieq_compl_pfpdu(ieq, rxlist, &pbufl, txbuf, fpdu_len); + i40iw_ieq_update_tcpip_info(txbuf, fpdu_len, seqnum); + crcptr = txbuf->data + fpdu_len - 4; + mpacrc = *(u32 *)crcptr; + if (ieq->check_crc) { + status = i40iw_ieq_check_mpacrc(ieq->hash_desc, txbuf->data, + (fpdu_len - 4), mpacrc); + if (status) { + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: error bad crc\n", __func__); + goto error; + } + } + + i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "IEQ TX BUFFER", + txbuf->mem.va, txbuf->totallen); + i40iw_puda_send_buf(ieq, txbuf); + pfpdu->rcv_nxt = seqnum + fpdu_len; + return status; + error: + while (!list_empty(&pbufl)) { + buf = (struct i40iw_puda_buf *)(pbufl.prev); + list_del(&buf->list); + list_add(&buf->list, rxlist); + } + if (txbuf) + i40iw_puda_ret_bufpool(ieq, txbuf); + return status; +} + +/** + * i40iw_ieq_process_buf - process buffer rcvd for ieq + * @ieq: ieq resource + * @pfpdu: partial management per user qp + * @buf: receive buffer + */ +static enum i40iw_status_code i40iw_ieq_process_buf(struct i40iw_puda_rsrc *ieq, + struct i40iw_pfpdu *pfpdu, + struct i40iw_puda_buf *buf) +{ + u16 fpdu_len = 0; + u16 datalen = buf->datalen; + u8 *datap = buf->data; + u8 *crcptr; + u16 ioffset = 0; + u32 mpacrc; + u32 seqnum = buf->seqnum; + u16 
length = 0; + u16 full = 0; + bool partial = false; + struct i40iw_puda_buf *txbuf; + struct list_head *rxlist = &pfpdu->rxlist; + enum i40iw_status_code ret = 0; + enum i40iw_status_code status = 0; + + ioffset = (u16)(buf->data - (u8 *)buf->mem.va); + while (datalen) { + fpdu_len = i40iw_ieq_get_fpdu_length(ntohs(*(__be16 *)datap)); + if (fpdu_len > pfpdu->max_fpdu_data) { + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: error bad fpdu_len\n", __func__); + status = I40IW_ERR_MPA_CRC; + list_add(&buf->list, rxlist); + return status; + } + + if (datalen < fpdu_len) { + partial = true; + break; + } + crcptr = datap + fpdu_len - 4; + mpacrc = *(u32 *)crcptr; + if (ieq->check_crc) + ret = i40iw_ieq_check_mpacrc(ieq->hash_desc, + datap, fpdu_len - 4, mpacrc); + if (ret) { + status = I40IW_ERR_MPA_CRC; + list_add(&buf->list, rxlist); + return status; + } + full++; + pfpdu->fpdu_processed++; + datap += fpdu_len; + length += fpdu_len; + datalen -= fpdu_len; + } + if (full) { + /* copy full pdu's in the txbuf and send them out */ + txbuf = i40iw_puda_get_bufpool(ieq); + if (!txbuf) { + pfpdu->no_tx_bufs++; + status = I40IW_ERR_NO_TXBUFS; + list_add(&buf->list, rxlist); + return status; + } + /* modify txbuf's buffer header */ + i40iw_ieq_setup_tx_buf(buf, txbuf); + /* copy full fpdu's to new buffer */ + i40iw_ieq_copy_to_txbuf(buf, txbuf, ioffset, buf->hdrlen, + length); + txbuf->totallen = buf->hdrlen + length; + + i40iw_ieq_update_tcpip_info(txbuf, length, buf->seqnum); + i40iw_puda_send_buf(ieq, txbuf); + + if (!datalen) { + pfpdu->rcv_nxt = buf->seqnum + length; + i40iw_puda_ret_bufpool(ieq, buf); + return status; + } + buf->data = datap; + buf->seqnum = seqnum + length; + buf->datalen = datalen; + pfpdu->rcv_nxt = buf->seqnum; + } + if (partial) + status = i40iw_ieq_handle_partial(ieq, pfpdu, buf, fpdu_len); + + return status; +} + +/** + * i40iw_ieq_process_fpdus - process fpdu's buffers on its list + * @qp: qp for which partial fpdus + * @ieq: ieq resource + */ +static void i40iw_ieq_process_fpdus(struct i40iw_sc_qp *qp, + struct i40iw_puda_rsrc *ieq) +{ + struct i40iw_pfpdu *pfpdu = &qp->pfpdu; + struct list_head *rxlist = &pfpdu->rxlist; + struct i40iw_puda_buf *buf; + enum i40iw_status_code status; + + do { + if (list_empty(rxlist)) + break; + buf = i40iw_puda_get_listbuf(rxlist); + if (!buf) { + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: error no buf\n", __func__); + break; + } + if (buf->seqnum != pfpdu->rcv_nxt) { + /* This could be out of order or missing packet */ + pfpdu->out_of_order++; + list_add(&buf->list, rxlist); + break; + } + /* keep processing buffers from the head of the list */ + status = i40iw_ieq_process_buf(ieq, pfpdu, buf); + if (status == I40IW_ERR_MPA_CRC) { + pfpdu->mpa_crc_err = true; + while (!list_empty(rxlist)) { + buf = i40iw_puda_get_listbuf(rxlist); + i40iw_puda_ret_bufpool(ieq, buf); + pfpdu->crc_err++; + } + /* create CQP for AE */ + i40iw_ieq_mpa_crc_ae(ieq->dev, qp); + } + } while (!status); +} + +/** + * i40iw_ieq_handle_exception - handle qp's exception + * @ieq: ieq resource + * @qp: qp receiving excpetion + * @buf: receive buffer + */ +static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, + struct i40iw_sc_qp *qp, + struct i40iw_puda_buf *buf) +{ + struct i40iw_puda_buf *tmpbuf = NULL; + struct i40iw_pfpdu *pfpdu = &qp->pfpdu; + u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx; + u32 rcv_wnd = hw_host_ctx[23]; + /* first partial seq # in q2 */ + u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET); + struct list_head *rxlist = &pfpdu->rxlist; + 
struct list_head *plist; + + pfpdu->total_ieq_bufs++; + + if (pfpdu->mpa_crc_err) { + pfpdu->crc_err++; + goto error; + } + if (pfpdu->mode && (fps != pfpdu->fps)) { + /* clean up qp as it is new partial sequence */ + i40iw_ieq_cleanup_qp(ieq, qp); + i40iw_debug(ieq->dev, I40IW_DEBUG_IEQ, + "%s: restarting new partial\n", __func__); + pfpdu->mode = false; + } + + if (!pfpdu->mode) { + i40iw_debug_buf(ieq->dev, I40IW_DEBUG_IEQ, "Q2 BUFFER", (u64 *)qp->q2_buf, 128); + /* First_Partial_Sequence_Number check */ + pfpdu->rcv_nxt = fps; + pfpdu->fps = fps; + pfpdu->mode = true; + pfpdu->max_fpdu_data = (buf->ipv4) ? (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV4) : + (ieq->vsi->mtu - I40IW_MTU_TO_MSS_IPV6); + pfpdu->pmode_count++; + INIT_LIST_HEAD(rxlist); + i40iw_ieq_check_first_buf(buf, fps); + } + + if (!(rcv_wnd >= (buf->seqnum - pfpdu->rcv_nxt))) { + pfpdu->bad_seq_num++; + goto error; + } + + if (!list_empty(rxlist)) { + tmpbuf = (struct i40iw_puda_buf *)rxlist->next; + while ((struct list_head *)tmpbuf != rxlist) { + if ((int)(buf->seqnum - tmpbuf->seqnum) < 0) + break; + plist = &tmpbuf->list; + tmpbuf = (struct i40iw_puda_buf *)plist->next; + } + /* Insert buf before tmpbuf */ + list_add_tail(&buf->list, &tmpbuf->list); + } else { + list_add_tail(&buf->list, rxlist); + } + i40iw_ieq_process_fpdus(qp, ieq); + return; + error: + i40iw_puda_ret_bufpool(ieq, buf); +} + +/** + * i40iw_ieq_receive - received exception buffer + * @dev: iwarp device + * @buf: exception buffer received + */ +static void i40iw_ieq_receive(struct i40iw_sc_vsi *vsi, + struct i40iw_puda_buf *buf) +{ + struct i40iw_puda_rsrc *ieq = vsi->ieq; + struct i40iw_sc_qp *qp = NULL; + u32 wqe_idx = ieq->compl_rxwqe_idx; + + qp = i40iw_ieq_get_qp(vsi->dev, buf); + if (!qp) { + ieq->stats_bad_qp_id++; + i40iw_puda_ret_bufpool(ieq, buf); + } else { + i40iw_ieq_handle_exception(ieq, qp, buf); + } + /* + * ieq->rx_wqe_idx is used by i40iw_puda_replenish_rq() + * on which wqe_idx to start replenish rq + */ + if (!ieq->rxq_invalid_cnt) + ieq->rx_wqe_idx = wqe_idx; + ieq->rxq_invalid_cnt++; +} + +/** + * i40iw_ieq_tx_compl - put back after sending completed exception buffer + * @vsi: pointer to the vsi structure + * @sqwrid: pointer to puda buffer + */ +static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid) +{ + struct i40iw_puda_rsrc *ieq = vsi->ieq; + struct i40iw_puda_buf *buf = (struct i40iw_puda_buf *)sqwrid; + + i40iw_puda_ret_bufpool(ieq, buf); +} + +/** + * i40iw_ieq_cleanup_qp - qp is being destroyed + * @ieq: ieq resource + * @qp: all pending fpdu buffers + */ +void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp) +{ + struct i40iw_puda_buf *buf; + struct i40iw_pfpdu *pfpdu = &qp->pfpdu; + struct list_head *rxlist = &pfpdu->rxlist; + + if (!pfpdu->mode) + return; + while (!list_empty(rxlist)) { + buf = i40iw_puda_get_listbuf(rxlist); + i40iw_puda_ret_bufpool(ieq, buf); + } +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h new file mode 100644 index 000000000..53a7d58c8 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h @@ -0,0 +1,188 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_PUDA_H +#define I40IW_PUDA_H + +#define I40IW_IEQ_MPA_FRAMING 6 + +struct i40iw_sc_dev; +struct i40iw_sc_qp; +struct i40iw_sc_cq; + +enum puda_resource_type { + I40IW_PUDA_RSRC_TYPE_ILQ = 1, + I40IW_PUDA_RSRC_TYPE_IEQ +}; + +enum puda_rsrc_complete { + PUDA_CQ_CREATED = 1, + PUDA_QP_CREATED, + PUDA_TX_COMPLETE, + PUDA_RX_COMPLETE, + PUDA_HASH_CRC_COMPLETE +}; + +struct i40iw_puda_completion_info { + struct i40iw_qp_uk *qp; + u8 q_type; + u8 vlan_valid; + u8 l3proto; + u8 l4proto; + u16 payload_len; + u32 compl_error; /* No_err=0, else major and minor err code */ + u32 qp_id; + u32 wqe_idx; +}; + +struct i40iw_puda_send_info { + u64 paddr; /* Physical address */ + u32 len; + u8 tcplen; + u8 maclen; + bool ipv4; + bool doloopback; + void *scratch; +}; + +struct i40iw_puda_buf { + struct list_head list; /* MUST be first entry */ + struct i40iw_dma_mem mem; /* DMA memory for the buffer */ + struct i40iw_puda_buf *next; /* for alloclist in rsrc struct */ + struct i40iw_virt_mem buf_mem; /* Buffer memory for this buffer */ + void *scratch; + u8 *iph; + u8 *tcph; + u8 *data; + u16 datalen; + u16 vlan_id; + u8 tcphlen; /* tcp length in bytes */ + u8 maclen; /* mac length in bytes */ + u32 totallen; /* machlen+iphlen+tcphlen+datalen */ + atomic_t refcount; + u8 hdrlen; + bool ipv4; + u32 seqnum; +}; + +struct i40iw_puda_rsrc_info { + enum puda_resource_type type; /* ILQ or IEQ */ + u32 count; + u16 pd_id; + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u16 buf_size; + u16 mss; + u32 tx_buf_cnt; /* total bufs allocated will be rq_size + tx_buf_cnt */ + void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *); + void (*xmit_complete)(struct i40iw_sc_vsi *, void *); +}; + +struct i40iw_puda_rsrc { + struct i40iw_sc_cq cq; + struct i40iw_sc_qp qp; + struct i40iw_sc_pd sc_pd; + struct i40iw_sc_dev *dev; + struct i40iw_sc_vsi *vsi; + struct i40iw_dma_mem cqmem; + struct i40iw_dma_mem qpmem; + struct i40iw_virt_mem ilq_mem; + enum puda_rsrc_complete completion; + enum puda_resource_type type; + u16 buf_size; /*buffer must be max datalen + tcpip hdr + mac */ + u16 mss; + u32 cq_id; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 cq_size; + struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array; 
+ u64 *rq_wrid_array; + u32 compl_rxwqe_idx; + u32 rx_wqe_idx; + u32 rxq_invalid_cnt; + u32 tx_wqe_avail_cnt; + bool check_crc; + struct shash_desc *hash_desc; + struct list_head txpend; + struct list_head bufpool; /* free buffers pool list for recv and xmit */ + u32 alloc_buf_count; + u32 avail_buf_count; /* snapshot of currently available buffers */ + spinlock_t bufpool_lock; + struct i40iw_puda_buf *alloclist; + void (*receive)(struct i40iw_sc_vsi *, struct i40iw_puda_buf *); + void (*xmit_complete)(struct i40iw_sc_vsi *, void *); + /* puda stats */ + u64 stats_buf_alloc_fail; + u64 stats_pkt_rcvd; + u64 stats_pkt_sent; + u64 stats_rcvd_pkt_err; + u64 stats_sent_pkt_q; + u64 stats_bad_qp_id; +}; + +struct i40iw_puda_buf *i40iw_puda_get_bufpool(struct i40iw_puda_rsrc *rsrc); +void i40iw_puda_ret_bufpool(struct i40iw_puda_rsrc *rsrc, + struct i40iw_puda_buf *buf); +void i40iw_puda_send_buf(struct i40iw_puda_rsrc *rsrc, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, + struct i40iw_puda_send_info *info); +enum i40iw_status_code i40iw_puda_create_rsrc(struct i40iw_sc_vsi *vsi, + struct i40iw_puda_rsrc_info *info); +void i40iw_puda_dele_resources(struct i40iw_sc_vsi *vsi, + enum puda_resource_type type, + bool reset); +enum i40iw_status_code i40iw_puda_poll_completion(struct i40iw_sc_dev *dev, + struct i40iw_sc_cq *cq, u32 *compl_err); + +struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info, + struct i40iw_puda_buf *buf); +enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, + void *addr, u32 length, u32 value); +enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc); +void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); +void i40iw_free_hash_desc(struct shash_desc *desc); +void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, + u32 seqnum); +enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); +enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq); +void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); +void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq); +void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp); +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_register.h b/drivers/infiniband/hw/i40iw/i40iw_register.h new file mode 100644 index 000000000..57768184e --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_register.h @@ -0,0 +1,1030 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. 
+* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_REGISTER_H +#define I40IW_REGISTER_H + +#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */ + +#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */ +#define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0 +#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) +#define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16 +#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT) +#define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31 +#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0 +#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1 +#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2 +#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) + +#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */ +#define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0 +#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT) +#define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1 +#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT) +#define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3 +#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) + +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ + +#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15 +#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK (0x1 << I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT) +#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */ +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4 +#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT) +#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */ +#define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0 +#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK 0xFF + +#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */ +#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */ +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */ +#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_PFPE_CCQPSTATUS 0x00008100 /* 
Reset: PFR */ +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */ +#define I40E_PFPE_CQACK_PECQID_SHIFT 0 +#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT) +#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */ +#define I40E_PFPE_CQARM_PECQID_SHIFT 0 +#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT) +#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */ +#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */ +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */ +#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */ +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */ +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */ +#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */ +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */ +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */ +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT) + +#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */ +#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << 
I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) + +#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC_MAX_INDEX 127 +#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW_MAX_INDEX 127 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQACK_MAX_INDEX 127 +#define I40E_VFPE_CQACK_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT) +#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQARM_MAX_INDEX 127 +#define I40E_VFPE_CQARM_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT) +#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPDB_MAX_INDEX 127 +#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL_MAX_INDEX 127 +#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127 +#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0 +#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << 
I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC_MAX_INDEX 127 +#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) + +#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0 +#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT) +#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0 +#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT) +#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0 +#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT) +#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */ +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0 +#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK (0xFFFF << I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT) +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17 +#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT) +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18 +#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK (0x1 << I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT) +#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */ +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0 +#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK (0x1 << I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT) +#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15 +#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define 
I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK (0xFF << I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT) +#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK (0xFF << I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT) +#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0 +#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK (0xFF << I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26 +#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27 +#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28 +#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29 +#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30 +#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK (0x1 << I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT) +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31 +#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK (0x1 << I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT) +#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK (0xFF << I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT) +#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0 +#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK (0xFF << I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT) +#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */ +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0 +#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK (0x1 << I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT) +#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT) +#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */ +#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0 +#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK (0xFFFF << I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0 +#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT) +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8 +#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT) +#define 
I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0 +#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT) +#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0 +#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1 +#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2 +#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3 +#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT) +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4 +#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0 +#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT) +#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31 +#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT) + +#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << 
I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define 
I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 
8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15 +#define 
I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << 
I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */ +#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT) +#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0 +#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT) +#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT) +#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0 +#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT) +#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0 +#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT) +#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */ +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0 +#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT) +#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT) +#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0 +#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER 
*/ +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT) +#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0 +#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT) +#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT) +#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */ +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0 +#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define 
I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31 +#define 
I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0 +#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0 
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0 +#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT) +#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK 
(0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0 +#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT) +#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT) +#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT) 
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT) +#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT) +#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT) +#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0 +#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT) +#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT) +#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ 
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT) +#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT) +#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0 +#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT) +#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0 +#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT) +#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0 +#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT) +#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT) +#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0 +#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT) +#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0 +#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT) +#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT) +#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0 +#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << 
I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT) +#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */ +#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0 +#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT) + +#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ +#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 +#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) +#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 +#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) +#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 +#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) +#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 +#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 +#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK (0x7 << I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 +#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK (0x3F << I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 +#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) +#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ +#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 +#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT) +#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ +#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 +#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT) +#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ +#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 +#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT) +#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 +#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 +#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) +#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ +#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 +#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 +#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) +#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ +#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 
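/*
 * Illustrative sketch -- not part of this patch.  The VFPE_CCQPSTATUS1
 * register above reports Control QP (CQP) bring-up to a VF: firmware sets
 * CCQP_DONE (bit 0) on success and CCQP_ERR (bit 31) on failure.  A poll
 * loop over those two fields might look roughly like this; the function
 * name, retry budget and udelay() interval are arbitrary for the example,
 * and readl()/udelay() are standard kernel helpers from <linux/io.h> and
 * <linux/delay.h>.
 */
static inline int example_vf_wait_ccqp_done(u8 __iomem *hw_addr)
{
	u32 val, retries = 0;

	do {
		if (retries++ > 1000)
			return -ETIMEDOUT;
		udelay(10);
		val = readl(hw_addr + I40E_VFPE_CCQPSTATUS1);
		if (val & I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK)
			return -EIO;
	} while (!(val & I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK));

	return 0;	/* CQP is up; doorbells such as VFPE_CQPDB1 may be rung */
}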
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 +#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) +#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 +#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) +#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 +#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) +#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 +#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) +#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ +#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 +#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 +#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) +#endif /* I40IW_REGISTER_H */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_status.h b/drivers/infiniband/hw/i40iw/i40iw_status.h new file mode 100644 index 000000000..f7013f11d --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_status.h @@ -0,0 +1,101 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_STATUS_H +#define I40IW_STATUS_H + +/* Error Codes */ +enum i40iw_status_code { + I40IW_SUCCESS = 0, + I40IW_ERR_NVM = -1, + I40IW_ERR_NVM_CHECKSUM = -2, + I40IW_ERR_CONFIG = -4, + I40IW_ERR_PARAM = -5, + I40IW_ERR_DEVICE_NOT_SUPPORTED = -6, + I40IW_ERR_RESET_FAILED = -7, + I40IW_ERR_SWFW_SYNC = -8, + I40IW_ERR_NO_MEMORY = -9, + I40IW_ERR_BAD_PTR = -10, + I40IW_ERR_INVALID_PD_ID = -11, + I40IW_ERR_INVALID_QP_ID = -12, + I40IW_ERR_INVALID_CQ_ID = -13, + I40IW_ERR_INVALID_CEQ_ID = -14, + I40IW_ERR_INVALID_AEQ_ID = -15, + I40IW_ERR_INVALID_SIZE = -16, + I40IW_ERR_INVALID_ARP_INDEX = -17, + I40IW_ERR_INVALID_FPM_FUNC_ID = -18, + I40IW_ERR_QP_INVALID_MSG_SIZE = -19, + I40IW_ERR_QP_TOOMANY_WRS_POSTED = -20, + I40IW_ERR_INVALID_FRAG_COUNT = -21, + I40IW_ERR_QUEUE_EMPTY = -22, + I40IW_ERR_INVALID_ALIGNMENT = -23, + I40IW_ERR_FLUSHED_QUEUE = -24, + I40IW_ERR_INVALID_PUSH_PAGE_INDEX = -25, + I40IW_ERR_INVALID_INLINE_DATA_SIZE = -26, + I40IW_ERR_TIMEOUT = -27, + I40IW_ERR_OPCODE_MISMATCH = -28, + I40IW_ERR_CQP_COMPL_ERROR = -29, + I40IW_ERR_INVALID_VF_ID = -30, + I40IW_ERR_INVALID_HMCFN_ID = -31, + I40IW_ERR_BACKING_PAGE_ERROR = -32, + I40IW_ERR_NO_PBLCHUNKS_AVAILABLE = -33, + I40IW_ERR_INVALID_PBLE_INDEX = -34, + I40IW_ERR_INVALID_SD_INDEX = -35, + I40IW_ERR_INVALID_PAGE_DESC_INDEX = -36, + I40IW_ERR_INVALID_SD_TYPE = -37, + I40IW_ERR_MEMCPY_FAILED = -38, + I40IW_ERR_INVALID_HMC_OBJ_INDEX = -39, + I40IW_ERR_INVALID_HMC_OBJ_COUNT = -40, + I40IW_ERR_INVALID_SRQ_ARM_LIMIT = -41, + I40IW_ERR_SRQ_ENABLED = -42, + I40IW_ERR_BUF_TOO_SHORT = -43, + I40IW_ERR_BAD_IWARP_CQE = -44, + I40IW_ERR_NVM_BLANK_MODE = -45, + I40IW_ERR_NOT_IMPLEMENTED = -46, + I40IW_ERR_PE_DOORBELL_NOT_ENABLED = -47, + I40IW_ERR_NOT_READY = -48, + I40IW_NOT_SUPPORTED = -49, + I40IW_ERR_FIRMWARE_API_VERSION = -50, + I40IW_ERR_RING_FULL = -51, + I40IW_ERR_MPA_CRC = -61, + I40IW_ERR_NO_TXBUFS = -62, + I40IW_ERR_SEQ_NUM = -63, + I40IW_ERR_list_empty = -64, + I40IW_ERR_INVALID_MAC_ADDR = -65, + I40IW_ERR_BAD_STAG = -66, + I40IW_ERR_CQ_COMPL_ERROR = -67, + I40IW_ERR_QUEUE_DESTROYED = -68 + +}; +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h new file mode 100644 index 000000000..adc8d2ec5 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h @@ -0,0 +1,1363 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_TYPE_H +#define I40IW_TYPE_H +#include "i40iw_user.h" +#include "i40iw_hmc.h" +#include "i40iw_vf.h" +#include "i40iw_virtchnl.h" + +struct i40iw_cqp_sq_wqe { + u64 buf[I40IW_CQP_WQE_SIZE]; +}; + +struct i40iw_sc_aeqe { + u64 buf[I40IW_AEQE_SIZE]; +}; + +struct i40iw_ceqe { + u64 buf[I40IW_CEQE_SIZE]; +}; + +struct i40iw_cqp_ctx { + u64 buf[I40IW_CQP_CTX_SIZE]; +}; + +struct i40iw_cq_shadow_area { + u64 buf[I40IW_SHADOW_AREA_SIZE]; +}; + +struct i40iw_sc_dev; +struct i40iw_hmc_info; +struct i40iw_vsi_pestat; + +struct i40iw_cqp_ops; +struct i40iw_ccq_ops; +struct i40iw_ceq_ops; +struct i40iw_aeq_ops; +struct i40iw_mr_ops; +struct i40iw_cqp_misc_ops; +struct i40iw_pd_ops; +struct i40iw_priv_qp_ops; +struct i40iw_priv_cq_ops; +struct i40iw_hmc_ops; + +enum i40iw_page_size { + I40IW_PAGE_SIZE_4K, + I40IW_PAGE_SIZE_2M +}; + +enum i40iw_resource_indicator_type { + I40IW_RSRC_INDICATOR_TYPE_ADAPTER = 0, + I40IW_RSRC_INDICATOR_TYPE_CQ, + I40IW_RSRC_INDICATOR_TYPE_QP, + I40IW_RSRC_INDICATOR_TYPE_SRQ +}; + +enum i40iw_hdrct_flags { + DDP_LEN_FLAG = 0x80, + DDP_HDR_FLAG = 0x40, + RDMA_HDR_FLAG = 0x20 +}; + +enum i40iw_term_layers { + LAYER_RDMA = 0, + LAYER_DDP = 1, + LAYER_MPA = 2 +}; + +enum i40iw_term_error_types { + RDMAP_REMOTE_PROT = 1, + RDMAP_REMOTE_OP = 2, + DDP_CATASTROPHIC = 0, + DDP_TAGGED_BUFFER = 1, + DDP_UNTAGGED_BUFFER = 2, + DDP_LLP = 3 +}; + +enum i40iw_term_rdma_errors { + RDMAP_INV_STAG = 0x00, + RDMAP_INV_BOUNDS = 0x01, + RDMAP_ACCESS = 0x02, + RDMAP_UNASSOC_STAG = 0x03, + RDMAP_TO_WRAP = 0x04, + RDMAP_INV_RDMAP_VER = 0x05, + RDMAP_UNEXPECTED_OP = 0x06, + RDMAP_CATASTROPHIC_LOCAL = 0x07, + RDMAP_CATASTROPHIC_GLOBAL = 0x08, + RDMAP_CANT_INV_STAG = 0x09, + RDMAP_UNSPECIFIED = 0xff +}; + +enum i40iw_term_ddp_errors { + DDP_CATASTROPHIC_LOCAL = 0x00, + DDP_TAGGED_INV_STAG = 0x00, + DDP_TAGGED_BOUNDS = 0x01, + DDP_TAGGED_UNASSOC_STAG = 0x02, + DDP_TAGGED_TO_WRAP = 0x03, + DDP_TAGGED_INV_DDP_VER = 0x04, + DDP_UNTAGGED_INV_QN = 0x01, + DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02, + DDP_UNTAGGED_INV_MSN_RANGE = 0x03, + DDP_UNTAGGED_INV_MO = 0x04, + DDP_UNTAGGED_INV_TOO_LONG = 0x05, + DDP_UNTAGGED_INV_DDP_VER = 0x06 +}; + +enum i40iw_term_mpa_errors { + MPA_CLOSED = 0x01, + MPA_CRC = 0x02, + MPA_MARKER = 0x03, + MPA_REQ_RSP = 0x04, +}; + +enum i40iw_flush_opcode { + FLUSH_INVALID = 0, + FLUSH_PROT_ERR, + FLUSH_REM_ACCESS_ERR, + FLUSH_LOC_QP_OP_ERR, + FLUSH_REM_OP_ERR, + FLUSH_LOC_LEN_ERR, + FLUSH_GENERAL_ERR, + FLUSH_FATAL_ERR +}; + +enum i40iw_term_eventtypes { + TERM_EVENT_QP_FATAL, + TERM_EVENT_QP_ACCESS_ERR +}; + +struct i40iw_terminate_hdr { + u8 layer_etype; + u8 error_code; + u8 hdrct; + u8 rsvd; +}; + +enum i40iw_debug_flag { + I40IW_DEBUG_NONE = 0x00000000, + I40IW_DEBUG_ERR = 0x00000001, + I40IW_DEBUG_INIT = 0x00000002, + I40IW_DEBUG_DEV = 0x00000004, + I40IW_DEBUG_CM = 0x00000008, + I40IW_DEBUG_VERBS = 0x00000010, + I40IW_DEBUG_PUDA = 0x00000020, + I40IW_DEBUG_ILQ = 0x00000040, + I40IW_DEBUG_IEQ = 
0x00000080, + I40IW_DEBUG_QP = 0x00000100, + I40IW_DEBUG_CQ = 0x00000200, + I40IW_DEBUG_MR = 0x00000400, + I40IW_DEBUG_PBLE = 0x00000800, + I40IW_DEBUG_WQE = 0x00001000, + I40IW_DEBUG_AEQ = 0x00002000, + I40IW_DEBUG_CQP = 0x00004000, + I40IW_DEBUG_HMC = 0x00008000, + I40IW_DEBUG_USER = 0x00010000, + I40IW_DEBUG_VIRT = 0x00020000, + I40IW_DEBUG_DCB = 0x00040000, + I40IW_DEBUG_CQE = 0x00800000, + I40IW_DEBUG_ALL = 0xFFFFFFFF +}; + +enum i40iw_hw_stats_index_32b { + I40IW_HW_STAT_INDEX_IP4RXDISCARD = 0, + I40IW_HW_STAT_INDEX_IP4RXTRUNC, + I40IW_HW_STAT_INDEX_IP4TXNOROUTE, + I40IW_HW_STAT_INDEX_IP6RXDISCARD, + I40IW_HW_STAT_INDEX_IP6RXTRUNC, + I40IW_HW_STAT_INDEX_IP6TXNOROUTE, + I40IW_HW_STAT_INDEX_TCPRTXSEG, + I40IW_HW_STAT_INDEX_TCPRXOPTERR, + I40IW_HW_STAT_INDEX_TCPRXPROTOERR, + I40IW_HW_STAT_INDEX_MAX_32 +}; + +enum i40iw_hw_stats_index_64b { + I40IW_HW_STAT_INDEX_IP4RXOCTS = 0, + I40IW_HW_STAT_INDEX_IP4RXPKTS, + I40IW_HW_STAT_INDEX_IP4RXFRAGS, + I40IW_HW_STAT_INDEX_IP4RXMCPKTS, + I40IW_HW_STAT_INDEX_IP4TXOCTS, + I40IW_HW_STAT_INDEX_IP4TXPKTS, + I40IW_HW_STAT_INDEX_IP4TXFRAGS, + I40IW_HW_STAT_INDEX_IP4TXMCPKTS, + I40IW_HW_STAT_INDEX_IP6RXOCTS, + I40IW_HW_STAT_INDEX_IP6RXPKTS, + I40IW_HW_STAT_INDEX_IP6RXFRAGS, + I40IW_HW_STAT_INDEX_IP6RXMCPKTS, + I40IW_HW_STAT_INDEX_IP6TXOCTS, + I40IW_HW_STAT_INDEX_IP6TXPKTS, + I40IW_HW_STAT_INDEX_IP6TXFRAGS, + I40IW_HW_STAT_INDEX_IP6TXMCPKTS, + I40IW_HW_STAT_INDEX_TCPRXSEGS, + I40IW_HW_STAT_INDEX_TCPTXSEG, + I40IW_HW_STAT_INDEX_RDMARXRDS, + I40IW_HW_STAT_INDEX_RDMARXSNDS, + I40IW_HW_STAT_INDEX_RDMARXWRS, + I40IW_HW_STAT_INDEX_RDMATXRDS, + I40IW_HW_STAT_INDEX_RDMATXSNDS, + I40IW_HW_STAT_INDEX_RDMATXWRS, + I40IW_HW_STAT_INDEX_RDMAVBND, + I40IW_HW_STAT_INDEX_RDMAVINV, + I40IW_HW_STAT_INDEX_MAX_64 +}; + +struct i40iw_dev_hw_stats_offsets { + u32 stats_offset_32[I40IW_HW_STAT_INDEX_MAX_32]; + u32 stats_offset_64[I40IW_HW_STAT_INDEX_MAX_64]; +}; + +struct i40iw_dev_hw_stats { + u64 stats_value_32[I40IW_HW_STAT_INDEX_MAX_32]; + u64 stats_value_64[I40IW_HW_STAT_INDEX_MAX_64]; +}; + +struct i40iw_vsi_pestat { + struct i40iw_hw *hw; + struct i40iw_dev_hw_stats hw_stats; + struct i40iw_dev_hw_stats last_read_hw_stats; + struct i40iw_dev_hw_stats_offsets hw_stats_offsets; + struct timer_list stats_timer; + struct i40iw_sc_vsi *vsi; + spinlock_t lock; /* rdma stats lock */ +}; + +struct i40iw_hw { + u8 __iomem *hw_addr; + void *dev_context; + struct i40iw_hmc_info hmc; +}; + +struct i40iw_pfpdu { + struct list_head rxlist; + u32 rcv_nxt; + u32 fps; + u32 max_fpdu_data; + bool mode; + bool mpa_crc_err; + u64 total_ieq_bufs; + u64 fpdu_processed; + u64 bad_seq_num; + u64 crc_err; + u64 no_tx_bufs; + u64 tx_err; + u64 out_of_order; + u64 pmode_count; +}; + +struct i40iw_sc_pd { + u32 size; + struct i40iw_sc_dev *dev; + u16 pd_id; + int abi_ver; +}; + +struct i40iw_cqp_quanta { + u64 elem[I40IW_CQP_WQE_SIZE]; +}; + +struct i40iw_sc_cqp { + u32 size; + u64 sq_pa; + u64 host_ctx_pa; + void *back_cqp; + struct i40iw_sc_dev *dev; + enum i40iw_status_code (*process_cqp_sds)(struct i40iw_sc_dev *, + struct i40iw_update_sds_info *); + struct i40iw_dma_mem sdbuf; + struct i40iw_ring sq_ring; + struct i40iw_cqp_quanta *sq_base; + u64 *host_ctx; + u64 *scratch_array; + u32 cqp_id; + u32 sq_size; + u32 hw_sq_size; + u8 struct_ver; + u8 polarity; + bool en_datacenter_tcp; + u8 hmc_profile; + u8 enabled_vf_count; + u8 timeout_count; +}; + +struct i40iw_sc_aeq { + u32 size; + u64 aeq_elem_pa; + struct i40iw_sc_dev *dev; + struct i40iw_sc_aeqe *aeqe_base; + void *pbl_list; + u32 
elem_cnt; + struct i40iw_ring aeq_ring; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + u8 polarity; +}; + +struct i40iw_sc_ceq { + u32 size; + u64 ceq_elem_pa; + struct i40iw_sc_dev *dev; + struct i40iw_ceqe *ceqe_base; + void *pbl_list; + u32 ceq_id; + u32 elem_cnt; + struct i40iw_ring ceq_ring; + bool virtual_map; + u8 pbl_chunk_size; + bool tph_en; + u8 tph_val; + u32 first_pm_pbl_idx; + u8 polarity; +}; + +struct i40iw_sc_cq { + struct i40iw_cq_uk cq_uk; + u64 cq_pa; + u64 shadow_area_pa; + struct i40iw_sc_dev *dev; + struct i40iw_sc_vsi *vsi; + void *pbl_list; + void *back_cq; + u32 ceq_id; + u32 shadow_read_threshold; + bool ceqe_mask; + bool virtual_map; + u8 pbl_chunk_size; + u8 cq_type; + bool ceq_id_valid; + bool tph_en; + u8 tph_val; + u32 first_pm_pbl_idx; + bool check_overflow; +}; + +struct i40iw_sc_qp { + struct i40iw_qp_uk qp_uk; + u64 sq_pa; + u64 rq_pa; + u64 hw_host_ctx_pa; + u64 shadow_area_pa; + u64 q2_pa; + struct i40iw_sc_dev *dev; + struct i40iw_sc_vsi *vsi; + struct i40iw_sc_pd *pd; + u64 *hw_host_ctx; + void *llp_stream_handle; + void *back_qp; + struct i40iw_pfpdu pfpdu; + u8 *q2_buf; + u64 qp_compl_ctx; + u16 qs_handle; + u16 push_idx; + u8 sq_tph_val; + u8 rq_tph_val; + u8 qp_state; + u8 qp_type; + u8 hw_sq_size; + u8 hw_rq_size; + u8 src_mac_addr_idx; + bool sq_tph_en; + bool rq_tph_en; + bool rcv_tph_en; + bool xmit_tph_en; + bool virtual_map; + bool flush_sq; + bool flush_rq; + u8 user_pri; + struct list_head list; + bool on_qoslist; + bool sq_flush; + enum i40iw_flush_opcode flush_code; + enum i40iw_term_eventtypes eventtype; + u8 term_flags; +}; + +struct i40iw_hmc_fpm_misc { + u32 max_ceqs; + u32 max_sds; + u32 xf_block_size; + u32 q1_block_size; + u32 ht_multiplier; + u32 timer_bucket; +}; + +struct i40iw_vchnl_if { + enum i40iw_status_code (*vchnl_recv)(struct i40iw_sc_dev *, u32, u8 *, u16); + enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *dev, u32, u8 *, u16); +}; + +#define I40IW_VCHNL_MAX_VF_MSG_SIZE 512 + +struct i40iw_vchnl_vf_msg_buffer { + struct i40iw_virtchnl_op_buf vchnl_msg; + char parm_buffer[I40IW_VCHNL_MAX_VF_MSG_SIZE - 1]; +}; + +struct i40iw_qos { + struct list_head qplist; + spinlock_t lock; /* qos list */ + u16 qs_handle; +}; + +struct i40iw_vfdev { + struct i40iw_sc_dev *pf_dev; + u8 *hmc_info_mem; + struct i40iw_vsi_pestat pestat; + struct i40iw_hmc_pble_info *pble_info; + struct i40iw_hmc_info hmc_info; + struct i40iw_vchnl_vf_msg_buffer vf_msg_buffer; + u64 fpm_query_buf_pa; + u64 *fpm_query_buf; + u32 vf_id; + u32 msg_count; + bool pf_hmc_initialized; + u16 pmf_index; + u16 iw_vf_idx; /* VF Device table index */ + bool stats_initialized; +}; + +#define I40IW_INVALID_FCN_ID 0xff +struct i40iw_sc_vsi { + struct i40iw_sc_dev *dev; + void *back_vsi; /* Owned by OS */ + u32 ilq_count; + struct i40iw_virt_mem ilq_mem; + struct i40iw_puda_rsrc *ilq; + u32 ieq_count; + struct i40iw_virt_mem ieq_mem; + struct i40iw_puda_rsrc *ieq; + u16 exception_lan_queue; + u16 mtu; + u8 fcn_id; + bool stats_fcn_id_alloc; + struct i40iw_qos qos[I40IW_MAX_USER_PRIORITY]; + struct i40iw_vsi_pestat *pestat; +}; + +struct i40iw_sc_dev { + struct list_head cqp_cmd_head; /* head of the CQP command list */ + spinlock_t cqp_lock; /* cqp list sync */ + struct i40iw_dev_uk dev_uk; + bool fcn_id_array[I40IW_MAX_STATS_COUNT]; + struct i40iw_dma_mem vf_fpm_query_buf[I40IW_MAX_PE_ENABLED_VF_COUNT]; + u64 fpm_query_buf_pa; + u64 fpm_commit_buf_pa; + u64 *fpm_query_buf; + u64 *fpm_commit_buf; + void *back_dev; + struct i40iw_hw *hw; + u8 
__iomem *db_addr; + struct i40iw_hmc_info *hmc_info; + struct i40iw_hmc_pble_info *pble_info; + struct i40iw_vfdev *vf_dev[I40IW_MAX_PE_ENABLED_VF_COUNT]; + struct i40iw_sc_cqp *cqp; + struct i40iw_sc_aeq *aeq; + struct i40iw_sc_ceq *ceq[I40IW_CEQ_MAX_COUNT]; + struct i40iw_sc_cq *ccq; + struct i40iw_cqp_ops *cqp_ops; + struct i40iw_ccq_ops *ccq_ops; + struct i40iw_ceq_ops *ceq_ops; + struct i40iw_aeq_ops *aeq_ops; + struct i40iw_pd_ops *iw_pd_ops; + struct i40iw_priv_qp_ops *iw_priv_qp_ops; + struct i40iw_priv_cq_ops *iw_priv_cq_ops; + struct i40iw_mr_ops *mr_ops; + struct i40iw_cqp_misc_ops *cqp_misc_ops; + struct i40iw_hmc_ops *hmc_ops; + struct i40iw_vchnl_if vchnl_if; + const struct i40iw_vf_cqp_ops *iw_vf_cqp_ops; + + struct i40iw_hmc_fpm_misc hmc_fpm_misc; + u32 debug_mask; + u8 hmc_fn_id; + bool is_pf; + bool vchnl_up; + bool ceq_valid; + u8 vf_id; + wait_queue_head_t vf_reqs; + u64 cqp_cmd_stats[OP_SIZE_CQP_STAT_ARRAY]; + struct i40iw_vchnl_vf_msg_buffer vchnl_vf_msg_buf; + u8 hw_rev; +}; + +struct i40iw_modify_cq_info { + u64 cq_pa; + struct i40iw_cqe *cq_base; + void *pbl_list; + u32 ceq_id; + u32 cq_size; + u32 shadow_read_threshold; + bool virtual_map; + u8 pbl_chunk_size; + bool check_overflow; + bool cq_resize; + bool ceq_change; + bool check_overflow_change; + u32 first_pm_pbl_idx; + bool ceq_valid; +}; + +struct i40iw_create_qp_info { + u8 next_iwarp_state; + bool ord_valid; + bool tcp_ctx_valid; + bool cq_num_valid; + bool arp_cache_idx_valid; +}; + +struct i40iw_modify_qp_info { + u64 rx_win0; + u64 rx_win1; + u8 next_iwarp_state; + u8 termlen; + bool ord_valid; + bool tcp_ctx_valid; + bool cq_num_valid; + bool arp_cache_idx_valid; + bool reset_tcp_conn; + bool remove_hash_idx; + bool dont_send_term; + bool dont_send_fin; + bool cached_var_valid; + bool force_loopback; +}; + +struct i40iw_ccq_cqe_info { + struct i40iw_sc_cqp *cqp; + u64 scratch; + u32 op_ret_val; + u16 maj_err_code; + u16 min_err_code; + u8 op_code; + bool error; +}; + +struct i40iw_l2params { + u16 qs_handle_list[I40IW_MAX_USER_PRIORITY]; + u16 mtu; +}; + +struct i40iw_vsi_init_info { + struct i40iw_sc_dev *dev; + void *back_vsi; + struct i40iw_l2params *params; + u16 exception_lan_queue; +}; + +struct i40iw_vsi_stats_info { + struct i40iw_vsi_pestat *pestat; + u8 fcn_id; + bool alloc_fcn_id; + bool stats_initialize; +}; + +struct i40iw_device_init_info { + u64 fpm_query_buf_pa; + u64 fpm_commit_buf_pa; + u64 *fpm_query_buf; + u64 *fpm_commit_buf; + struct i40iw_hw *hw; + void __iomem *bar0; + enum i40iw_status_code (*vchnl_send)(struct i40iw_sc_dev *, u32, u8 *, u16); + u8 hmc_fn_id; + bool is_pf; + u32 debug_mask; +}; + +enum i40iw_cqp_hmc_profile { + I40IW_HMC_PROFILE_DEFAULT = 1, + I40IW_HMC_PROFILE_FAVOR_VF = 2, + I40IW_HMC_PROFILE_EQUAL = 3, +}; + +struct i40iw_cqp_init_info { + u64 cqp_compl_ctx; + u64 host_ctx_pa; + u64 sq_pa; + struct i40iw_sc_dev *dev; + struct i40iw_cqp_quanta *sq; + u64 *host_ctx; + u64 *scratch_array; + u32 sq_size; + u8 struct_ver; + bool en_datacenter_tcp; + u8 hmc_profile; + u8 enabled_vf_count; +}; + +struct i40iw_ceq_init_info { + u64 ceqe_pa; + struct i40iw_sc_dev *dev; + u64 *ceqe_base; + void *pbl_list; + u32 elem_cnt; + u32 ceq_id; + bool virtual_map; + u8 pbl_chunk_size; + bool tph_en; + u8 tph_val; + u32 first_pm_pbl_idx; +}; + +struct i40iw_aeq_init_info { + u64 aeq_elem_pa; + struct i40iw_sc_dev *dev; + u32 *aeqe_base; + void *pbl_list; + u32 elem_cnt; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; +}; + +struct i40iw_ccq_init_info { + u64 
cq_pa; + u64 shadow_area_pa; + struct i40iw_sc_dev *dev; + struct i40iw_cqe *cq_base; + u64 *shadow_area; + void *pbl_list; + u32 num_elem; + u32 ceq_id; + u32 shadow_read_threshold; + bool ceqe_mask; + bool ceq_id_valid; + bool tph_en; + u8 tph_val; + bool avoid_mem_cflct; + bool virtual_map; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; +}; + +struct i40iwarp_offload_info { + u16 rcv_mark_offset; + u16 snd_mark_offset; + u16 pd_id; + u8 ddp_ver; + u8 rdmap_ver; + u8 ord_size; + u8 ird_size; + bool wr_rdresp_en; + bool rd_enable; + bool snd_mark_en; + bool rcv_mark_en; + bool bind_en; + bool fast_reg_en; + bool priv_mode_en; + bool lsmm_present; + u8 iwarp_mode; + bool align_hdrs; + bool rcv_no_mpa_crc; + + u8 last_byte_sent; +}; + +struct i40iw_tcp_offload_info { + bool ipv4; + bool no_nagle; + bool insert_vlan_tag; + bool time_stamp; + u8 cwnd_inc_limit; + bool drop_ooo_seg; + u8 dup_ack_thresh; + u8 ttl; + u8 src_mac_addr_idx; + bool avoid_stretch_ack; + u8 tos; + u16 src_port; + u16 dst_port; + u32 dest_ip_addr0; + u32 dest_ip_addr1; + u32 dest_ip_addr2; + u32 dest_ip_addr3; + u32 snd_mss; + u16 vlan_tag; + u16 arp_idx; + u32 flow_label; + bool wscale; + u8 tcp_state; + u8 snd_wscale; + u8 rcv_wscale; + u32 time_stamp_recent; + u32 time_stamp_age; + u32 snd_nxt; + u32 snd_wnd; + u32 rcv_nxt; + u32 rcv_wnd; + u32 snd_max; + u32 snd_una; + u32 srtt; + u32 rtt_var; + u32 ss_thresh; + u32 cwnd; + u32 snd_wl1; + u32 snd_wl2; + u32 max_snd_window; + u8 rexmit_thresh; + u32 local_ipaddr0; + u32 local_ipaddr1; + u32 local_ipaddr2; + u32 local_ipaddr3; + bool ignore_tcp_opt; + bool ignore_tcp_uns_opt; +}; + +struct i40iw_qp_host_ctx_info { + u64 qp_compl_ctx; + struct i40iw_tcp_offload_info *tcp_info; + struct i40iwarp_offload_info *iwarp_info; + u32 send_cq_num; + u32 rcv_cq_num; + u16 push_idx; + bool push_mode_en; + bool tcp_info_valid; + bool iwarp_info_valid; + bool err_rq_idx_valid; + u16 err_rq_idx; + bool add_to_qoslist; + u8 user_pri; +}; + +struct i40iw_aeqe_info { + u64 compl_ctx; + u32 qp_cq_id; + u16 ae_id; + u16 wqe_idx; + u8 tcp_state; + u8 iwarp_state; + bool qp; + bool cq; + bool sq; + bool in_rdrsp_wr; + bool out_rdrsp; + u8 q2_data_written; + bool aeqe_overflow; +}; + +struct i40iw_allocate_stag_info { + u64 total_len; + u32 chunk_size; + u32 stag_idx; + u32 page_size; + u16 pd_id; + u16 access_rights; + bool remote_access; + bool use_hmc_fcn_index; + u8 hmc_fcn_index; + bool use_pf_rid; +}; + +struct i40iw_reg_ns_stag_info { + u64 reg_addr_pa; + u64 fbo; + void *va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum i40iw_addressing_type addr_type; + i40iw_stag_index stag_idx; + u16 access_rights; + u16 pd_id; + i40iw_stag_key stag_key; + bool use_hmc_fcn_index; + u8 hmc_fcn_index; + bool use_pf_rid; +}; + +struct i40iw_fast_reg_stag_info { + u64 wr_id; + u64 reg_addr_pa; + u64 fbo; + void *va; + u64 total_len; + u32 page_size; + u32 chunk_size; + u32 first_pm_pbl_index; + enum i40iw_addressing_type addr_type; + i40iw_stag_index stag_idx; + u16 access_rights; + u16 pd_id; + i40iw_stag_key stag_key; + bool local_fence; + bool read_fence; + bool signaled; + bool use_hmc_fcn_index; + u8 hmc_fcn_index; + bool use_pf_rid; + bool defer_flag; +}; + +struct i40iw_dealloc_stag_info { + u32 stag_idx; + u16 pd_id; + bool mr; + bool dealloc_pbl; +}; + +struct i40iw_register_shared_stag { + void *va; + enum i40iw_addressing_type addr_type; + i40iw_stag_index new_stag_idx; + i40iw_stag_index parent_stag_idx; + u32 access_rights; + u16 pd_id; + 
i40iw_stag_key new_stag_key; +}; + +struct i40iw_qp_init_info { + struct i40iw_qp_uk_init_info qp_uk_init_info; + struct i40iw_sc_pd *pd; + struct i40iw_sc_vsi *vsi; + u64 *host_ctx; + u8 *q2; + u64 sq_pa; + u64 rq_pa; + u64 host_ctx_pa; + u64 q2_pa; + u64 shadow_area_pa; + int abi_ver; + u8 sq_tph_val; + u8 rq_tph_val; + u8 type; + bool sq_tph_en; + bool rq_tph_en; + bool rcv_tph_en; + bool xmit_tph_en; + bool virtual_map; +}; + +struct i40iw_cq_init_info { + struct i40iw_sc_dev *dev; + u64 cq_base_pa; + u64 shadow_area_pa; + u32 ceq_id; + u32 shadow_read_threshold; + bool virtual_map; + bool ceqe_mask; + u8 pbl_chunk_size; + u32 first_pm_pbl_idx; + bool ceq_id_valid; + bool tph_en; + u8 tph_val; + u8 type; + struct i40iw_cq_uk_init_info cq_uk_init_info; +}; + +struct i40iw_upload_context_info { + u64 buf_pa; + bool freeze_qp; + bool raw_format; + u32 qp_id; + u8 qp_type; +}; + +struct i40iw_add_arp_cache_entry_info { + u8 mac_addr[6]; + u32 reach_max; + u16 arp_index; + bool permanent; +}; + +struct i40iw_apbvt_info { + u16 port; + bool add; +}; + +enum i40iw_quad_entry_type { + I40IW_QHASH_TYPE_TCP_ESTABLISHED = 1, + I40IW_QHASH_TYPE_TCP_SYN, +}; + +enum i40iw_quad_hash_manage_type { + I40IW_QHASH_MANAGE_TYPE_DELETE = 0, + I40IW_QHASH_MANAGE_TYPE_ADD, + I40IW_QHASH_MANAGE_TYPE_MODIFY +}; + +struct i40iw_qhash_table_info { + struct i40iw_sc_vsi *vsi; + enum i40iw_quad_hash_manage_type manage; + enum i40iw_quad_entry_type entry_type; + bool vlan_valid; + bool ipv4_valid; + u8 mac_addr[6]; + u16 vlan_id; + u8 user_pri; + u32 qp_num; + u32 dest_ip[4]; + u32 src_ip[4]; + u16 dest_port; + u16 src_port; +}; + +struct i40iw_local_mac_ipaddr_entry_info { + u8 mac_addr[6]; + u8 entry_idx; +}; + +struct i40iw_cqp_manage_push_page_info { + u32 push_idx; + u16 qs_handle; + u8 free_page; +}; + +struct i40iw_qp_flush_info { + u16 sq_minor_code; + u16 sq_major_code; + u16 rq_minor_code; + u16 rq_major_code; + u16 ae_code; + u8 ae_source; + bool sq; + bool rq; + bool userflushcode; + bool generate_ae; +}; + +struct i40iw_cqp_commit_fpm_values { + u64 qp_base; + u64 cq_base; + u32 hte_base; + u32 arp_base; + u32 apbvt_inuse_base; + u32 mr_base; + u32 xf_base; + u32 xffl_base; + u32 q1_base; + u32 q1fl_base; + u32 fsimc_base; + u32 fsiav_base; + u32 pbl_base; + + u32 qp_cnt; + u32 cq_cnt; + u32 hte_cnt; + u32 arp_cnt; + u32 mr_cnt; + u32 xf_cnt; + u32 xffl_cnt; + u32 q1_cnt; + u32 q1fl_cnt; + u32 fsimc_cnt; + u32 fsiav_cnt; + u32 pbl_cnt; +}; + +struct i40iw_cqp_query_fpm_values { + u16 first_pe_sd_index; + u32 qp_objsize; + u32 cq_objsize; + u32 hte_objsize; + u32 arp_objsize; + u32 mr_objsize; + u32 xf_objsize; + u32 q1_objsize; + u32 fsimc_objsize; + u32 fsiav_objsize; + + u32 qp_max; + u32 cq_max; + u32 hte_max; + u32 arp_max; + u32 mr_max; + u32 xf_max; + u32 xffl_max; + u32 q1_max; + u32 q1fl_max; + u32 fsimc_max; + u32 fsiav_max; + u32 pbl_max; +}; + +struct i40iw_gen_ae_info { + u16 ae_code; + u8 ae_source; +}; + +struct i40iw_cqp_ops { + enum i40iw_status_code (*cqp_init)(struct i40iw_sc_cqp *, + struct i40iw_cqp_init_info *); + enum i40iw_status_code (*cqp_create)(struct i40iw_sc_cqp *, u16 *, u16 *); + void (*cqp_post_sq)(struct i40iw_sc_cqp *); + u64 *(*cqp_get_next_send_wqe)(struct i40iw_sc_cqp *, u64 scratch); + enum i40iw_status_code (*cqp_destroy)(struct i40iw_sc_cqp *); + enum i40iw_status_code (*poll_for_cqp_op_done)(struct i40iw_sc_cqp *, u8, + struct i40iw_ccq_cqe_info *); +}; + +struct i40iw_ccq_ops { + enum i40iw_status_code (*ccq_init)(struct i40iw_sc_cq *, + struct 
i40iw_ccq_init_info *); + enum i40iw_status_code (*ccq_create)(struct i40iw_sc_cq *, u64, bool, bool); + enum i40iw_status_code (*ccq_destroy)(struct i40iw_sc_cq *, u64, bool); + enum i40iw_status_code (*ccq_create_done)(struct i40iw_sc_cq *); + enum i40iw_status_code (*ccq_get_cqe_info)(struct i40iw_sc_cq *, + struct i40iw_ccq_cqe_info *); + void (*ccq_arm)(struct i40iw_sc_cq *); +}; + +struct i40iw_ceq_ops { + enum i40iw_status_code (*ceq_init)(struct i40iw_sc_ceq *, + struct i40iw_ceq_init_info *); + enum i40iw_status_code (*ceq_create)(struct i40iw_sc_ceq *, u64, bool); + enum i40iw_status_code (*cceq_create_done)(struct i40iw_sc_ceq *); + enum i40iw_status_code (*cceq_destroy_done)(struct i40iw_sc_ceq *); + enum i40iw_status_code (*cceq_create)(struct i40iw_sc_ceq *, u64); + enum i40iw_status_code (*ceq_destroy)(struct i40iw_sc_ceq *, u64, bool); + void *(*process_ceq)(struct i40iw_sc_dev *, struct i40iw_sc_ceq *); +}; + +struct i40iw_aeq_ops { + enum i40iw_status_code (*aeq_init)(struct i40iw_sc_aeq *, + struct i40iw_aeq_init_info *); + enum i40iw_status_code (*aeq_create)(struct i40iw_sc_aeq *, u64, bool); + enum i40iw_status_code (*aeq_destroy)(struct i40iw_sc_aeq *, u64, bool); + enum i40iw_status_code (*get_next_aeqe)(struct i40iw_sc_aeq *, + struct i40iw_aeqe_info *); + enum i40iw_status_code (*repost_aeq_entries)(struct i40iw_sc_dev *, u32); + enum i40iw_status_code (*aeq_create_done)(struct i40iw_sc_aeq *); + enum i40iw_status_code (*aeq_destroy_done)(struct i40iw_sc_aeq *); +}; + +struct i40iw_pd_ops { + void (*pd_init)(struct i40iw_sc_dev *, struct i40iw_sc_pd *, u16, int); +}; + +struct i40iw_priv_qp_ops { + enum i40iw_status_code (*qp_init)(struct i40iw_sc_qp *, struct i40iw_qp_init_info *); + enum i40iw_status_code (*qp_create)(struct i40iw_sc_qp *, + struct i40iw_create_qp_info *, u64, bool); + enum i40iw_status_code (*qp_modify)(struct i40iw_sc_qp *, + struct i40iw_modify_qp_info *, u64, bool); + enum i40iw_status_code (*qp_destroy)(struct i40iw_sc_qp *, u64, bool, bool, bool); + enum i40iw_status_code (*qp_flush_wqes)(struct i40iw_sc_qp *, + struct i40iw_qp_flush_info *, u64, bool); + enum i40iw_status_code (*qp_upload_context)(struct i40iw_sc_dev *, + struct i40iw_upload_context_info *, + u64, bool); + enum i40iw_status_code (*qp_setctx)(struct i40iw_sc_qp *, u64 *, + struct i40iw_qp_host_ctx_info *); + + void (*qp_send_lsmm)(struct i40iw_sc_qp *, void *, u32, i40iw_stag); + void (*qp_send_lsmm_nostag)(struct i40iw_sc_qp *, void *, u32); + void (*qp_send_rtt)(struct i40iw_sc_qp *, bool); + enum i40iw_status_code (*qp_post_wqe0)(struct i40iw_sc_qp *, u8); + enum i40iw_status_code (*iw_mr_fast_register)(struct i40iw_sc_qp *, + struct i40iw_fast_reg_stag_info *, + bool); +}; + +struct i40iw_priv_cq_ops { + enum i40iw_status_code (*cq_init)(struct i40iw_sc_cq *, struct i40iw_cq_init_info *); + enum i40iw_status_code (*cq_create)(struct i40iw_sc_cq *, u64, bool, bool); + enum i40iw_status_code (*cq_destroy)(struct i40iw_sc_cq *, u64, bool); + enum i40iw_status_code (*cq_modify)(struct i40iw_sc_cq *, + struct i40iw_modify_cq_info *, u64, bool); +}; + +struct i40iw_mr_ops { + enum i40iw_status_code (*alloc_stag)(struct i40iw_sc_dev *, + struct i40iw_allocate_stag_info *, u64, bool); + enum i40iw_status_code (*mr_reg_non_shared)(struct i40iw_sc_dev *, + struct i40iw_reg_ns_stag_info *, + u64, bool); + enum i40iw_status_code (*mr_reg_shared)(struct i40iw_sc_dev *, + struct i40iw_register_shared_stag *, + u64, bool); + enum i40iw_status_code (*dealloc_stag)(struct i40iw_sc_dev 
*, + struct i40iw_dealloc_stag_info *, + u64, bool); + enum i40iw_status_code (*query_stag)(struct i40iw_sc_dev *, u64, u32, bool); + enum i40iw_status_code (*mw_alloc)(struct i40iw_sc_dev *, u64, u32, u16, bool); +}; + +struct i40iw_cqp_misc_ops { + enum i40iw_status_code (*manage_push_page)(struct i40iw_sc_cqp *, + struct i40iw_cqp_manage_push_page_info *, + u64, bool); + enum i40iw_status_code (*manage_hmc_pm_func_table)(struct i40iw_sc_cqp *, + u64, u8, bool, bool); + enum i40iw_status_code (*set_hmc_resource_profile)(struct i40iw_sc_cqp *, + u64, u8, u8, bool, bool); + enum i40iw_status_code (*commit_fpm_values)(struct i40iw_sc_cqp *, u64, u8, + struct i40iw_dma_mem *, bool, u8); + enum i40iw_status_code (*query_fpm_values)(struct i40iw_sc_cqp *, u64, u8, + struct i40iw_dma_mem *, bool, u8); + enum i40iw_status_code (*static_hmc_pages_allocated)(struct i40iw_sc_cqp *, + u64, u8, bool, bool); + enum i40iw_status_code (*add_arp_cache_entry)(struct i40iw_sc_cqp *, + struct i40iw_add_arp_cache_entry_info *, + u64, bool); + enum i40iw_status_code (*del_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool); + enum i40iw_status_code (*query_arp_cache_entry)(struct i40iw_sc_cqp *, u64, u16, bool); + enum i40iw_status_code (*manage_apbvt_entry)(struct i40iw_sc_cqp *, + struct i40iw_apbvt_info *, u64, bool); + enum i40iw_status_code (*manage_qhash_table_entry)(struct i40iw_sc_cqp *, + struct i40iw_qhash_table_info *, u64, bool); + enum i40iw_status_code (*alloc_local_mac_ipaddr_table_entry)(struct i40iw_sc_cqp *, u64, bool); + enum i40iw_status_code (*add_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, + struct i40iw_local_mac_ipaddr_entry_info *, + u64, bool); + enum i40iw_status_code (*del_local_mac_ipaddr_entry)(struct i40iw_sc_cqp *, u64, u8, u8, bool); + enum i40iw_status_code (*cqp_nop)(struct i40iw_sc_cqp *, u64, bool); + enum i40iw_status_code (*commit_fpm_values_done)(struct i40iw_sc_cqp + *); + enum i40iw_status_code (*query_fpm_values_done)(struct i40iw_sc_cqp *); + enum i40iw_status_code (*manage_hmc_pm_func_table_done)(struct i40iw_sc_cqp *); + enum i40iw_status_code (*update_suspend_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64); + enum i40iw_status_code (*update_resume_qp)(struct i40iw_sc_cqp *, struct i40iw_sc_qp *, u64); +}; + +struct i40iw_hmc_ops { + enum i40iw_status_code (*init_iw_hmc)(struct i40iw_sc_dev *, u8); + enum i40iw_status_code (*parse_fpm_query_buf)(u64 *, struct i40iw_hmc_info *, + struct i40iw_hmc_fpm_misc *); + enum i40iw_status_code (*configure_iw_fpm)(struct i40iw_sc_dev *, u8); + enum i40iw_status_code (*parse_fpm_commit_buf)(u64 *, struct i40iw_hmc_obj_info *, u32 *sd); + enum i40iw_status_code (*create_hmc_object)(struct i40iw_sc_dev *dev, + struct i40iw_hmc_create_obj_info *); + enum i40iw_status_code (*del_hmc_object)(struct i40iw_sc_dev *dev, + struct i40iw_hmc_del_obj_info *, + bool reset); + enum i40iw_status_code (*pf_init_vfhmc)(struct i40iw_sc_dev *, u8, u32 *); + enum i40iw_status_code (*vf_configure_vffpm)(struct i40iw_sc_dev *, u32 *); +}; + +struct cqp_info { + union { + struct { + struct i40iw_sc_qp *qp; + struct i40iw_create_qp_info info; + u64 scratch; + } qp_create; + + struct { + struct i40iw_sc_qp *qp; + struct i40iw_modify_qp_info info; + u64 scratch; + } qp_modify; + + struct { + struct i40iw_sc_qp *qp; + u64 scratch; + bool remove_hash_idx; + bool ignore_mw_bnd; + } qp_destroy; + + struct { + struct i40iw_sc_cq *cq; + u64 scratch; + bool check_overflow; + } cq_create; + + struct { + struct i40iw_sc_cq *cq; + u64 scratch; + } 
cq_destroy; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_allocate_stag_info info; + u64 scratch; + } alloc_stag; + + struct { + struct i40iw_sc_dev *dev; + u64 scratch; + u32 mw_stag_index; + u16 pd_id; + } mw_alloc; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_reg_ns_stag_info info; + u64 scratch; + } mr_reg_non_shared; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_dealloc_stag_info info; + u64 scratch; + } dealloc_stag; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_local_mac_ipaddr_entry_info info; + u64 scratch; + } add_local_mac_ipaddr_entry; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_add_arp_cache_entry_info info; + u64 scratch; + } add_arp_cache_entry; + + struct { + struct i40iw_sc_cqp *cqp; + u64 scratch; + u8 entry_idx; + u8 ignore_ref_count; + } del_local_mac_ipaddr_entry; + + struct { + struct i40iw_sc_cqp *cqp; + u64 scratch; + u16 arp_index; + } del_arp_cache_entry; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_manage_vf_pble_info info; + u64 scratch; + } manage_vf_pble_bp; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_cqp_manage_push_page_info info; + u64 scratch; + } manage_push_page; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_upload_context_info info; + u64 scratch; + } qp_upload_context; + + struct { + struct i40iw_sc_cqp *cqp; + u64 scratch; + } alloc_local_mac_ipaddr_entry; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_hmc_fcn_info info; + u64 scratch; + } manage_hmc_pm; + + struct { + struct i40iw_sc_ceq *ceq; + u64 scratch; + } ceq_create; + + struct { + struct i40iw_sc_ceq *ceq; + u64 scratch; + } ceq_destroy; + + struct { + struct i40iw_sc_aeq *aeq; + u64 scratch; + } aeq_create; + + struct { + struct i40iw_sc_aeq *aeq; + u64 scratch; + } aeq_destroy; + + struct { + struct i40iw_sc_qp *qp; + struct i40iw_qp_flush_info info; + u64 scratch; + } qp_flush_wqes; + + struct { + struct i40iw_sc_qp *qp; + struct i40iw_gen_ae_info info; + u64 scratch; + } gen_ae; + + struct { + struct i40iw_sc_cqp *cqp; + void *fpm_values_va; + u64 fpm_values_pa; + u8 hmc_fn_id; + u64 scratch; + } query_fpm_values; + + struct { + struct i40iw_sc_cqp *cqp; + void *fpm_values_va; + u64 fpm_values_pa; + u8 hmc_fn_id; + u64 scratch; + } commit_fpm_values; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_apbvt_info info; + u64 scratch; + } manage_apbvt_entry; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_qhash_table_info info; + u64 scratch; + } manage_qhash_table_entry; + + struct { + struct i40iw_sc_dev *dev; + struct i40iw_update_sds_info info; + u64 scratch; + } update_pe_sds; + + struct { + struct i40iw_sc_cqp *cqp; + struct i40iw_sc_qp *qp; + u64 scratch; + } suspend_resume; + } u; +}; + +struct cqp_commands_info { + struct list_head cqp_cmd_entry; + u8 cqp_cmd; + u8 post_sq; + struct cqp_info in; +}; + +struct i40iw_virtchnl_work_info { + void (*callback_fcn)(void *vf_dev); + void *worker_vf_dev; +}; + +struct i40iw_cqp_timeout { + u64 compl_cqp_cmds; + u8 count; +}; + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_uk.c b/drivers/infiniband/hw/i40iw/i40iw_uk.c new file mode 100644 index 000000000..8afa5a67a --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_uk.c @@ -0,0 +1,1232 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_status.h" +#include "i40iw_d.h" +#include "i40iw_user.h" +#include "i40iw_register.h" + +static u32 nop_signature = 0x55550000; + +/** + * i40iw_nop_1 - insert a nop wqe and move head. no post work + * @qp: hw qp ptr + */ +static enum i40iw_status_code i40iw_nop_1(struct i40iw_qp_uk *qp) +{ + u64 header, *wqe; + u64 *wqe_0 = NULL; + u32 wqe_idx, peek_head; + bool signaled = false; + + if (!qp->sq_ring.head) + return I40IW_ERR_PARAM; + + wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + wqe = qp->sq_base[wqe_idx].elem; + + qp->sq_wrtrk_array[wqe_idx].wqe_size = I40IW_QP_WQE_MIN_SIZE; + + peek_head = (qp->sq_ring.head + 1) % qp->sq_ring.size; + wqe_0 = qp->sq_base[peek_head].elem; + if (peek_head) + wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID); + else + wqe_0[3] = LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | + LS_64(signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID) | nop_signature++; + + wmb(); /* Memory barrier to ensure data is written before valid bit is set */ + + set_64bit_val(wqe, 24, header); + return 0; +} + +/** + * i40iw_qp_post_wr - post wr to hrdware + * @qp: hw qp ptr + */ +void i40iw_qp_post_wr(struct i40iw_qp_uk *qp) +{ + u64 temp; + u32 hw_sq_tail; + u32 sw_sq_head; + + mb(); /* valid bit is written and loads completed before reading shadow */ + + /* read the doorbell shadow area */ + get_64bit_val(qp->shadow_area, 0, &temp); + + hw_sq_tail = (u32)RS_64(temp, I40IW_QP_DBSA_HW_SQ_TAIL); + sw_sq_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + if (sw_sq_head != hw_sq_tail) { + if (sw_sq_head > qp->initial_ring.head) { + if ((hw_sq_tail >= qp->initial_ring.head) && + (hw_sq_tail < sw_sq_head)) { + writel(qp->qp_id, qp->wqe_alloc_reg); + } + } else if (sw_sq_head != qp->initial_ring.head) { + if ((hw_sq_tail >= qp->initial_ring.head) || + (hw_sq_tail < sw_sq_head)) { + writel(qp->qp_id, qp->wqe_alloc_reg); + } + } + } + + qp->initial_ring.head = qp->sq_ring.head; +} + +/** + * i40iw_qp_ring_push_db - ring qp doorbell + * @qp: hw qp ptr + * @wqe_idx: wqe index + */ +static 
void i40iw_qp_ring_push_db(struct i40iw_qp_uk *qp, u32 wqe_idx) +{ + set_32bit_val(qp->push_db, 0, LS_32((wqe_idx >> 2), I40E_PFPE_WQEALLOC_WQE_DESC_INDEX) | qp->qp_id); + qp->initial_ring.head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); +} + +/** + * i40iw_qp_get_next_send_wqe - return next wqe ptr + * @qp: hw qp ptr + * @wqe_idx: return wqe index + * @wqe_size: size of sq wqe + */ +u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, + u32 *wqe_idx, + u8 wqe_size, + u32 total_size, + u64 wr_id + ) +{ + u64 *wqe = NULL; + u64 wqe_ptr; + u32 peek_head = 0; + u16 offset; + enum i40iw_status_code ret_code = 0; + u8 nop_wqe_cnt = 0, i; + u64 *wqe_0 = NULL; + + *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + wqe_ptr = (uintptr_t)qp->sq_base[*wqe_idx].elem; + offset = (u16)(wqe_ptr) & 0x7F; + if ((offset + wqe_size) > I40IW_QP_WQE_MAX_SIZE) { + nop_wqe_cnt = (u8)(I40IW_QP_WQE_MAX_SIZE - offset) / I40IW_QP_WQE_MIN_SIZE; + for (i = 0; i < nop_wqe_cnt; i++) { + i40iw_nop_1(qp); + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return NULL; + } + + *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + } + + if (((*wqe_idx & 3) == 1) && (wqe_size == I40IW_WQE_SIZE_64)) { + i40iw_nop_1(qp); + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + if (ret_code) + return NULL; + *wqe_idx = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + if (!*wqe_idx) + qp->swqe_polarity = !qp->swqe_polarity; + } + I40IW_RING_MOVE_HEAD_BY_COUNT(qp->sq_ring, + wqe_size / I40IW_QP_WQE_MIN_SIZE, ret_code); + if (ret_code) + return NULL; + + wqe = qp->sq_base[*wqe_idx].elem; + + peek_head = I40IW_RING_GETCURRENT_HEAD(qp->sq_ring); + wqe_0 = qp->sq_base[peek_head].elem; + + if (((peek_head & 3) == 1) || ((peek_head & 3) == 3)) { + if (RS_64(wqe_0[3], I40IWQPSQ_VALID) != !qp->swqe_polarity) + wqe_0[3] = LS_64(!qp->swqe_polarity, I40IWQPSQ_VALID); + } + + qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id; + qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size; + qp->sq_wrtrk_array[*wqe_idx].wqe_size = wqe_size; + return wqe; +} + +/** + * i40iw_set_fragment - set fragment in wqe + * @wqe: wqe for setting fragment + * @offset: offset value + * @sge: sge length and stag + */ +static void i40iw_set_fragment(u64 *wqe, u32 offset, struct i40iw_sge *sge) +{ + if (sge) { + set_64bit_val(wqe, offset, LS_64(sge->tag_off, I40IWQPSQ_FRAG_TO)); + set_64bit_val(wqe, (offset + 8), + (LS_64(sge->len, I40IWQPSQ_FRAG_LEN) | + LS_64(sge->stag, I40IWQPSQ_FRAG_STAG))); + } +} + +/** + * i40iw_qp_get_next_recv_wqe - get next qp's rcv wqe + * @qp: hw qp ptr + * @wqe_idx: return wqe index + */ +u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx) +{ + u64 *wqe = NULL; + enum i40iw_status_code ret_code; + + if (I40IW_RING_FULL_ERR(qp->rq_ring)) + return NULL; + + I40IW_ATOMIC_RING_MOVE_HEAD(qp->rq_ring, *wqe_idx, ret_code); + if (ret_code) + return NULL; + if (!*wqe_idx) + qp->rwqe_polarity = !qp->rwqe_polarity; + /* rq_wqe_size_multiplier is no of qwords in one rq wqe */ + wqe = qp->rq_base[*wqe_idx * (qp->rq_wqe_size_multiplier >> 2)].elem; + + return wqe; +} + +/** + * i40iw_rdma_write - rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_rdma_write(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 header; + u64 *wqe; + struct i40iw_rdma_write *op_info; + u32 i, wqe_idx; + u32 total_size = 
0, byte_off; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + + op_info = &info->op.rdma_write; + if (op_info->num_lo_sges > qp->max_sq_frag_cnt) + return I40IW_ERR_INVALID_FRAG_COUNT; + + for (i = 0; i < op_info->num_lo_sges; i++) + total_size += op_info->lo_sg_list[i].len; + + if (total_size > I40IW_MAX_OUTBOUND_MESSAGE_SIZE) + return I40IW_ERR_QP_INVALID_MSG_SIZE; + + read_fence |= info->read_fence; + + ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_lo_sges, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + set_64bit_val(wqe, 16, + LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO)); + if (!op_info->rem_addr.stag) + return I40IW_ERR_BAD_STAG; + + header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) | + LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) | + LS_64((op_info->num_lo_sges > 1 ? (op_info->num_lo_sges - 1) : 0), I40IWQPSQ_ADDFRAGCNT) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, op_info->lo_sg_list); + + for (i = 1, byte_off = 32; i < op_info->num_lo_sges; i++) { + i40iw_set_fragment(wqe, byte_off, &op_info->lo_sg_list[i]); + byte_off += 16; + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_rdma_read - rdma read command + * @qp: hw qp ptr + * @info: post sq information + * @inv_stag: flag for inv_stag + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_rdma_read(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool inv_stag, + bool post_sq) +{ + u64 *wqe; + struct i40iw_rdma_read *op_info; + u64 header; + u32 wqe_idx; + enum i40iw_status_code ret_code; + u8 wqe_size; + bool local_fence = false; + + op_info = &info->op.rdma_read; + ret_code = i40iw_fragcnt_to_wqesize_sq(1, &wqe_size); + if (ret_code) + return ret_code; + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + local_fence |= info->local_fence; + + set_64bit_val(wqe, 16, LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO)); + header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) | + LS_64((inv_stag ? 
I40IWQP_OP_RDMA_READ_LOC_INV : I40IWQP_OP_RDMA_READ), I40IWQPSQ_OPCODE) | + LS_64(info->read_fence, I40IWQPSQ_READFENCE) | + LS_64(local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, &op_info->lo_addr); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_send - rdma send command + * @qp: hw qp ptr + * @info: post sq information + * @stag_to_inv: stag_to_inv value + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_send(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + u32 stag_to_inv, + bool post_sq) +{ + u64 *wqe; + struct i40iw_post_send *op_info; + u64 header; + u32 i, wqe_idx, total_size = 0, byte_off; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + + op_info = &info->op.send; + if (qp->max_sq_frag_cnt < op_info->num_sges) + return I40IW_ERR_INVALID_FRAG_COUNT; + + for (i = 0; i < op_info->num_sges; i++) + total_size += op_info->sg_list[i].len; + ret_code = i40iw_fragcnt_to_wqesize_sq(op_info->num_sges, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + read_fence |= info->read_fence; + set_64bit_val(wqe, 16, 0); + header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) | + LS_64(info->op_type, I40IWQPSQ_OPCODE) | + LS_64((op_info->num_sges > 1 ? (op_info->num_sges - 1) : 0), + I40IWQPSQ_ADDFRAGCNT) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, op_info->sg_list); + + for (i = 1, byte_off = 32; i < op_info->num_sges; i++) { + i40iw_set_fragment(wqe, byte_off, &op_info->sg_list[i]); + byte_off += 16; + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_inline_rdma_write - inline rdma write operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_inline_rdma_write(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 *wqe; + u8 *dest, *src; + struct i40iw_inline_rdma_write *op_info; + u64 *push; + u64 header = 0; + u32 wqe_idx; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + + op_info = &info->op.inline_rdma_write; + if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; + + ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + read_fence |= info->read_fence; + set_64bit_val(wqe, 16, + LS_64(op_info->rem_addr.tag_off, I40IWQPSQ_FRAG_TO)); + + header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) | + LS_64(I40IWQP_OP_RDMA_WRITE, I40IWQPSQ_OPCODE) | + LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) | + LS_64(1, I40IWQPSQ_INLINEDATAFLAG) | + LS_64((qp->push_db ? 
1 : 0), I40IWQPSQ_PUSHWQE) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + dest = (u8 *)wqe; + src = (u8 *)(op_info->data); + + if (op_info->len <= 16) { + memcpy(dest, src, op_info->len); + } else { + memcpy(dest, src, 16); + src += 16; + dest = (u8 *)wqe + 32; + memcpy(dest, src, op_info->len - 16); + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (qp->push_db) { + push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20); + memcpy(push, wqe, (op_info->len > 16) ? op_info->len + 16 : 32); + i40iw_qp_ring_push_db(qp, wqe_idx); + } else { + if (post_sq) + i40iw_qp_post_wr(qp); + } + + return 0; +} + +/** + * i40iw_inline_send - inline send operation + * @qp: hw qp ptr + * @info: post sq information + * @stag_to_inv: remote stag + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_inline_send(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + u32 stag_to_inv, + bool post_sq) +{ + u64 *wqe; + u8 *dest, *src; + struct i40iw_post_inline_send *op_info; + u64 header; + u32 wqe_idx; + enum i40iw_status_code ret_code; + bool read_fence = false; + u8 wqe_size; + u64 *push; + + op_info = &info->op.inline_send; + if (op_info->len > I40IW_MAX_INLINE_DATA_SIZE) + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; + + ret_code = i40iw_inline_data_size_to_wqesize(op_info->len, &wqe_size); + if (ret_code) + return ret_code; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + read_fence |= info->read_fence; + header = LS_64(stag_to_inv, I40IWQPSQ_REMSTAG) | + LS_64(info->op_type, I40IWQPSQ_OPCODE) | + LS_64(op_info->len, I40IWQPSQ_INLINEDATALEN) | + LS_64(1, I40IWQPSQ_INLINEDATAFLAG) | + LS_64((qp->push_db ? 1 : 0), I40IWQPSQ_PUSHWQE) | + LS_64(read_fence, I40IWQPSQ_READFENCE) | + LS_64(info->local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + dest = (u8 *)wqe; + src = (u8 *)(op_info->data); + + if (op_info->len <= 16) { + memcpy(dest, src, op_info->len); + } else { + memcpy(dest, src, 16); + src += 16; + dest = (u8 *)wqe + 32; + memcpy(dest, src, op_info->len - 16); + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (qp->push_db) { + push = (u64 *)((uintptr_t)qp->push_wqe + (wqe_idx & 0x3) * 0x20); + memcpy(push, wqe, (op_info->len > 16) ? 
op_info->len + 16 : 32); + i40iw_qp_ring_push_db(qp, wqe_idx); + } else { + if (post_sq) + i40iw_qp_post_wr(qp); + } + + return 0; +} + +/** + * i40iw_stag_local_invalidate - stag invalidate operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_stag_local_invalidate(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 *wqe; + struct i40iw_inv_local_stag *op_info; + u64 header; + u32 wqe_idx; + bool local_fence = false; + + op_info = &info->op.inv_local_stag; + local_fence = info->local_fence; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, + LS_64(op_info->target_stag, I40IWQPSQ_LOCSTAG)); + set_64bit_val(wqe, 16, 0); + header = LS_64(I40IW_OP_TYPE_INV_STAG, I40IWQPSQ_OPCODE) | + LS_64(info->read_fence, I40IWQPSQ_READFENCE) | + LS_64(local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_mw_bind - Memory Window bind operation + * @qp: hw qp ptr + * @info: post sq information + * @post_sq: flag to post sq + */ +static enum i40iw_status_code i40iw_mw_bind(struct i40iw_qp_uk *qp, + struct i40iw_post_sq_info *info, + bool post_sq) +{ + u64 *wqe; + struct i40iw_bind_window *op_info; + u64 header; + u32 wqe_idx; + bool local_fence = false; + + op_info = &info->op.bind_window; + + local_fence |= info->local_fence; + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + set_64bit_val(wqe, 0, (uintptr_t)op_info->va); + set_64bit_val(wqe, 8, + LS_64(op_info->mr_stag, I40IWQPSQ_PARENTMRSTAG) | + LS_64(op_info->mw_stag, I40IWQPSQ_MWSTAG)); + set_64bit_val(wqe, 16, op_info->bind_length); + header = LS_64(I40IW_OP_TYPE_BIND_MW, I40IWQPSQ_OPCODE) | + LS_64(((op_info->enable_reads << 2) | + (op_info->enable_writes << 3)), + I40IWQPSQ_STAGRIGHTS) | + LS_64((op_info->addressing_type == I40IW_ADDR_TYPE_VA_BASED ? 1 : 0), + I40IWQPSQ_VABASEDTO) | + LS_64(info->read_fence, I40IWQPSQ_READFENCE) | + LS_64(local_fence, I40IWQPSQ_LOCALFENCE) | + LS_64(info->signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_post_receive - post receive wqe + * @qp: hw qp ptr + * @info: post rq information + */ +static enum i40iw_status_code i40iw_post_receive(struct i40iw_qp_uk *qp, + struct i40iw_post_rq_info *info) +{ + u64 *wqe; + u64 header; + u32 total_size = 0, wqe_idx, i, byte_off; + + if (qp->max_rq_frag_cnt < info->num_sges) + return I40IW_ERR_INVALID_FRAG_COUNT; + for (i = 0; i < info->num_sges; i++) + total_size += info->sg_list[i].len; + wqe = i40iw_qp_get_next_recv_wqe(qp, &wqe_idx); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + + qp->rq_wrid_array[wqe_idx] = info->wr_id; + set_64bit_val(wqe, 16, 0); + + header = LS_64((info->num_sges > 1 ? 
(info->num_sges - 1) : 0), + I40IWQPSQ_ADDFRAGCNT) | + LS_64(qp->rwqe_polarity, I40IWQPSQ_VALID); + + i40iw_set_fragment(wqe, 0, info->sg_list); + + for (i = 1, byte_off = 32; i < info->num_sges; i++) { + i40iw_set_fragment(wqe, byte_off, &info->sg_list[i]); + byte_off += 16; + } + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + + return 0; +} + +/** + * i40iw_cq_request_notification - cq notification request (door bell) + * @cq: hw cq + * @cq_notify: notification type + */ +static void i40iw_cq_request_notification(struct i40iw_cq_uk *cq, + enum i40iw_completion_notify cq_notify) +{ + u64 temp_val; + u16 sw_cq_sel; + u8 arm_next_se = 0; + u8 arm_next = 0; + u8 arm_seq_num; + + get_64bit_val(cq->shadow_area, 32, &temp_val); + arm_seq_num = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_SEQ_NUM); + arm_seq_num++; + + sw_cq_sel = (u16)RS_64(temp_val, I40IW_CQ_DBSA_SW_CQ_SELECT); + arm_next_se = (u8)RS_64(temp_val, I40IW_CQ_DBSA_ARM_NEXT_SE); + arm_next_se |= 1; + if (cq_notify == IW_CQ_COMPL_EVENT) + arm_next = 1; + temp_val = LS_64(arm_seq_num, I40IW_CQ_DBSA_ARM_SEQ_NUM) | + LS_64(sw_cq_sel, I40IW_CQ_DBSA_SW_CQ_SELECT) | + LS_64(arm_next_se, I40IW_CQ_DBSA_ARM_NEXT_SE) | + LS_64(arm_next, I40IW_CQ_DBSA_ARM_NEXT); + + set_64bit_val(cq->shadow_area, 32, temp_val); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + writel(cq->cq_id, cq->cqe_alloc_reg); +} + +/** + * i40iw_cq_post_entries - update tail in shadow memory + * @cq: hw cq + * @count: # of entries processed + */ +static enum i40iw_status_code i40iw_cq_post_entries(struct i40iw_cq_uk *cq, + u8 count) +{ + I40IW_RING_MOVE_TAIL_BY_COUNT(cq->cq_ring, count); + set_64bit_val(cq->shadow_area, 0, + I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); + return 0; +} + +/** + * i40iw_cq_poll_completion - get cq completion info + * @cq: hw cq + * @info: cq poll information returned + * @post_cq: update cq tail + */ +static enum i40iw_status_code i40iw_cq_poll_completion(struct i40iw_cq_uk *cq, + struct i40iw_cq_poll_info *info) +{ + u64 comp_ctx, qword0, qword2, qword3, wqe_qword; + u64 *cqe, *sw_wqe; + struct i40iw_qp_uk *qp; + struct i40iw_ring *pring = NULL; + u32 wqe_idx, q_type, array_idx = 0; + enum i40iw_status_code ret_code = 0; + bool move_cq_head = true; + u8 polarity; + u8 addl_wqes = 0; + + if (cq->avoid_mem_cflct) + cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq); + else + cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq); + + get_64bit_val(cqe, 24, &qword3); + polarity = (u8)RS_64(qword3, I40IW_CQ_VALID); + + if (polarity != cq->polarity) + return I40IW_ERR_QUEUE_EMPTY; + + q_type = (u8)RS_64(qword3, I40IW_CQ_SQ); + info->error = (bool)RS_64(qword3, I40IW_CQ_ERROR); + info->push_dropped = (bool)RS_64(qword3, I40IWCQ_PSHDROP); + if (info->error) { + info->comp_status = I40IW_COMPL_STATUS_FLUSHED; + info->major_err = (bool)RS_64(qword3, I40IW_CQ_MAJERR); + info->minor_err = (bool)RS_64(qword3, I40IW_CQ_MINERR); + } else { + info->comp_status = I40IW_COMPL_STATUS_SUCCESS; + } + + get_64bit_val(cqe, 0, &qword0); + get_64bit_val(cqe, 16, &qword2); + + info->tcp_seq_num = (u32)RS_64(qword0, I40IWCQ_TCPSEQNUM); + + info->qp_id = (u32)RS_64(qword2, I40IWCQ_QPID); + + get_64bit_val(cqe, 8, &comp_ctx); + + info->solicited_event = (bool)RS_64(qword3, I40IWCQ_SOEVENT); + info->is_srq = (bool)RS_64(qword3, I40IWCQ_SRQ); + + qp = (struct i40iw_qp_uk *)(unsigned long)comp_ctx; + if (!qp) { + ret_code = I40IW_ERR_QUEUE_DESTROYED; + goto exit; + } + wqe_idx = (u32)RS_64(qword3, 
I40IW_CQ_WQEIDX); + info->qp_handle = (i40iw_qp_handle)(unsigned long)qp; + + if (q_type == I40IW_CQE_QTYPE_RQ) { + array_idx = (wqe_idx * 4) / qp->rq_wqe_size_multiplier; + if (info->comp_status == I40IW_COMPL_STATUS_FLUSHED) { + info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail]; + array_idx = qp->rq_ring.tail; + } else { + info->wr_id = qp->rq_wrid_array[array_idx]; + } + + info->op_type = I40IW_OP_TYPE_REC; + if (qword3 & I40IWCQ_STAG_MASK) { + info->stag_invalid_set = true; + info->inv_stag = (u32)RS_64(qword2, I40IWCQ_INVSTAG); + } else { + info->stag_invalid_set = false; + } + info->bytes_xfered = (u32)RS_64(qword0, I40IWCQ_PAYLDLEN); + I40IW_RING_SET_TAIL(qp->rq_ring, array_idx + 1); + pring = &qp->rq_ring; + } else { + if (qp->first_sq_wq) { + qp->first_sq_wq = false; + if (!wqe_idx && (qp->sq_ring.head == qp->sq_ring.tail)) { + I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); + I40IW_RING_MOVE_TAIL(cq->cq_ring); + set_64bit_val(cq->shadow_area, 0, + I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); + memset(info, 0, sizeof(struct i40iw_cq_poll_info)); + return i40iw_cq_poll_completion(cq, info); + } + } + + if (info->comp_status != I40IW_COMPL_STATUS_FLUSHED) { + info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid; + info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len; + + info->op_type = (u8)RS_64(qword3, I40IWCQ_OP); + sw_wqe = qp->sq_base[wqe_idx].elem; + get_64bit_val(sw_wqe, 24, &wqe_qword); + + addl_wqes = qp->sq_wrtrk_array[wqe_idx].wqe_size / I40IW_QP_WQE_MIN_SIZE; + I40IW_RING_SET_TAIL(qp->sq_ring, (wqe_idx + addl_wqes)); + } else { + do { + u8 op_type; + u32 tail; + + tail = qp->sq_ring.tail; + sw_wqe = qp->sq_base[tail].elem; + get_64bit_val(sw_wqe, 24, &wqe_qword); + op_type = (u8)RS_64(wqe_qword, I40IWQPSQ_OPCODE); + info->op_type = op_type; + addl_wqes = qp->sq_wrtrk_array[tail].wqe_size / I40IW_QP_WQE_MIN_SIZE; + I40IW_RING_SET_TAIL(qp->sq_ring, (tail + addl_wqes)); + if (op_type != I40IWQP_OP_NOP) { + info->wr_id = qp->sq_wrtrk_array[tail].wrid; + info->bytes_xfered = qp->sq_wrtrk_array[tail].wr_len; + break; + } + } while (1); + } + pring = &qp->sq_ring; + } + + ret_code = 0; + +exit: + if (!ret_code && + (info->comp_status == I40IW_COMPL_STATUS_FLUSHED)) + if (pring && (I40IW_RING_MORE_WORK(*pring))) + move_cq_head = false; + + if (move_cq_head) { + I40IW_RING_MOVE_HEAD_NOCHECK(cq->cq_ring); + + if (I40IW_RING_GETCURRENT_HEAD(cq->cq_ring) == 0) + cq->polarity ^= 1; + + I40IW_RING_MOVE_TAIL(cq->cq_ring); + set_64bit_val(cq->shadow_area, 0, + I40IW_RING_GETCURRENT_HEAD(cq->cq_ring)); + } else { + if (info->is_srq) + return ret_code; + qword3 &= ~I40IW_CQ_WQEIDX_MASK; + qword3 |= LS_64(pring->tail, I40IW_CQ_WQEIDX); + set_64bit_val(cqe, 24, qword3); + } + + return ret_code; +} + +/** + * i40iw_get_wqe_shift - get shift count for maximum wqe size + * @sge: Maximum Scatter Gather Elements wqe + * @inline_data: Maximum inline data size + * @shift: Returns the shift needed based on sge + * + * Shift can be used to left shift the wqe size based on number of SGEs and inlind data size. + * For 1 SGE or inline data <= 16, shift = 0 (wqe size of 32 bytes). + * For 2 or 3 SGEs or inline data <= 48, shift = 1 (wqe size of 64 bytes). + * Shift of 2 otherwise (wqe size of 128 bytes). + */ +void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift) +{ + *shift = 0; + if (sge > 1 || inline_data > 16) + *shift = (sge < 4 && inline_data <= 48) ? 
1 : 2; +} + +/* + * i40iw_get_sqdepth - get SQ depth (quantas) + * @sq_size: SQ size + * @shift: shift which determines size of WQE + * @sqdepth: depth of SQ + * + */ +enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth) +{ + *sqdepth = roundup_pow_of_two((sq_size << shift) + I40IW_SQ_RSVD); + + if (*sqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift)) + *sqdepth = I40IW_QP_SW_MIN_WQSIZE << shift; + else if (*sqdepth > I40IW_QP_SW_MAX_SQ_QUANTAS) + return I40IW_ERR_INVALID_SIZE; + + return 0; +} + +/* + * i40iw_get_rq_depth - get RQ depth (quantas) + * @rq_size: RQ size + * @shift: shift which determines size of WQE + * @rqdepth: depth of RQ + * + */ +enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth) +{ + *rqdepth = roundup_pow_of_two((rq_size << shift) + I40IW_RQ_RSVD); + + if (*rqdepth < (I40IW_QP_SW_MIN_WQSIZE << shift)) + *rqdepth = I40IW_QP_SW_MIN_WQSIZE << shift; + else if (*rqdepth > I40IW_QP_SW_MAX_RQ_QUANTAS) + return I40IW_ERR_INVALID_SIZE; + + return 0; +} + +static const struct i40iw_qp_uk_ops iw_qp_uk_ops = { + .iw_qp_post_wr = i40iw_qp_post_wr, + .iw_qp_ring_push_db = i40iw_qp_ring_push_db, + .iw_rdma_write = i40iw_rdma_write, + .iw_rdma_read = i40iw_rdma_read, + .iw_send = i40iw_send, + .iw_inline_rdma_write = i40iw_inline_rdma_write, + .iw_inline_send = i40iw_inline_send, + .iw_stag_local_invalidate = i40iw_stag_local_invalidate, + .iw_mw_bind = i40iw_mw_bind, + .iw_post_receive = i40iw_post_receive, + .iw_post_nop = i40iw_nop +}; + +static const struct i40iw_cq_ops iw_cq_ops = { + .iw_cq_request_notification = i40iw_cq_request_notification, + .iw_cq_poll_completion = i40iw_cq_poll_completion, + .iw_cq_post_entries = i40iw_cq_post_entries, + .iw_cq_clean = i40iw_clean_cq +}; + +static const struct i40iw_device_uk_ops iw_device_uk_ops = { + .iwarp_cq_uk_init = i40iw_cq_uk_init, + .iwarp_qp_uk_init = i40iw_qp_uk_init, +}; + +/** + * i40iw_qp_uk_init - initialize shared qp + * @qp: hw qp (user and kernel) + * @info: qp initialization info + * + * initializes the vars used in both user and kernel mode. + * size of the wqe depends on numbers of max. fragements + * allowed. Then size of wqe * the number of wqes should be the + * amount of memory allocated for sq and rq. 
If srq is used, + * then rq_base will point to one rq wqe only (not the whole + * array of wqes) + */ +enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp, + struct i40iw_qp_uk_init_info *info) +{ + enum i40iw_status_code ret_code = 0; + u32 sq_ring_size; + u8 sqshift, rqshift; + + if (info->max_sq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT) + return I40IW_ERR_INVALID_FRAG_COUNT; + + if (info->max_rq_frag_cnt > I40IW_MAX_WQ_FRAGMENT_COUNT) + return I40IW_ERR_INVALID_FRAG_COUNT; + i40iw_get_wqe_shift(info->max_sq_frag_cnt, info->max_inline_data, &sqshift); + + qp->sq_base = info->sq; + qp->rq_base = info->rq; + qp->shadow_area = info->shadow_area; + qp->sq_wrtrk_array = info->sq_wrtrk_array; + qp->rq_wrid_array = info->rq_wrid_array; + + qp->wqe_alloc_reg = info->wqe_alloc_reg; + qp->qp_id = info->qp_id; + + qp->sq_size = info->sq_size; + qp->push_db = info->push_db; + qp->push_wqe = info->push_wqe; + + qp->max_sq_frag_cnt = info->max_sq_frag_cnt; + sq_ring_size = qp->sq_size << sqshift; + + I40IW_RING_INIT(qp->sq_ring, sq_ring_size); + I40IW_RING_INIT(qp->initial_ring, sq_ring_size); + I40IW_RING_MOVE_HEAD(qp->sq_ring, ret_code); + I40IW_RING_MOVE_TAIL(qp->sq_ring); + I40IW_RING_MOVE_HEAD(qp->initial_ring, ret_code); + qp->swqe_polarity = 1; + qp->first_sq_wq = true; + qp->swqe_polarity_deferred = 1; + qp->rwqe_polarity = 0; + + if (!qp->use_srq) { + qp->rq_size = info->rq_size; + qp->max_rq_frag_cnt = info->max_rq_frag_cnt; + I40IW_RING_INIT(qp->rq_ring, qp->rq_size); + switch (info->abi_ver) { + case 4: + i40iw_get_wqe_shift(info->max_rq_frag_cnt, 0, &rqshift); + break; + case 5: /* fallthrough until next ABI version */ + default: + rqshift = I40IW_MAX_RQ_WQE_SHIFT; + break; + } + qp->rq_wqe_size = rqshift; + qp->rq_wqe_size_multiplier = 4 << rqshift; + } + qp->ops = iw_qp_uk_ops; + + return ret_code; +} + +/** + * i40iw_cq_uk_init - initialize shared cq (user and kernel) + * @cq: hw cq + * @info: hw cq initialization info + */ +enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq, + struct i40iw_cq_uk_init_info *info) +{ + if ((info->cq_size < I40IW_MIN_CQ_SIZE) || + (info->cq_size > I40IW_MAX_CQ_SIZE)) + return I40IW_ERR_INVALID_SIZE; + cq->cq_base = (struct i40iw_cqe *)info->cq_base; + cq->cq_id = info->cq_id; + cq->cq_size = info->cq_size; + cq->cqe_alloc_reg = info->cqe_alloc_reg; + cq->shadow_area = info->shadow_area; + cq->avoid_mem_cflct = info->avoid_mem_cflct; + + I40IW_RING_INIT(cq->cq_ring, cq->cq_size); + cq->polarity = 1; + cq->ops = iw_cq_ops; + + return 0; +} + +/** + * i40iw_device_init_uk - setup routines for iwarp shared device + * @dev: iwarp shared (user and kernel) + */ +void i40iw_device_init_uk(struct i40iw_dev_uk *dev) +{ + dev->ops_uk = iw_device_uk_ops; +} + +/** + * i40iw_clean_cq - clean cq entries + * @ queue completion context + * @cq: cq to clean + */ +void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq) +{ + u64 *cqe; + u64 qword3, comp_ctx; + u32 cq_head; + u8 polarity, temp; + + cq_head = cq->cq_ring.head; + temp = cq->polarity; + do { + if (cq->avoid_mem_cflct) + cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]); + else + cqe = (u64 *)&cq->cq_base[cq_head]; + get_64bit_val(cqe, 24, &qword3); + polarity = (u8)RS_64(qword3, I40IW_CQ_VALID); + + if (polarity != temp) + break; + + get_64bit_val(cqe, 8, &comp_ctx); + if ((void *)(unsigned long)comp_ctx == queue) + set_64bit_val(cqe, 8, 0); + + cq_head = (cq_head + 1) % cq->cq_ring.size; + if (!cq_head) + temp ^= 1; + } while (true); +} + +/** + * i40iw_nop - send a nop 
+ * @qp: hw qp ptr + * @wr_id: work request id + * @signaled: flag if signaled for completion + * @post_sq: flag to post sq + */ +enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, + u64 wr_id, + bool signaled, + bool post_sq) +{ + u64 header, *wqe; + u32 wqe_idx; + + wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, wr_id); + if (!wqe) + return I40IW_ERR_QP_TOOMANY_WRS_POSTED; + set_64bit_val(wqe, 0, 0); + set_64bit_val(wqe, 8, 0); + set_64bit_val(wqe, 16, 0); + + header = LS_64(I40IWQP_OP_NOP, I40IWQPSQ_OPCODE) | + LS_64(signaled, I40IWQPSQ_SIGCOMPL) | + LS_64(qp->swqe_polarity, I40IWQPSQ_VALID); + + wmb(); /* make sure WQE is populated before valid bit is set */ + + set_64bit_val(wqe, 24, header); + if (post_sq) + i40iw_qp_post_wr(qp); + + return 0; +} + +/** + * i40iw_fragcnt_to_wqesize_sq - calculate wqe size based on fragment count for SQ + * @frag_cnt: number of fragments + * @wqe_size: size of sq wqe returned + */ +enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size) +{ + switch (frag_cnt) { + case 0: + case 1: + *wqe_size = I40IW_QP_WQE_MIN_SIZE; + break; + case 2: + case 3: + *wqe_size = 64; + break; + case 4: + case 5: + *wqe_size = 96; + break; + case 6: + case 7: + *wqe_size = 128; + break; + default: + return I40IW_ERR_INVALID_FRAG_COUNT; + } + + return 0; +} + +/** + * i40iw_fragcnt_to_wqesize_rq - calculate wqe size based on fragment count for RQ + * @frag_cnt: number of fragments + * @wqe_size: size of rq wqe returned + */ +enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size) +{ + switch (frag_cnt) { + case 0: + case 1: + *wqe_size = 32; + break; + case 2: + case 3: + *wqe_size = 64; + break; + case 4: + case 5: + case 6: + case 7: + *wqe_size = 128; + break; + default: + return I40IW_ERR_INVALID_FRAG_COUNT; + } + + return 0; +} + +/** + * i40iw_inline_data_size_to_wqesize - based on inline data, wqe size + * @data_size: data size for inline + * @wqe_size: size of sq wqe returned + */ +enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, + u8 *wqe_size) +{ + if (data_size > I40IW_MAX_INLINE_DATA_SIZE) + return I40IW_ERR_INVALID_INLINE_DATA_SIZE; + + if (data_size <= 16) + *wqe_size = I40IW_QP_WQE_MIN_SIZE; + else + *wqe_size = 64; + + return 0; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_user.h b/drivers/infiniband/hw/i40iw/i40iw_user.h new file mode 100644 index 000000000..b12592564 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_user.h @@ -0,0 +1,430 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. 
+* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_USER_H +#define I40IW_USER_H + +enum i40iw_device_capabilities_const { + I40IW_WQE_SIZE = 4, + I40IW_CQP_WQE_SIZE = 8, + I40IW_CQE_SIZE = 4, + I40IW_EXTENDED_CQE_SIZE = 8, + I40IW_AEQE_SIZE = 2, + I40IW_CEQE_SIZE = 1, + I40IW_CQP_CTX_SIZE = 8, + I40IW_SHADOW_AREA_SIZE = 8, + I40IW_CEQ_MAX_COUNT = 256, + I40IW_QUERY_FPM_BUF_SIZE = 128, + I40IW_COMMIT_FPM_BUF_SIZE = 128, + I40IW_MIN_IW_QP_ID = 1, + I40IW_MAX_IW_QP_ID = 262143, + I40IW_MIN_CEQID = 0, + I40IW_MAX_CEQID = 256, + I40IW_MIN_CQID = 0, + I40IW_MAX_CQID = 131071, + I40IW_MIN_AEQ_ENTRIES = 1, + I40IW_MAX_AEQ_ENTRIES = 524287, + I40IW_MIN_CEQ_ENTRIES = 1, + I40IW_MAX_CEQ_ENTRIES = 131071, + I40IW_MIN_CQ_SIZE = 1, + I40IW_MAX_CQ_SIZE = 1048575, + I40IW_DB_ID_ZERO = 0, + I40IW_MAX_WQ_FRAGMENT_COUNT = 3, + I40IW_MAX_SGE_RD = 1, + I40IW_MAX_OUTBOUND_MESSAGE_SIZE = 2147483647, + I40IW_MAX_INBOUND_MESSAGE_SIZE = 2147483647, + I40IW_MAX_PUSH_PAGE_COUNT = 4096, + I40IW_MAX_PE_ENABLED_VF_COUNT = 32, + I40IW_MAX_VF_FPM_ID = 47, + I40IW_MAX_VF_PER_PF = 127, + I40IW_MAX_SQ_PAYLOAD_SIZE = 2145386496, + I40IW_MAX_INLINE_DATA_SIZE = 48, + I40IW_MAX_PUSHMODE_INLINE_DATA_SIZE = 48, + I40IW_MAX_IRD_SIZE = 64, + I40IW_MAX_ORD_SIZE = 127, + I40IW_MAX_WQ_ENTRIES = 2048, + I40IW_Q2_BUFFER_SIZE = (248 + 100), + I40IW_MAX_WQE_SIZE_RQ = 128, + I40IW_QP_CTX_SIZE = 248, + I40IW_MAX_PDS = 32768 +}; + +#define i40iw_handle void * +#define i40iw_adapter_handle i40iw_handle +#define i40iw_qp_handle i40iw_handle +#define i40iw_cq_handle i40iw_handle +#define i40iw_srq_handle i40iw_handle +#define i40iw_pd_id i40iw_handle +#define i40iw_stag_handle i40iw_handle +#define i40iw_stag_index u32 +#define i40iw_stag u32 +#define i40iw_stag_key u8 + +#define i40iw_tagged_offset u64 +#define i40iw_access_privileges u32 +#define i40iw_physical_fragment u64 +#define i40iw_address_list u64 * + +#define I40IW_MAX_MR_SIZE 0x10000000000L +#define I40IW_MAX_RQ_WQE_SHIFT 2 + +struct i40iw_qp_uk; +struct i40iw_cq_uk; +struct i40iw_srq_uk; +struct i40iw_qp_uk_init_info; +struct i40iw_cq_uk_init_info; +struct i40iw_srq_uk_init_info; + +struct i40iw_sge { + i40iw_tagged_offset tag_off; + u32 len; + i40iw_stag stag; +}; + +#define i40iw_sgl struct i40iw_sge * + +struct i40iw_ring { + u32 head; + u32 tail; + u32 size; +}; + +struct i40iw_cqe { + u64 buf[I40IW_CQE_SIZE]; +}; + +struct i40iw_extended_cqe { + u64 buf[I40IW_EXTENDED_CQE_SIZE]; +}; + +struct i40iw_wqe { + u64 buf[I40IW_WQE_SIZE]; +}; + +struct i40iw_qp_uk_ops; + +enum i40iw_addressing_type { + I40IW_ADDR_TYPE_ZERO_BASED = 0, + I40IW_ADDR_TYPE_VA_BASED = 1, +}; + +#define I40IW_ACCESS_FLAGS_LOCALREAD 0x01 +#define I40IW_ACCESS_FLAGS_LOCALWRITE 0x02 +#define I40IW_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04 +#define I40IW_ACCESS_FLAGS_REMOTEREAD 0x05 +#define I40IW_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08 +#define I40IW_ACCESS_FLAGS_REMOTEWRITE 0x0a +#define I40IW_ACCESS_FLAGS_BIND_WINDOW 0x10 +#define I40IW_ACCESS_FLAGS_ALL 0x1F + +#define I40IW_OP_TYPE_RDMA_WRITE 
0 +#define I40IW_OP_TYPE_RDMA_READ 1 +#define I40IW_OP_TYPE_SEND 3 +#define I40IW_OP_TYPE_SEND_INV 4 +#define I40IW_OP_TYPE_SEND_SOL 5 +#define I40IW_OP_TYPE_SEND_SOL_INV 6 +#define I40IW_OP_TYPE_REC 7 +#define I40IW_OP_TYPE_BIND_MW 8 +#define I40IW_OP_TYPE_FAST_REG_NSMR 9 +#define I40IW_OP_TYPE_INV_STAG 10 +#define I40IW_OP_TYPE_RDMA_READ_INV_STAG 11 +#define I40IW_OP_TYPE_NOP 12 + +enum i40iw_completion_status { + I40IW_COMPL_STATUS_SUCCESS = 0, + I40IW_COMPL_STATUS_FLUSHED, + I40IW_COMPL_STATUS_INVALID_WQE, + I40IW_COMPL_STATUS_QP_CATASTROPHIC, + I40IW_COMPL_STATUS_REMOTE_TERMINATION, + I40IW_COMPL_STATUS_INVALID_STAG, + I40IW_COMPL_STATUS_BASE_BOUND_VIOLATION, + I40IW_COMPL_STATUS_ACCESS_VIOLATION, + I40IW_COMPL_STATUS_INVALID_PD_ID, + I40IW_COMPL_STATUS_WRAP_ERROR, + I40IW_COMPL_STATUS_STAG_INVALID_PDID, + I40IW_COMPL_STATUS_RDMA_READ_ZERO_ORD, + I40IW_COMPL_STATUS_QP_NOT_PRIVLEDGED, + I40IW_COMPL_STATUS_STAG_NOT_INVALID, + I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_SIZE, + I40IW_COMPL_STATUS_INVALID_PHYS_BUFFER_ENTRY, + I40IW_COMPL_STATUS_INVALID_FBO, + I40IW_COMPL_STATUS_INVALID_LENGTH, + I40IW_COMPL_STATUS_INVALID_ACCESS, + I40IW_COMPL_STATUS_PHYS_BUFFER_LIST_TOO_LONG, + I40IW_COMPL_STATUS_INVALID_VIRT_ADDRESS, + I40IW_COMPL_STATUS_INVALID_REGION, + I40IW_COMPL_STATUS_INVALID_WINDOW, + I40IW_COMPL_STATUS_INVALID_TOTAL_LENGTH +}; + +enum i40iw_completion_notify { + IW_CQ_COMPL_EVENT = 0, + IW_CQ_COMPL_SOLICITED = 1 +}; + +struct i40iw_post_send { + i40iw_sgl sg_list; + u32 num_sges; +}; + +struct i40iw_post_inline_send { + void *data; + u32 len; +}; + +struct i40iw_rdma_write { + i40iw_sgl lo_sg_list; + u32 num_lo_sges; + struct i40iw_sge rem_addr; +}; + +struct i40iw_inline_rdma_write { + void *data; + u32 len; + struct i40iw_sge rem_addr; +}; + +struct i40iw_rdma_read { + struct i40iw_sge lo_addr; + struct i40iw_sge rem_addr; +}; + +struct i40iw_bind_window { + i40iw_stag mr_stag; + u64 bind_length; + void *va; + enum i40iw_addressing_type addressing_type; + bool enable_reads; + bool enable_writes; + i40iw_stag mw_stag; +}; + +struct i40iw_inv_local_stag { + i40iw_stag target_stag; +}; + +struct i40iw_post_sq_info { + u64 wr_id; + u8 op_type; + bool signaled; + bool read_fence; + bool local_fence; + bool inline_data; + bool defer_flag; + union { + struct i40iw_post_send send; + struct i40iw_rdma_write rdma_write; + struct i40iw_rdma_read rdma_read; + struct i40iw_rdma_read rdma_read_inv; + struct i40iw_bind_window bind_window; + struct i40iw_inv_local_stag inv_local_stag; + struct i40iw_inline_rdma_write inline_rdma_write; + struct i40iw_post_inline_send inline_send; + } op; +}; + +struct i40iw_post_rq_info { + u64 wr_id; + i40iw_sgl sg_list; + u32 num_sges; +}; + +struct i40iw_cq_poll_info { + u64 wr_id; + i40iw_qp_handle qp_handle; + u32 bytes_xfered; + u32 tcp_seq_num; + u32 qp_id; + i40iw_stag inv_stag; + enum i40iw_completion_status comp_status; + u16 major_err; + u16 minor_err; + u8 op_type; + bool stag_invalid_set; + bool push_dropped; + bool error; + bool is_srq; + bool solicited_event; +}; + +struct i40iw_qp_uk_ops { + void (*iw_qp_post_wr)(struct i40iw_qp_uk *); + void (*iw_qp_ring_push_db)(struct i40iw_qp_uk *, u32); + enum i40iw_status_code (*iw_rdma_write)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_rdma_read)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool, bool); + enum i40iw_status_code (*iw_send)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, u32, bool); + enum i40iw_status_code 
(*iw_inline_rdma_write)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_inline_send)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, u32, bool); + enum i40iw_status_code (*iw_stag_local_invalidate)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_mw_bind)(struct i40iw_qp_uk *, + struct i40iw_post_sq_info *, bool); + enum i40iw_status_code (*iw_post_receive)(struct i40iw_qp_uk *, + struct i40iw_post_rq_info *); + enum i40iw_status_code (*iw_post_nop)(struct i40iw_qp_uk *, u64, bool, bool); +}; + +struct i40iw_cq_ops { + void (*iw_cq_request_notification)(struct i40iw_cq_uk *, + enum i40iw_completion_notify); + enum i40iw_status_code (*iw_cq_poll_completion)(struct i40iw_cq_uk *, + struct i40iw_cq_poll_info *); + enum i40iw_status_code (*iw_cq_post_entries)(struct i40iw_cq_uk *, u8 count); + void (*iw_cq_clean)(void *, struct i40iw_cq_uk *); +}; + +struct i40iw_dev_uk; + +struct i40iw_device_uk_ops { + enum i40iw_status_code (*iwarp_cq_uk_init)(struct i40iw_cq_uk *, + struct i40iw_cq_uk_init_info *); + enum i40iw_status_code (*iwarp_qp_uk_init)(struct i40iw_qp_uk *, + struct i40iw_qp_uk_init_info *); +}; + +struct i40iw_dev_uk { + struct i40iw_device_uk_ops ops_uk; +}; + +struct i40iw_sq_uk_wr_trk_info { + u64 wrid; + u32 wr_len; + u8 wqe_size; + u8 reserved[3]; +}; + +struct i40iw_qp_quanta { + u64 elem[I40IW_WQE_SIZE]; +}; + +struct i40iw_qp_uk { + struct i40iw_qp_quanta *sq_base; + struct i40iw_qp_quanta *rq_base; + u32 __iomem *wqe_alloc_reg; + struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u64 *shadow_area; + u32 *push_db; + u64 *push_wqe; + struct i40iw_ring sq_ring; + struct i40iw_ring rq_ring; + struct i40iw_ring initial_ring; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 max_sq_frag_cnt; + u32 max_rq_frag_cnt; + struct i40iw_qp_uk_ops ops; + bool use_srq; + u8 swqe_polarity; + u8 swqe_polarity_deferred; + u8 rwqe_polarity; + u8 rq_wqe_size; + u8 rq_wqe_size_multiplier; + bool first_sq_wq; + bool deferred_flag; +}; + +struct i40iw_cq_uk { + struct i40iw_cqe *cq_base; + u32 __iomem *cqe_alloc_reg; + u64 *shadow_area; + u32 cq_id; + u32 cq_size; + struct i40iw_ring cq_ring; + u8 polarity; + bool avoid_mem_cflct; + + struct i40iw_cq_ops ops; +}; + +struct i40iw_qp_uk_init_info { + struct i40iw_qp_quanta *sq; + struct i40iw_qp_quanta *rq; + u32 __iomem *wqe_alloc_reg; + u64 *shadow_area; + struct i40iw_sq_uk_wr_trk_info *sq_wrtrk_array; + u64 *rq_wrid_array; + u32 *push_db; + u64 *push_wqe; + u32 qp_id; + u32 sq_size; + u32 rq_size; + u32 max_sq_frag_cnt; + u32 max_rq_frag_cnt; + u32 max_inline_data; + int abi_ver; +}; + +struct i40iw_cq_uk_init_info { + u32 __iomem *cqe_alloc_reg; + struct i40iw_cqe *cq_base; + u64 *shadow_area; + u32 cq_size; + u32 cq_id; + bool avoid_mem_cflct; +}; + +void i40iw_device_init_uk(struct i40iw_dev_uk *dev); + +void i40iw_qp_post_wr(struct i40iw_qp_uk *qp); +u64 *i40iw_qp_get_next_send_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx, + u8 wqe_size, + u32 total_size, + u64 wr_id + ); +u64 *i40iw_qp_get_next_recv_wqe(struct i40iw_qp_uk *qp, u32 *wqe_idx); +u64 *i40iw_qp_get_next_srq_wqe(struct i40iw_srq_uk *srq, u32 *wqe_idx); + +enum i40iw_status_code i40iw_cq_uk_init(struct i40iw_cq_uk *cq, + struct i40iw_cq_uk_init_info *info); +enum i40iw_status_code i40iw_qp_uk_init(struct i40iw_qp_uk *qp, + struct i40iw_qp_uk_init_info *info); + +void i40iw_clean_cq(void *queue, struct i40iw_cq_uk *cq); +enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, 
u64 wr_id, + bool signaled, bool post_sq); +enum i40iw_status_code i40iw_fragcnt_to_wqesize_sq(u32 frag_cnt, u8 *wqe_size); +enum i40iw_status_code i40iw_fragcnt_to_wqesize_rq(u32 frag_cnt, u8 *wqe_size); +enum i40iw_status_code i40iw_inline_data_size_to_wqesize(u32 data_size, + u8 *wqe_size); +void i40iw_get_wqe_shift(u32 sge, u32 inline_data, u8 *shift); +enum i40iw_status_code i40iw_get_sqdepth(u32 sq_size, u8 shift, u32 *sqdepth); +enum i40iw_status_code i40iw_get_rqdepth(u32 rq_size, u8 shift, u32 *rqdepth); +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c new file mode 100644 index 000000000..dda8e79d4 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -0,0 +1,1554 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/init.h> +#include <linux/io.h> +#include <asm/irq.h> +#include <asm/byteorder.h> +#include <net/netevent.h> +#include <net/neighbour.h> +#include "i40iw.h" + +/** + * i40iw_arp_table - manage arp table + * @iwdev: iwarp device + * @ip_addr: ip address for device + * @mac_addr: mac address ptr + * @action: modify, delete or add + */ +int i40iw_arp_table(struct i40iw_device *iwdev, + u32 *ip_addr, + bool ipv4, + u8 *mac_addr, + u32 action) +{ + int arp_index; + int err; + u32 ip[4]; + + if (ipv4) { + memset(ip, 0, sizeof(ip)); + ip[0] = *ip_addr; + } else { + memcpy(ip, ip_addr, sizeof(ip)); + } + + for (arp_index = 0; (u32)arp_index < iwdev->arp_table_size; arp_index++) + if (memcmp(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)) == 0) + break; + switch (action) { + case I40IW_ARP_ADD: + if (arp_index != iwdev->arp_table_size) + return -1; + + arp_index = 0; + err = i40iw_alloc_resource(iwdev, iwdev->allocated_arps, + iwdev->arp_table_size, + (u32 *)&arp_index, + &iwdev->next_arp_index); + + if (err) + return err; + + memcpy(iwdev->arp_table[arp_index].ip_addr, ip, sizeof(ip)); + ether_addr_copy(iwdev->arp_table[arp_index].mac_addr, mac_addr); + break; + case I40IW_ARP_RESOLVE: + if (arp_index == iwdev->arp_table_size) + return -1; + break; + case I40IW_ARP_DELETE: + if (arp_index == iwdev->arp_table_size) + return -1; + memset(iwdev->arp_table[arp_index].ip_addr, 0, + sizeof(iwdev->arp_table[arp_index].ip_addr)); + eth_zero_addr(iwdev->arp_table[arp_index].mac_addr); + i40iw_free_resource(iwdev, iwdev->allocated_arps, arp_index); + break; + default: + return -1; + } + return arp_index; +} + +/** + * i40iw_wr32 - write 32 bits to hw register + * @hw: hardware information including registers + * @reg: register offset + * @value: vvalue to write to register + */ +inline void i40iw_wr32(struct i40iw_hw *hw, u32 reg, u32 value) +{ + writel(value, hw->hw_addr + reg); +} + +/** + * i40iw_rd32 - read a 32 bit hw register + * @hw: hardware information including registers + * @reg: register offset + * + * Return value of register content + */ +inline u32 i40iw_rd32(struct i40iw_hw *hw, u32 reg) +{ + return readl(hw->hw_addr + reg); +} + +/** + * i40iw_inetaddr_event - system notifier for ipv4 addr events + * @notfier: not used + * @event: event for notifier + * @ptr: if address + */ +int i40iw_inetaddr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr) +{ + struct in_ifaddr *ifa = ptr; + struct net_device *event_netdev = ifa->ifa_dev->dev; + struct net_device *netdev; + struct net_device *upper_dev; + struct i40iw_device *iwdev; + struct i40iw_handler *hdl; + u32 local_ipaddr; + u32 action = I40IW_ARP_ADD; + + hdl = i40iw_find_netdev(event_netdev); + if (!hdl) + return NOTIFY_DONE; + + iwdev = &hdl->device; + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) + return NOTIFY_DONE; + + netdev = iwdev->ldev->netdev; + upper_dev = netdev_master_upper_dev_get(netdev); + if (netdev != event_netdev) + return NOTIFY_DONE; + + if (upper_dev) { + struct in_device *in; + + rcu_read_lock(); + in = __in_dev_get_rcu(upper_dev); + + if (!in->ifa_list) + local_ipaddr = 0; + else + 
local_ipaddr = ntohl(in->ifa_list->ifa_address); + + rcu_read_unlock(); + } else { + local_ipaddr = ntohl(ifa->ifa_address); + } + switch (event) { + case NETDEV_DOWN: + action = I40IW_ARP_DELETE; + /* Fall through */ + case NETDEV_UP: + /* Fall through */ + case NETDEV_CHANGEADDR: + + /* Just skip if no need to handle ARP cache */ + if (!local_ipaddr) + break; + + i40iw_manage_arp_cache(iwdev, + netdev->dev_addr, + &local_ipaddr, + true, + action); + i40iw_if_notify(iwdev, netdev, &local_ipaddr, true, + (action == I40IW_ARP_ADD) ? true : false); + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * i40iw_inet6addr_event - system notifier for ipv6 addr events + * @notfier: not used + * @event: event for notifier + * @ptr: if address + */ +int i40iw_inet6addr_event(struct notifier_block *notifier, + unsigned long event, + void *ptr) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + struct net_device *event_netdev = ifa->idev->dev; + struct net_device *netdev; + struct i40iw_device *iwdev; + struct i40iw_handler *hdl; + u32 local_ipaddr6[4]; + u32 action = I40IW_ARP_ADD; + + hdl = i40iw_find_netdev(event_netdev); + if (!hdl) + return NOTIFY_DONE; + + iwdev = &hdl->device; + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) + return NOTIFY_DONE; + + netdev = iwdev->ldev->netdev; + if (netdev != event_netdev) + return NOTIFY_DONE; + + i40iw_copy_ip_ntohl(local_ipaddr6, ifa->addr.in6_u.u6_addr32); + switch (event) { + case NETDEV_DOWN: + action = I40IW_ARP_DELETE; + /* Fall through */ + case NETDEV_UP: + /* Fall through */ + case NETDEV_CHANGEADDR: + i40iw_manage_arp_cache(iwdev, + netdev->dev_addr, + local_ipaddr6, + false, + action); + i40iw_if_notify(iwdev, netdev, local_ipaddr6, false, + (action == I40IW_ARP_ADD) ? 
true : false); + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * i40iw_net_event - system notifier for netevents + * @notfier: not used + * @event: event for notifier + * @ptr: neighbor + */ +int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *ptr) +{ + struct neighbour *neigh = ptr; + struct i40iw_device *iwdev; + struct i40iw_handler *iwhdl; + __be32 *p; + u32 local_ipaddr[4]; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + iwhdl = i40iw_find_netdev((struct net_device *)neigh->dev); + if (!iwhdl) + return NOTIFY_DONE; + iwdev = &iwhdl->device; + if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing) + return NOTIFY_DONE; + p = (__be32 *)neigh->primary_key; + i40iw_copy_ip_ntohl(local_ipaddr, p); + if (neigh->nud_state & NUD_VALID) { + i40iw_manage_arp_cache(iwdev, + neigh->ha, + local_ipaddr, + false, + I40IW_ARP_ADD); + + } else { + i40iw_manage_arp_cache(iwdev, + neigh->ha, + local_ipaddr, + false, + I40IW_ARP_DELETE); + } + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * i40iw_netdevice_event - system notifier for netdev events + * @notfier: not used + * @event: event for notifier + * @ptr: netdev + */ +int i40iw_netdevice_event(struct notifier_block *notifier, + unsigned long event, + void *ptr) +{ + struct net_device *event_netdev; + struct net_device *netdev; + struct i40iw_device *iwdev; + struct i40iw_handler *hdl; + + event_netdev = netdev_notifier_info_to_dev(ptr); + + hdl = i40iw_find_netdev(event_netdev); + if (!hdl) + return NOTIFY_DONE; + + iwdev = &hdl->device; + if (iwdev->init_state < RDMA_DEV_REGISTERED || iwdev->closing) + return NOTIFY_DONE; + + netdev = iwdev->ldev->netdev; + if (netdev != event_netdev) + return NOTIFY_DONE; + + iwdev->iw_status = 1; + + switch (event) { + case NETDEV_DOWN: + iwdev->iw_status = 0; + /* Fall through */ + case NETDEV_UP: + i40iw_port_ibevent(iwdev); + break; + default: + break; + } + return NOTIFY_DONE; +} + +/** + * i40iw_get_cqp_request - get cqp struct + * @cqp: device cqp ptr + * @wait: cqp to be used in wait mode + */ +struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait) +{ + struct i40iw_cqp_request *cqp_request = NULL; + unsigned long flags; + + spin_lock_irqsave(&cqp->req_lock, flags); + if (!list_empty(&cqp->cqp_avail_reqs)) { + cqp_request = list_entry(cqp->cqp_avail_reqs.next, + struct i40iw_cqp_request, list); + list_del_init(&cqp_request->list); + } + spin_unlock_irqrestore(&cqp->req_lock, flags); + if (!cqp_request) { + cqp_request = kzalloc(sizeof(*cqp_request), GFP_ATOMIC); + if (cqp_request) { + cqp_request->dynamic = true; + INIT_LIST_HEAD(&cqp_request->list); + init_waitqueue_head(&cqp_request->waitq); + } + } + if (!cqp_request) { + i40iw_pr_err("CQP Request Fail: No Memory"); + return NULL; + } + + if (wait) { + atomic_set(&cqp_request->refcount, 2); + cqp_request->waiting = true; + } else { + atomic_set(&cqp_request->refcount, 1); + } + return cqp_request; +} + +/** + * i40iw_free_cqp_request - free cqp request + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request) +{ + struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp); + unsigned long flags; + + if (cqp_request->dynamic) { + kfree(cqp_request); + } else { + cqp_request->request_done = false; + cqp_request->callback_fcn = NULL; + cqp_request->waiting = false; + + spin_lock_irqsave(&cqp->req_lock, flags); + 
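+		/* non-dynamic request: state was reset above, so return it to the cqp_avail_reqs free list while holding req_lock */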
list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); + spin_unlock_irqrestore(&cqp->req_lock, flags); + } + wake_up(&iwdev->close_wq); +} + +/** + * i40iw_put_cqp_request - dec ref count and free if 0 + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +void i40iw_put_cqp_request(struct i40iw_cqp *cqp, + struct i40iw_cqp_request *cqp_request) +{ + if (atomic_dec_and_test(&cqp_request->refcount)) + i40iw_free_cqp_request(cqp, cqp_request); +} + +/** + * i40iw_free_pending_cqp_request -free pending cqp request objs + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp, + struct i40iw_cqp_request *cqp_request) +{ + struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp); + + if (cqp_request->waiting) { + cqp_request->compl_info.error = true; + cqp_request->request_done = true; + wake_up(&cqp_request->waitq); + } + i40iw_put_cqp_request(cqp, cqp_request); + wait_event_timeout(iwdev->close_wq, + !atomic_read(&cqp_request->refcount), + 1000); +} + +/** + * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions + * @iwdev: iwarp device + */ +void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_cqp *cqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request = NULL; + struct cqp_commands_info *pcmdinfo = NULL; + u32 i, pending_work, wqe_idx; + + pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring); + wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring); + for (i = 0; i < pending_work; i++) { + cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx]; + if (cqp_request) + i40iw_free_pending_cqp_request(cqp, cqp_request); + wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring); + } + + while (!list_empty(&dev->cqp_cmd_head)) { + pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head); + cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info); + if (cqp_request) + i40iw_free_pending_cqp_request(cqp, cqp_request); + } +} + +/** + * i40iw_free_qp - callback after destroy cqp completes + * @cqp_request: cqp request for destroy qp + * @num: not used + */ +static void i40iw_free_qp(struct i40iw_cqp_request *cqp_request, u32 num) +{ + struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)cqp_request->param; + struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; + struct i40iw_device *iwdev; + u32 qp_num = iwqp->ibqp.qp_num; + + iwdev = iwqp->iwdev; + + i40iw_rem_pdusecount(iwqp->iwpd, iwdev); + i40iw_free_qp_resources(iwdev, iwqp, qp_num); + i40iw_rem_devusecount(iwdev); +} + +/** + * i40iw_wait_event - wait for completion + * @iwdev: iwarp device + * @cqp_request: cqp request to wait + */ +static int i40iw_wait_event(struct i40iw_device *iwdev, + struct i40iw_cqp_request *cqp_request) +{ + struct cqp_commands_info *info = &cqp_request->info; + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_timeout cqp_timeout; + bool cqp_error = false; + int err_code = 0; + memset(&cqp_timeout, 0, sizeof(cqp_timeout)); + cqp_timeout.compl_cqp_cmds = iwdev->sc_dev.cqp_cmd_stats[OP_COMPLETED_COMMANDS]; + do { + if (wait_event_timeout(cqp_request->waitq, + cqp_request->request_done, CQP_COMPL_WAIT_TIME)) + break; + + i40iw_check_cqp_progress(&cqp_timeout, &iwdev->sc_dev); + + if (cqp_timeout.count < CQP_TIMEOUT_THRESHOLD) + continue; + + i40iw_pr_err("error cqp command 0x%x timed out", info->cqp_cmd); + err_code = -ETIME; + if 
(!iwdev->reset) { + iwdev->reset = true; + i40iw_request_reset(iwdev); + } + goto done; + } while (1); + cqp_error = cqp_request->compl_info.error; + if (cqp_error) { + i40iw_pr_err("error cqp command 0x%x completion maj = 0x%x min=0x%x\n", + info->cqp_cmd, cqp_request->compl_info.maj_err_code, + cqp_request->compl_info.min_err_code); + err_code = -EPROTO; + goto done; + } +done: + i40iw_put_cqp_request(iwcqp, cqp_request); + return err_code; +} + +/** + * i40iw_handle_cqp_op - process cqp command + * @iwdev: iwarp device + * @cqp_request: cqp request to process + */ +enum i40iw_status_code i40iw_handle_cqp_op(struct i40iw_device *iwdev, + struct i40iw_cqp_request + *cqp_request) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + enum i40iw_status_code status; + struct cqp_commands_info *info = &cqp_request->info; + int err_code = 0; + + if (iwdev->reset) { + i40iw_free_cqp_request(&iwdev->cqp, cqp_request); + return I40IW_ERR_CQP_COMPL_ERROR; + } + + status = i40iw_process_cqp_cmd(dev, info); + if (status) { + i40iw_pr_err("error cqp command 0x%x failed\n", info->cqp_cmd); + i40iw_free_cqp_request(&iwdev->cqp, cqp_request); + return status; + } + if (cqp_request->waiting) + err_code = i40iw_wait_event(iwdev, cqp_request); + if (err_code) + status = I40IW_ERR_CQP_COMPL_ERROR; + return status; +} + +/** + * i40iw_add_devusecount - add dev refcount + * @iwdev: dev for refcount + */ +void i40iw_add_devusecount(struct i40iw_device *iwdev) +{ + atomic64_inc(&iwdev->use_count); +} + +/** + * i40iw_rem_devusecount - decrement refcount for dev + * @iwdev: device + */ +void i40iw_rem_devusecount(struct i40iw_device *iwdev) +{ + if (!atomic64_dec_and_test(&iwdev->use_count)) + return; + wake_up(&iwdev->close_wq); +} + +/** + * i40iw_add_pdusecount - add pd refcount + * @iwpd: pd for refcount + */ +void i40iw_add_pdusecount(struct i40iw_pd *iwpd) +{ + atomic_inc(&iwpd->usecount); +} + +/** + * i40iw_rem_pdusecount - decrement refcount for pd and free if 0 + * @iwpd: pd for refcount + * @iwdev: iwarp device + */ +void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev) +{ + if (!atomic_dec_and_test(&iwpd->usecount)) + return; + i40iw_free_resource(iwdev, iwdev->allocated_pds, iwpd->sc_pd.pd_id); + kfree(iwpd); +} + +/** + * i40iw_add_ref - add refcount for qp + * @ibqp: iqarp qp + */ +void i40iw_add_ref(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp = (struct i40iw_qp *)ibqp; + + atomic_inc(&iwqp->refcount); +} + +/** + * i40iw_rem_ref - rem refcount for qp and free if 0 + * @ibqp: iqarp qp + */ +void i40iw_rem_ref(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp; + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev; + u32 qp_num; + unsigned long flags; + + iwqp = to_iwqp(ibqp); + iwdev = iwqp->iwdev; + spin_lock_irqsave(&iwdev->qptable_lock, flags); + if (!atomic_dec_and_test(&iwqp->refcount)) { + spin_unlock_irqrestore(&iwdev->qptable_lock, flags); + return; + } + + qp_num = iwqp->ibqp.qp_num; + iwdev->qp_table[qp_num] = NULL; + spin_unlock_irqrestore(&iwdev->qptable_lock, flags); + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return; + + cqp_request->callback_fcn = i40iw_free_qp; + cqp_request->param = (void *)&iwqp->sc_qp; + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_QP_DESTROY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_destroy.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; + 
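+	/* OP_QP_DESTROY is posted asynchronously; i40iw_free_qp() runs as the completion callback to release the QP resources */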
cqp_info->in.u.qp_destroy.remove_hash_idx = true; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (!status) + return; + + i40iw_rem_pdusecount(iwqp->iwpd, iwdev); + i40iw_free_qp_resources(iwdev, iwqp, qp_num); + i40iw_rem_devusecount(iwdev); +} + +/** + * i40iw_get_qp - get qp address + * @device: iwarp device + * @qpn: qp number + */ +struct ib_qp *i40iw_get_qp(struct ib_device *device, int qpn) +{ + struct i40iw_device *iwdev = to_iwdev(device); + + if ((qpn < IW_FIRST_QPN) || (qpn >= iwdev->max_qp)) + return NULL; + + return &iwdev->qp_table[qpn]->ibqp; +} + +/** + * i40iw_debug_buf - print debug msg and buffer is mask set + * @dev: hardware control device structure + * @mask: mask to compare if to print debug buffer + * @buf: points buffer addr + * @size: saize of buffer to print + */ +void i40iw_debug_buf(struct i40iw_sc_dev *dev, + enum i40iw_debug_flag mask, + char *desc, + u64 *buf, + u32 size) +{ + u32 i; + + if (!(dev->debug_mask & mask)) + return; + i40iw_debug(dev, mask, "%s\n", desc); + i40iw_debug(dev, mask, "starting address virt=%p phy=%llxh\n", buf, + (unsigned long long)virt_to_phys(buf)); + + for (i = 0; i < size; i += 8) + i40iw_debug(dev, mask, "index %03d val: %016llx\n", i, buf[i / 8]); +} + +/** + * i40iw_get_hw_addr - return hw addr + * @par: points to shared dev + */ +u8 __iomem *i40iw_get_hw_addr(void *par) +{ + struct i40iw_sc_dev *dev = (struct i40iw_sc_dev *)par; + + return dev->hw->hw_addr; +} + +/** + * i40iw_remove_head - return head entry and remove from list + * @list: list for entry + */ +void *i40iw_remove_head(struct list_head *list) +{ + struct list_head *entry; + + if (list_empty(list)) + return NULL; + + entry = (void *)list->next; + list_del(entry); + return (void *)entry; +} + +/** + * i40iw_allocate_dma_mem - Memory alloc helper fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + * @alignment: what to align the allocation to + */ +enum i40iw_status_code i40iw_allocate_dma_mem(struct i40iw_hw *hw, + struct i40iw_dma_mem *mem, + u64 size, + u32 alignment) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + + if (!mem) + return I40IW_ERR_PARAM; + mem->size = ALIGN(size, alignment); + mem->va = dma_zalloc_coherent(&pcidev->dev, mem->size, + (dma_addr_t *)&mem->pa, GFP_KERNEL); + if (!mem->va) + return I40IW_ERR_NO_MEMORY; + return 0; +} + +/** + * i40iw_free_dma_mem - Memory free helper fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + */ +void i40iw_free_dma_mem(struct i40iw_hw *hw, struct i40iw_dma_mem *mem) +{ + struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context; + + if (!mem || !mem->va) + return; + + dma_free_coherent(&pcidev->dev, mem->size, + mem->va, (dma_addr_t)mem->pa); + mem->va = NULL; +} + +/** + * i40iw_allocate_virt_mem - virtual memory alloc helper fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to fill out + * @size: size of memory requested + */ +enum i40iw_status_code i40iw_allocate_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem, + u32 size) +{ + if (!mem) + return I40IW_ERR_PARAM; + + mem->size = size; + mem->va = kzalloc(size, GFP_KERNEL); + + if (mem->va) + return 0; + else + return I40IW_ERR_NO_MEMORY; +} + +/** + * i40iw_free_virt_mem - virtual memory free helper fn + * @hw: pointer to the HW structure + * @mem: ptr to mem struct to free + */ +enum i40iw_status_code i40iw_free_virt_mem(struct i40iw_hw *hw, + struct i40iw_virt_mem *mem) +{ + if (!mem) + return 
I40IW_ERR_PARAM; + /* + * mem->va points to the parent of mem, so both mem and mem->va + * can not be touched once mem->va is freed + */ + kfree(mem->va); + return 0; +} + +/** + * i40iw_cqp_sds_cmd - create cqp command for sd + * @dev: hardware control device structure + * @sd_info: information for sd cqp + * + */ +enum i40iw_status_code i40iw_cqp_sds_cmd(struct i40iw_sc_dev *dev, + struct i40iw_update_sds_info *sdinfo) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + memcpy(&cqp_info->in.u.update_pe_sds.info, sdinfo, + sizeof(cqp_info->in.u.update_pe_sds.info)); + cqp_info->cqp_cmd = OP_UPDATE_PE_SDS; + cqp_info->post_sq = 1; + cqp_info->in.u.update_pe_sds.dev = dev; + cqp_info->in.u.update_pe_sds.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Update SD's fail"); + return status; +} + +/** + * i40iw_qp_suspend_resume - cqp command for suspend/resume + * @dev: hardware control device structure + * @qp: hardware control qp + * @suspend: flag if suspend or resume + */ +void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend) +{ + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + struct i40iw_cqp_request *cqp_request; + struct i40iw_sc_cqp *cqp = dev->cqp; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = (suspend) ? 
OP_SUSPEND : OP_RESUME; + cqp_info->in.u.suspend_resume.cqp = cqp; + cqp_info->in.u.suspend_resume.qp = qp; + cqp_info->in.u.suspend_resume.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP QP Suspend/Resume fail"); +} + +/** + * i40iw_term_modify_qp - modify qp for term message + * @qp: hardware control qp + * @next_state: qp's next state + * @term: terminate code + * @term_len: length + */ +void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len) +{ + struct i40iw_qp *iwqp; + + iwqp = (struct i40iw_qp *)qp->back_qp; + i40iw_next_iw_state(iwqp, next_state, 0, term, term_len); +}; + +/** + * i40iw_terminate_done - after terminate is completed + * @qp: hardware control qp + * @timeout_occurred: indicates if terminate timer expired + */ +void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred) +{ + struct i40iw_qp *iwqp; + u32 next_iwarp_state = I40IW_QP_STATE_ERROR; + u8 hte = 0; + bool first_time; + unsigned long flags; + + iwqp = (struct i40iw_qp *)qp->back_qp; + spin_lock_irqsave(&iwqp->lock, flags); + if (iwqp->hte_added) { + iwqp->hte_added = 0; + hte = 1; + } + first_time = !(qp->term_flags & I40IW_TERM_DONE); + qp->term_flags |= I40IW_TERM_DONE; + spin_unlock_irqrestore(&iwqp->lock, flags); + if (first_time) { + if (!timeout_occurred) + i40iw_terminate_del_timer(qp); + else + next_iwarp_state = I40IW_QP_STATE_CLOSING; + + i40iw_next_iw_state(iwqp, next_iwarp_state, hte, 0, 0); + i40iw_cm_disconn(iwqp); + } +} + +/** + * i40iw_terminate_imeout - timeout happened + * @context: points to iwarp qp + */ +static void i40iw_terminate_timeout(struct timer_list *t) +{ + struct i40iw_qp *iwqp = from_timer(iwqp, t, terminate_timer); + struct i40iw_sc_qp *qp = (struct i40iw_sc_qp *)&iwqp->sc_qp; + + i40iw_terminate_done(qp, 1); + i40iw_rem_ref(&iwqp->ibqp); +} + +/** + * i40iw_terminate_start_timer - start terminate timeout + * @qp: hardware control qp + */ +void i40iw_terminate_start_timer(struct i40iw_sc_qp *qp) +{ + struct i40iw_qp *iwqp; + + iwqp = (struct i40iw_qp *)qp->back_qp; + i40iw_add_ref(&iwqp->ibqp); + timer_setup(&iwqp->terminate_timer, i40iw_terminate_timeout, 0); + iwqp->terminate_timer.expires = jiffies + HZ; + add_timer(&iwqp->terminate_timer); +} + +/** + * i40iw_terminate_del_timer - delete terminate timeout + * @qp: hardware control qp + */ +void i40iw_terminate_del_timer(struct i40iw_sc_qp *qp) +{ + struct i40iw_qp *iwqp; + + iwqp = (struct i40iw_qp *)qp->back_qp; + if (del_timer(&iwqp->terminate_timer)) + i40iw_rem_ref(&iwqp->ibqp); +} + +/** + * i40iw_cqp_generic_worker - generic worker for cqp + * @work: work pointer + */ +static void i40iw_cqp_generic_worker(struct work_struct *work) +{ + struct i40iw_virtchnl_work_info *work_info = + &((struct virtchnl_work *)work)->work_info; + + if (work_info->worker_vf_dev) + work_info->callback_fcn(work_info->worker_vf_dev); +} + +/** + * i40iw_cqp_spawn_worker - spawn worket thread + * @iwdev: device struct pointer + * @work_info: work request info + * @iw_vf_idx: virtual function index + */ +void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_work_info *work_info, + u32 iw_vf_idx) +{ + struct virtchnl_work *work; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + work = &iwdev->virtchnl_w[iw_vf_idx]; + memcpy(&work->work_info, work_info, sizeof(*work_info)); + INIT_WORK(&work->work, i40iw_cqp_generic_worker); + queue_work(iwdev->virtchnl_wq, &work->work); +} + +/** + 
* i40iw_cqp_manage_hmc_fcn_worker - + * @work: work pointer for hmc info + */ +static void i40iw_cqp_manage_hmc_fcn_worker(struct work_struct *work) +{ + struct i40iw_cqp_request *cqp_request = + ((struct virtchnl_work *)work)->cqp_request; + struct i40iw_ccq_cqe_info ccq_cqe_info; + struct i40iw_hmc_fcn_info *hmcfcninfo = + &cqp_request->info.in.u.manage_hmc_pm.info; + struct i40iw_device *iwdev = + (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev->back_dev; + + ccq_cqe_info.cqp = NULL; + ccq_cqe_info.maj_err_code = cqp_request->compl_info.maj_err_code; + ccq_cqe_info.min_err_code = cqp_request->compl_info.min_err_code; + ccq_cqe_info.op_code = cqp_request->compl_info.op_code; + ccq_cqe_info.op_ret_val = cqp_request->compl_info.op_ret_val; + ccq_cqe_info.scratch = 0; + ccq_cqe_info.error = cqp_request->compl_info.error; + hmcfcninfo->callback_fcn(cqp_request->info.in.u.manage_hmc_pm.dev, + hmcfcninfo->cqp_callback_param, &ccq_cqe_info); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); +} + +/** + * i40iw_cqp_manage_hmc_fcn_callback - called function after cqp completion + * @cqp_request: cqp request info struct for hmc fun + * @unused: unused param of callback + */ +static void i40iw_cqp_manage_hmc_fcn_callback(struct i40iw_cqp_request *cqp_request, + u32 unused) +{ + struct virtchnl_work *work; + struct i40iw_hmc_fcn_info *hmcfcninfo = + &cqp_request->info.in.u.manage_hmc_pm.info; + struct i40iw_device *iwdev = + (struct i40iw_device *)cqp_request->info.in.u.manage_hmc_pm.dev-> + back_dev; + + if (hmcfcninfo && hmcfcninfo->callback_fcn) { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s1\n", __func__); + atomic_inc(&cqp_request->refcount); + work = &iwdev->virtchnl_w[hmcfcninfo->iw_vf_idx]; + work->cqp_request = cqp_request; + INIT_WORK(&work->work, i40iw_cqp_manage_hmc_fcn_worker); + queue_work(iwdev->virtchnl_wq, &work->work); + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s2\n", __func__); + } else { + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s: Something wrong\n", __func__); + } +} + +/** + * i40iw_cqp_manage_hmc_fcn_cmd - issue cqp command to manage hmc + * @dev: hardware control device structure + * @hmcfcninfo: info for hmc + */ +enum i40iw_status_code i40iw_cqp_manage_hmc_fcn_cmd(struct i40iw_sc_dev *dev, + struct i40iw_hmc_fcn_info *hmcfcninfo) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_HMC, "%s\n", __func__); + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + cqp_request->callback_fcn = i40iw_cqp_manage_hmc_fcn_callback; + cqp_request->param = hmcfcninfo; + memcpy(&cqp_info->in.u.manage_hmc_pm.info, hmcfcninfo, + sizeof(*hmcfcninfo)); + cqp_info->in.u.manage_hmc_pm.dev = dev; + cqp_info->cqp_cmd = OP_MANAGE_HMC_PM_FUNC_TABLE; + cqp_info->post_sq = 1; + cqp_info->in.u.manage_hmc_pm.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Manage HMC fail"); + return status; +} + +/** + * i40iw_cqp_query_fpm_values_cmd - send cqp command for fpm + * @iwdev: function device struct + * @values_mem: buffer for fpm + * @hmc_fn_id: function id for fpm + */ +enum i40iw_status_code i40iw_cqp_query_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id) +{ + enum i40iw_status_code status; 
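/*
 * Illustrative sketch (not part of this driver): a caller would
 * typically pair this query-FPM command with an aligned DMA buffer
 * obtained from i40iw_alloc_query_fpm_buf(), along the lines of:
 *
 *     struct i40iw_dma_mem fpm_mem;
 *
 *     if (!i40iw_alloc_query_fpm_buf(dev, &fpm_mem) &&
 *         !i40iw_cqp_query_fpm_values_cmd(dev, &fpm_mem, hmc_fn_id))
 *             parse_fpm_values(fpm_mem.va);
 *
 * where parse_fpm_values() is a hypothetical consumer of the returned
 * buffer; error handling and freeing of the DMA memory are omitted.
 */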
+ struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + cqp_request->param = NULL; + cqp_info->in.u.query_fpm_values.cqp = dev->cqp; + cqp_info->in.u.query_fpm_values.fpm_values_pa = values_mem->pa; + cqp_info->in.u.query_fpm_values.fpm_values_va = values_mem->va; + cqp_info->in.u.query_fpm_values.hmc_fn_id = hmc_fn_id; + cqp_info->cqp_cmd = OP_QUERY_FPM_VALUES; + cqp_info->post_sq = 1; + cqp_info->in.u.query_fpm_values.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Query FPM fail"); + return status; +} + +/** + * i40iw_cqp_commit_fpm_values_cmd - commit fpm values in hw + * @dev: hardware control device structure + * @values_mem: buffer with fpm values + * @hmc_fn_id: function id for fpm + */ +enum i40iw_status_code i40iw_cqp_commit_fpm_values_cmd(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *values_mem, + u8 hmc_fn_id) +{ + enum i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + cqp_info = &cqp_request->info; + cqp_request->param = NULL; + cqp_info->in.u.commit_fpm_values.cqp = dev->cqp; + cqp_info->in.u.commit_fpm_values.fpm_values_pa = values_mem->pa; + cqp_info->in.u.commit_fpm_values.fpm_values_va = values_mem->va; + cqp_info->in.u.commit_fpm_values.hmc_fn_id = hmc_fn_id; + cqp_info->cqp_cmd = OP_COMMIT_FPM_VALUES; + cqp_info->post_sq = 1; + cqp_info->in.u.commit_fpm_values.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP Commit FPM fail"); + return status; +} + +/** + * i40iw_vf_wait_vchnl_resp - wait for channel msg + * @iwdev: function's device struct + */ +enum i40iw_status_code i40iw_vf_wait_vchnl_resp(struct i40iw_sc_dev *dev) +{ + struct i40iw_device *iwdev = dev->back_dev; + int timeout_ret; + + i40iw_debug(dev, I40IW_DEBUG_VIRT, "%s[%u] dev %p, iwdev %p\n", + __func__, __LINE__, dev, iwdev); + + atomic_set(&iwdev->vchnl_msgs, 2); + timeout_ret = wait_event_timeout(iwdev->vchnl_waitq, + (atomic_read(&iwdev->vchnl_msgs) == 1), + I40IW_VCHNL_EVENT_TIMEOUT); + atomic_dec(&iwdev->vchnl_msgs); + if (!timeout_ret) { + i40iw_pr_err("virt channel completion timeout = 0x%x\n", timeout_ret); + atomic_set(&iwdev->vchnl_msgs, 0); + dev->vchnl_up = false; + return I40IW_ERR_TIMEOUT; + } + wake_up(&dev->vf_reqs); + return 0; +} + +/** + * i40iw_cqp_cq_create_cmd - create a cq for the cqp + * @dev: device pointer + * @cq: pointer to created cq + */ +enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, + struct i40iw_sc_cq *cq) +{ + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status; + + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) + return I40IW_ERR_NO_MEMORY; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_CQ_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_create.cq = cq; + cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; + status = 
i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Create CQ fail");
+
+ return status;
+}
+
+/**
+ * i40iw_cqp_qp_create_cmd - create a qp for the cqp
+ * @dev: device pointer
+ * @qp: pointer to created qp
+ */
+enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev,
+ struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ struct i40iw_create_qp_info *qp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return I40IW_ERR_NO_MEMORY;
+
+ cqp_info = &cqp_request->info;
+ qp_info = &cqp_request->info.in.u.qp_create.info;
+
+ memset(qp_info, 0, sizeof(*qp_info));
+
+ qp_info->cq_num_valid = true;
+ qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;
+
+ cqp_info->cqp_cmd = OP_QP_CREATE;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_create.qp = qp;
+ cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP QP create fail");
+ return status;
+}
+
+/**
+ * i40iw_cqp_cq_destroy_cmd - destroy the cqp cq
+ * @dev: device pointer
+ * @cq: pointer to cq
+ */
+void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_cq_wq_destroy(iwdev, cq);
+}
+
+/**
+ * i40iw_cqp_qp_destroy_cmd - destroy the cqp
+ * @dev: device pointer
+ * @qp: pointer to qp
+ */
+void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+ struct i40iw_cqp *iwcqp = &iwdev->cqp;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+ enum i40iw_status_code status;
+
+ cqp_request = i40iw_get_cqp_request(iwcqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+ memset(cqp_info, 0, sizeof(*cqp_info));
+
+ cqp_info->cqp_cmd = OP_QP_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.qp_destroy.qp = qp;
+ cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request;
+ cqp_info->in.u.qp_destroy.remove_hash_idx = true;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP QP_DESTROY fail");
+}
+
+
+/**
+ * i40iw_ieq_mpa_crc_ae - generate AE for crc error
+ * @dev: hardware control device structure
+ * @qp: hardware control qp
+ */
+void i40iw_ieq_mpa_crc_ae(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
+{
+ struct i40iw_gen_ae_info info;
+ struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
+
+ i40iw_debug(dev, I40IW_DEBUG_AEQ, "%s entered\n", __func__);
+ info.ae_code = I40IW_AE_LLP_RECEIVED_MPA_CRC_ERROR;
+ info.ae_source = I40IW_AE_SOURCE_RQ;
+ i40iw_gen_ae(iwdev, qp, &info, false);
+}
+
+/**
+ * i40iw_init_hash_desc - initialize hash for crc calculation
+ * @desc: address of pointer to be set to the allocated hash descriptor
+ */
+enum i40iw_status_code i40iw_init_hash_desc(struct shash_desc **desc)
+{
+ struct crypto_shash *tfm;
+ struct shash_desc *tdesc;
+
+ tfm = crypto_alloc_shash("crc32c", 0, 0);
+ if (IS_ERR(tfm))
+ return I40IW_ERR_MPA_CRC;
+
+ tdesc = kzalloc(sizeof(*tdesc) + crypto_shash_descsize(tfm),
+ GFP_KERNEL);
+ if (!tdesc) {
+ crypto_free_shash(tfm);
+ return I40IW_ERR_MPA_CRC;
+ }
+ tdesc->tfm = tfm;
+ *desc = tdesc;
+
+ return 0;
+}
+
+/**
+ * i40iw_free_hash_desc - free hash desc
+ * @desc: to be freed
+ */
+void
i40iw_free_hash_desc(struct shash_desc *desc) +{ + if (desc) { + crypto_free_shash(desc->tfm); + kfree(desc); + } +} + +/** + * i40iw_alloc_query_fpm_buf - allocate buffer for fpm + * @dev: hardware control device structure + * @mem: buffer ptr for fpm to be allocated + * @return: memory allocation status + */ +enum i40iw_status_code i40iw_alloc_query_fpm_buf(struct i40iw_sc_dev *dev, + struct i40iw_dma_mem *mem) +{ + enum i40iw_status_code status; + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + + status = i40iw_obj_aligned_mem(iwdev, mem, I40IW_QUERY_FPM_BUF_SIZE, + I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); + return status; +} + +/** + * i40iw_ieq_check_mpacrc - check if mpa crc is OK + * @desc: desc for hash + * @addr: address of buffer for crc + * @length: length of buffer + * @value: value to be compared + */ +enum i40iw_status_code i40iw_ieq_check_mpacrc(struct shash_desc *desc, + void *addr, + u32 length, + u32 value) +{ + u32 crc = 0; + int ret; + enum i40iw_status_code ret_code = 0; + + crypto_shash_init(desc); + ret = crypto_shash_update(desc, addr, length); + if (!ret) + crypto_shash_final(desc, (u8 *)&crc); + if (crc != value) { + i40iw_pr_err("mpa crc check fail\n"); + ret_code = I40IW_ERR_MPA_CRC; + } + return ret_code; +} + +/** + * i40iw_ieq_get_qp - get qp based on quad in puda buffer + * @dev: hardware control device structure + * @buf: receive puda buffer on exception q + */ +struct i40iw_sc_qp *i40iw_ieq_get_qp(struct i40iw_sc_dev *dev, + struct i40iw_puda_buf *buf) +{ + struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; + struct i40iw_qp *iwqp; + struct i40iw_cm_node *cm_node; + u32 loc_addr[4], rem_addr[4]; + u16 loc_port, rem_port; + struct ipv6hdr *ip6h; + struct iphdr *iph = (struct iphdr *)buf->iph; + struct tcphdr *tcph = (struct tcphdr *)buf->tcph; + + if (iph->version == 4) { + memset(loc_addr, 0, sizeof(loc_addr)); + loc_addr[0] = ntohl(iph->daddr); + memset(rem_addr, 0, sizeof(rem_addr)); + rem_addr[0] = ntohl(iph->saddr); + } else { + ip6h = (struct ipv6hdr *)buf->iph; + i40iw_copy_ip_ntohl(loc_addr, ip6h->daddr.in6_u.u6_addr32); + i40iw_copy_ip_ntohl(rem_addr, ip6h->saddr.in6_u.u6_addr32); + } + loc_port = ntohs(tcph->dest); + rem_port = ntohs(tcph->source); + + cm_node = i40iw_find_node(&iwdev->cm_core, rem_port, rem_addr, loc_port, + loc_addr, false, true); + if (!cm_node) + return NULL; + iwqp = cm_node->iwqp; + return &iwqp->sc_qp; +} + +/** + * i40iw_ieq_update_tcpip_info - update tcpip in the buffer + * @buf: puda to update + * @length: length of buffer + * @seqnum: seq number for tcp + */ +void i40iw_ieq_update_tcpip_info(struct i40iw_puda_buf *buf, u16 length, u32 seqnum) +{ + struct tcphdr *tcph; + struct iphdr *iph; + u16 iphlen; + u16 packetsize; + u8 *addr = (u8 *)buf->mem.va; + + iphlen = (buf->ipv4) ? 
20 : 40; + iph = (struct iphdr *)(addr + buf->maclen); + tcph = (struct tcphdr *)(addr + buf->maclen + iphlen); + packetsize = length + buf->tcphlen + iphlen; + + iph->tot_len = htons(packetsize); + tcph->seq = htonl(seqnum); +} + +/** + * i40iw_puda_get_tcpip_info - get tcpip info from puda buffer + * @info: to get information + * @buf: puda buffer + */ +enum i40iw_status_code i40iw_puda_get_tcpip_info(struct i40iw_puda_completion_info *info, + struct i40iw_puda_buf *buf) +{ + struct iphdr *iph; + struct ipv6hdr *ip6h; + struct tcphdr *tcph; + u16 iphlen; + u16 pkt_len; + u8 *mem = (u8 *)buf->mem.va; + struct ethhdr *ethh = (struct ethhdr *)buf->mem.va; + + if (ethh->h_proto == htons(0x8100)) { + info->vlan_valid = true; + buf->vlan_id = ntohs(((struct vlan_ethhdr *)ethh)->h_vlan_TCI) & VLAN_VID_MASK; + } + buf->maclen = (info->vlan_valid) ? 18 : 14; + iphlen = (info->l3proto) ? 40 : 20; + buf->ipv4 = (info->l3proto) ? false : true; + buf->iph = mem + buf->maclen; + iph = (struct iphdr *)buf->iph; + + buf->tcph = buf->iph + iphlen; + tcph = (struct tcphdr *)buf->tcph; + + if (buf->ipv4) { + pkt_len = ntohs(iph->tot_len); + } else { + ip6h = (struct ipv6hdr *)buf->iph; + pkt_len = ntohs(ip6h->payload_len) + iphlen; + } + + buf->totallen = pkt_len + buf->maclen; + + if (info->payload_len < buf->totallen) { + i40iw_pr_err("payload_len = 0x%x totallen expected0x%x\n", + info->payload_len, buf->totallen); + return I40IW_ERR_INVALID_SIZE; + } + + buf->tcphlen = (tcph->doff) << 2; + buf->datalen = pkt_len - iphlen - buf->tcphlen; + buf->data = (buf->datalen) ? buf->tcph + buf->tcphlen : NULL; + buf->hdrlen = buf->maclen + iphlen + buf->tcphlen; + buf->seqnum = ntohl(tcph->seq); + return 0; +} + +/** + * i40iw_hw_stats_timeout - Stats timer-handler which updates all HW stats + * @vsi: pointer to the vsi structure + */ +static void i40iw_hw_stats_timeout(struct timer_list *t) +{ + struct i40iw_vsi_pestat *pf_devstat = from_timer(pf_devstat, t, + stats_timer); + struct i40iw_sc_vsi *sc_vsi = pf_devstat->vsi; + struct i40iw_sc_dev *pf_dev = sc_vsi->dev; + struct i40iw_vsi_pestat *vf_devstat = NULL; + u16 iw_vf_idx; + unsigned long flags; + + /*PF*/ + i40iw_hw_stats_read_all(pf_devstat, &pf_devstat->hw_stats); + + for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { + spin_lock_irqsave(&pf_devstat->lock, flags); + if (pf_dev->vf_dev[iw_vf_idx]) { + if (pf_dev->vf_dev[iw_vf_idx]->stats_initialized) { + vf_devstat = &pf_dev->vf_dev[iw_vf_idx]->pestat; + i40iw_hw_stats_read_all(vf_devstat, &vf_devstat->hw_stats); + } + } + spin_unlock_irqrestore(&pf_devstat->lock, flags); + } + + mod_timer(&pf_devstat->stats_timer, + jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); +} + +/** + * i40iw_hw_stats_start_timer - Start periodic stats timer + * @vsi: pointer to the vsi structure + */ +void i40iw_hw_stats_start_timer(struct i40iw_sc_vsi *vsi) +{ + struct i40iw_vsi_pestat *devstat = vsi->pestat; + + timer_setup(&devstat->stats_timer, i40iw_hw_stats_timeout, 0); + mod_timer(&devstat->stats_timer, + jiffies + msecs_to_jiffies(STATS_TIMER_DELAY)); +} + +/** + * i40iw_hw_stats_stop_timer - Delete periodic stats timer + * @vsi: pointer to the vsi structure + */ +void i40iw_hw_stats_stop_timer(struct i40iw_sc_vsi *vsi) +{ + struct i40iw_vsi_pestat *devstat = vsi->pestat; + + del_timer_sync(&devstat->stats_timer); +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c new file mode 100644 index 000000000..314d19153 --- /dev/null +++ 
b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -0,0 +1,2903 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/random.h> +#include <linux/highmem.h> +#include <linux/time.h> +#include <linux/hugetlb.h> +#include <linux/irq.h> +#include <asm/byteorder.h> +#include <net/ip.h> +#include <rdma/ib_verbs.h> +#include <rdma/iw_cm.h> +#include <rdma/ib_user_verbs.h> +#include <rdma/ib_umem.h> +#include "i40iw.h" + +/** + * i40iw_query_device - get device attributes + * @ibdev: device pointer from stack + * @props: returning device attributes + * @udata: user data + */ +static int i40iw_query_device(struct ib_device *ibdev, + struct ib_device_attr *props, + struct ib_udata *udata) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + + if (udata->inlen || udata->outlen) + return -EINVAL; + memset(props, 0, sizeof(*props)); + ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr); + props->fw_ver = I40IW_FW_VERSION; + props->device_cap_flags = iwdev->device_cap_flags; + props->vendor_id = iwdev->ldev->pcidev->vendor; + props->vendor_part_id = iwdev->ldev->pcidev->device; + props->hw_ver = (u32)iwdev->sc_dev.hw_rev; + props->max_mr_size = I40IW_MAX_OUTBOUND_MESSAGE_SIZE; + props->max_qp = iwdev->max_qp - iwdev->used_qps; + props->max_qp_wr = I40IW_MAX_QP_WRS; + props->max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + props->max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + props->max_cq = iwdev->max_cq - iwdev->used_cqs; + props->max_cqe = iwdev->max_cqe; + props->max_mr = iwdev->max_mr - iwdev->used_mrs; + props->max_pd = iwdev->max_pd - iwdev->used_pds; + props->max_sge_rd = I40IW_MAX_SGE_RD; + props->max_qp_rd_atom = I40IW_MAX_IRD_SIZE; + props->max_qp_init_rd_atom = props->max_qp_rd_atom; + props->atomic_cap = IB_ATOMIC_NONE; + props->max_map_per_fmr = 1; + props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR; + return 0; +} + +/** + * i40iw_query_port - get port attrubutes + * @ibdev: device pointer from stack + * @port: port number for 
query + * @props: returning device attributes + */ +static int i40iw_query_port(struct ib_device *ibdev, + u8 port, + struct ib_port_attr *props) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct net_device *netdev = iwdev->netdev; + + /* props being zeroed by the caller, avoid zeroing it here */ + props->max_mtu = IB_MTU_4096; + props->active_mtu = ib_mtu_int_to_enum(netdev->mtu); + + props->lid = 1; + if (netif_carrier_ok(iwdev->netdev)) + props->state = IB_PORT_ACTIVE; + else + props->state = IB_PORT_DOWN; + props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP | + IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP; + props->gid_tbl_len = 1; + props->pkey_tbl_len = 1; + props->active_width = IB_WIDTH_4X; + props->active_speed = 1; + props->max_msg_sz = I40IW_MAX_OUTBOUND_MESSAGE_SIZE; + return 0; +} + +/** + * i40iw_alloc_ucontext - Allocate the user context data structure + * @ibdev: device pointer from stack + * @udata: user data + * + * This keeps track of all objects associated with a particular + * user-mode client. + */ +static struct ib_ucontext *i40iw_alloc_ucontext(struct ib_device *ibdev, + struct ib_udata *udata) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_alloc_ucontext_req req; + struct i40iw_alloc_ucontext_resp uresp; + struct i40iw_ucontext *ucontext; + + if (ib_copy_from_udata(&req, udata, sizeof(req))) + return ERR_PTR(-EINVAL); + + if (req.userspace_ver < 4 || req.userspace_ver > I40IW_ABI_VER) { + i40iw_pr_err("Unsupported provider library version %u.\n", req.userspace_ver); + return ERR_PTR(-EINVAL); + } + + memset(&uresp, 0, sizeof(uresp)); + uresp.max_qps = iwdev->max_qp; + uresp.max_pds = iwdev->max_pd; + uresp.wq_size = iwdev->max_qp_wr * 2; + uresp.kernel_ver = req.userspace_ver; + + ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL); + if (!ucontext) + return ERR_PTR(-ENOMEM); + + ucontext->iwdev = iwdev; + ucontext->abi_ver = req.userspace_ver; + + if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { + kfree(ucontext); + return ERR_PTR(-EFAULT); + } + + INIT_LIST_HEAD(&ucontext->cq_reg_mem_list); + spin_lock_init(&ucontext->cq_reg_mem_list_lock); + INIT_LIST_HEAD(&ucontext->qp_reg_mem_list); + spin_lock_init(&ucontext->qp_reg_mem_list_lock); + + return &ucontext->ibucontext; +} + +/** + * i40iw_dealloc_ucontext - deallocate the user context data structure + * @context: user context created during alloc + */ +static int i40iw_dealloc_ucontext(struct ib_ucontext *context) +{ + struct i40iw_ucontext *ucontext = to_ucontext(context); + unsigned long flags; + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + if (!list_empty(&ucontext->cq_reg_mem_list)) { + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + return -EBUSY; + } + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + if (!list_empty(&ucontext->qp_reg_mem_list)) { + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + return -EBUSY; + } + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + + kfree(ucontext); + return 0; +} + +/** + * i40iw_mmap - user memory map + * @context: context created during alloc + * @vma: kernel info for user memory map + */ +static int i40iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma) +{ + struct i40iw_ucontext *ucontext = to_ucontext(context); + u64 dbaddr; + + if (vma->vm_pgoff || vma->vm_end - vma->vm_start != PAGE_SIZE) + return -EINVAL; + + dbaddr = I40IW_DB_ADDR_OFFSET + 
pci_resource_start(ucontext->iwdev->ldev->pcidev, 0); + + if (io_remap_pfn_range(vma, vma->vm_start, dbaddr >> PAGE_SHIFT, PAGE_SIZE, + pgprot_noncached(vma->vm_page_prot))) + return -EAGAIN; + + return 0; +} + +/** + * i40iw_alloc_push_page - allocate a push page for qp + * @iwdev: iwarp device + * @qp: hardware control qp + */ +static void i40iw_alloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp) +{ + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status; + + if (qp->push_idx != I40IW_INVALID_PUSH_PAGE_INDEX) + return; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return; + + atomic_inc(&cqp_request->refcount); + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE; + cqp_info->post_sq = 1; + + cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle; + cqp_info->in.u.manage_push_page.info.free_page = 0; + cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; + + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (!status) + qp->push_idx = cqp_request->compl_info.op_ret_val; + else + i40iw_pr_err("CQP-OP Push page fail"); + i40iw_put_cqp_request(&iwdev->cqp, cqp_request); +} + +/** + * i40iw_dealloc_push_page - free a push page for qp + * @iwdev: iwarp device + * @qp: hardware control qp + */ +static void i40iw_dealloc_push_page(struct i40iw_device *iwdev, struct i40iw_sc_qp *qp) +{ + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + enum i40iw_status_code status; + + if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) + return; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_MANAGE_PUSH_PAGE; + cqp_info->post_sq = 1; + + cqp_info->in.u.manage_push_page.info.push_idx = qp->push_idx; + cqp_info->in.u.manage_push_page.info.qs_handle = qp->qs_handle; + cqp_info->in.u.manage_push_page.info.free_page = 1; + cqp_info->in.u.manage_push_page.cqp = &iwdev->cqp.sc_cqp; + cqp_info->in.u.manage_push_page.scratch = (uintptr_t)cqp_request; + + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (!status) + qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; + else + i40iw_pr_err("CQP-OP Push page fail"); +} + +/** + * i40iw_alloc_pd - allocate protection domain + * @ibdev: device pointer from stack + * @context: user context created during alloc + * @udata: user data + */ +static struct ib_pd *i40iw_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + struct ib_udata *udata) +{ + struct i40iw_pd *iwpd; + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_alloc_pd_resp uresp; + struct i40iw_sc_pd *sc_pd; + struct i40iw_ucontext *ucontext; + u32 pd_id = 0; + int err; + + if (iwdev->closing) + return ERR_PTR(-ENODEV); + + err = i40iw_alloc_resource(iwdev, iwdev->allocated_pds, + iwdev->max_pd, &pd_id, &iwdev->next_pd); + if (err) { + i40iw_pr_err("alloc resource failed\n"); + return ERR_PTR(err); + } + + iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL); + if (!iwpd) { + err = -ENOMEM; + goto free_res; + } + + sc_pd = &iwpd->sc_pd; + + if (context) { + ucontext = to_ucontext(context); + dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, ucontext->abi_ver); + memset(&uresp, 0, sizeof(uresp)); + uresp.pd_id = pd_id; + if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { + err = -EFAULT; + goto error; + } + } else { + 
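/* no ib_ucontext here: this PD is being allocated by an in-kernel
 * consumer, so pd_init() is called with an ABI version of -1 and no
 * response is copied back to user space.
 */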
dev->iw_pd_ops->pd_init(dev, sc_pd, pd_id, -1); + } + + i40iw_add_pdusecount(iwpd); + return &iwpd->ibpd; +error: + kfree(iwpd); +free_res: + i40iw_free_resource(iwdev, iwdev->allocated_pds, pd_id); + return ERR_PTR(err); +} + +/** + * i40iw_dealloc_pd - deallocate pd + * @ibpd: ptr of pd to be deallocated + */ +static int i40iw_dealloc_pd(struct ib_pd *ibpd) +{ + struct i40iw_pd *iwpd = to_iwpd(ibpd); + struct i40iw_device *iwdev = to_iwdev(ibpd->device); + + i40iw_rem_pdusecount(iwpd, iwdev); + return 0; +} + +/** + * i40iw_get_pbl - Retrieve pbl from a list given a virtual + * address + * @va: user virtual address + * @pbl_list: pbl list to search in (QP's or CQ's) + */ +static struct i40iw_pbl *i40iw_get_pbl(unsigned long va, + struct list_head *pbl_list) +{ + struct i40iw_pbl *iwpbl; + + list_for_each_entry(iwpbl, pbl_list, list) { + if (iwpbl->user_base == va) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + return iwpbl; + } + } + return NULL; +} + +/** + * i40iw_free_qp_resources - free up memory resources for qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @qp_num: qp number assigned + */ +void i40iw_free_qp_resources(struct i40iw_device *iwdev, + struct i40iw_qp *iwqp, + u32 qp_num) +{ + struct i40iw_pbl *iwpbl = &iwqp->iwpbl; + + i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp); + i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp); + if (qp_num) + i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num); + if (iwpbl->pbl_allocated) + i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc); + i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem); + i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem); + kfree(iwqp->kqp.wrid_mem); + iwqp->kqp.wrid_mem = NULL; + kfree(iwqp->allocated_buffer); +} + +/** + * i40iw_clean_cqes - clean cq entries for qp + * @iwqp: qp ptr (user or kernel) + * @iwcq: cq ptr + */ +static void i40iw_clean_cqes(struct i40iw_qp *iwqp, struct i40iw_cq *iwcq) +{ + struct i40iw_cq_uk *ukcq = &iwcq->sc_cq.cq_uk; + + ukcq->ops.iw_cq_clean(&iwqp->sc_qp.qp_uk, ukcq); +} + +/** + * i40iw_destroy_qp - destroy qp + * @ibqp: qp's ib pointer also to get to device's qp address + */ +static int i40iw_destroy_qp(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + + iwqp->destroyed = 1; + + if (iwqp->ibqp_state >= IB_QPS_INIT && iwqp->ibqp_state < IB_QPS_RTS) + i40iw_next_iw_state(iwqp, I40IW_QP_STATE_ERROR, 0, 0, 0); + + if (!iwqp->user_mode) { + if (iwqp->iwscq) { + i40iw_clean_cqes(iwqp, iwqp->iwscq); + if (iwqp->iwrcq != iwqp->iwscq) + i40iw_clean_cqes(iwqp, iwqp->iwrcq); + } + } + + i40iw_rem_ref(&iwqp->ibqp); + return 0; +} + +/** + * i40iw_setup_virt_qp - setup for allocation of virtual qp + * @dev: iwarp device + * @qp: qp ptr + * @init_info: initialize info to return + */ +static int i40iw_setup_virt_qp(struct i40iw_device *iwdev, + struct i40iw_qp *iwqp, + struct i40iw_qp_init_info *init_info) +{ + struct i40iw_pbl *iwpbl = &iwqp->iwpbl; + struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr; + + iwqp->page = qpmr->sq_page; + init_info->shadow_area_pa = cpu_to_le64(qpmr->shadow); + if (iwpbl->pbl_allocated) { + init_info->virtual_map = true; + init_info->sq_pa = qpmr->sq_pbl.idx; + init_info->rq_pa = qpmr->rq_pbl.idx; + } else { + init_info->sq_pa = qpmr->sq_pbl.addr; + init_info->rq_pa = qpmr->rq_pbl.addr; + } + return 0; +} + +/** + * i40iw_setup_kmode_qp - setup initialization for kernel mode qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @info: initialize info to return + */ +static int i40iw_setup_kmode_qp(struct 
i40iw_device *iwdev, + struct i40iw_qp *iwqp, + struct i40iw_qp_init_info *info) +{ + struct i40iw_dma_mem *mem = &iwqp->kqp.dma_mem; + u32 sqdepth, rqdepth; + u8 sqshift; + u32 size; + enum i40iw_status_code status; + struct i40iw_qp_uk_init_info *ukinfo = &info->qp_uk_init_info; + + i40iw_get_wqe_shift(ukinfo->max_sq_frag_cnt, ukinfo->max_inline_data, &sqshift); + status = i40iw_get_sqdepth(ukinfo->sq_size, sqshift, &sqdepth); + if (status) + return -ENOMEM; + + status = i40iw_get_rqdepth(ukinfo->rq_size, I40IW_MAX_RQ_WQE_SHIFT, &rqdepth); + if (status) + return -ENOMEM; + + size = sqdepth * sizeof(struct i40iw_sq_uk_wr_trk_info) + (rqdepth << 3); + iwqp->kqp.wrid_mem = kzalloc(size, GFP_KERNEL); + + ukinfo->sq_wrtrk_array = (struct i40iw_sq_uk_wr_trk_info *)iwqp->kqp.wrid_mem; + if (!ukinfo->sq_wrtrk_array) + return -ENOMEM; + + ukinfo->rq_wrid_array = (u64 *)&ukinfo->sq_wrtrk_array[sqdepth]; + + size = (sqdepth + rqdepth) * I40IW_QP_WQE_MIN_SIZE; + size += (I40IW_SHADOW_AREA_SIZE << 3); + + status = i40iw_allocate_dma_mem(iwdev->sc_dev.hw, mem, size, 256); + if (status) { + kfree(ukinfo->sq_wrtrk_array); + ukinfo->sq_wrtrk_array = NULL; + return -ENOMEM; + } + + ukinfo->sq = mem->va; + info->sq_pa = mem->pa; + + ukinfo->rq = &ukinfo->sq[sqdepth]; + info->rq_pa = info->sq_pa + (sqdepth * I40IW_QP_WQE_MIN_SIZE); + + ukinfo->shadow_area = ukinfo->rq[rqdepth].elem; + info->shadow_area_pa = info->rq_pa + (rqdepth * I40IW_QP_WQE_MIN_SIZE); + + ukinfo->sq_size = sqdepth >> sqshift; + ukinfo->rq_size = rqdepth >> I40IW_MAX_RQ_WQE_SHIFT; + ukinfo->qp_id = iwqp->ibqp.qp_num; + return 0; +} + +/** + * i40iw_create_qp - create qp + * @ibpd: ptr of pd + * @init_attr: attributes for qp + * @udata: user data for create qp + */ +static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, + struct ib_qp_init_attr *init_attr, + struct ib_udata *udata) +{ + struct i40iw_pd *iwpd = to_iwpd(ibpd); + struct i40iw_device *iwdev = to_iwdev(ibpd->device); + struct i40iw_cqp *iwcqp = &iwdev->cqp; + struct i40iw_qp *iwqp; + struct i40iw_ucontext *ucontext; + struct i40iw_create_qp_req req; + struct i40iw_create_qp_resp uresp; + u32 qp_num = 0; + void *mem; + enum i40iw_status_code ret; + int err_code; + int sq_size; + int rq_size; + struct i40iw_sc_qp *qp; + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_qp_init_info init_info; + struct i40iw_create_qp_info *qp_info; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + struct i40iw_qp_host_ctx_info *ctx_info; + struct i40iwarp_offload_info *iwarp_info; + unsigned long flags; + + if (iwdev->closing) + return ERR_PTR(-ENODEV); + + if (init_attr->create_flags) + return ERR_PTR(-EINVAL); + if (init_attr->cap.max_inline_data > I40IW_MAX_INLINE_DATA_SIZE) + init_attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; + + if (init_attr->cap.max_send_sge > I40IW_MAX_WQ_FRAGMENT_COUNT) + init_attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + + if (init_attr->cap.max_recv_sge > I40IW_MAX_WQ_FRAGMENT_COUNT) + init_attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + + memset(&init_info, 0, sizeof(init_info)); + + sq_size = init_attr->cap.max_send_wr; + rq_size = init_attr->cap.max_recv_wr; + + init_info.vsi = &iwdev->vsi; + init_info.qp_uk_init_info.sq_size = sq_size; + init_info.qp_uk_init_info.rq_size = rq_size; + init_info.qp_uk_init_info.max_sq_frag_cnt = init_attr->cap.max_send_sge; + init_info.qp_uk_init_info.max_rq_frag_cnt = init_attr->cap.max_recv_sge; + init_info.qp_uk_init_info.max_inline_data = 
init_attr->cap.max_inline_data; + + mem = kzalloc(sizeof(*iwqp), GFP_KERNEL); + if (!mem) + return ERR_PTR(-ENOMEM); + + iwqp = (struct i40iw_qp *)mem; + iwqp->allocated_buffer = mem; + qp = &iwqp->sc_qp; + qp->back_qp = (void *)iwqp; + qp->push_idx = I40IW_INVALID_PUSH_PAGE_INDEX; + + iwqp->ctx_info.iwarp_info = &iwqp->iwarp_info; + + if (i40iw_allocate_dma_mem(dev->hw, + &iwqp->q2_ctx_mem, + I40IW_Q2_BUFFER_SIZE + I40IW_QP_CTX_SIZE, + 256)) { + i40iw_pr_err("dma_mem failed\n"); + err_code = -ENOMEM; + goto error; + } + + init_info.q2 = iwqp->q2_ctx_mem.va; + init_info.q2_pa = iwqp->q2_ctx_mem.pa; + + init_info.host_ctx = (void *)init_info.q2 + I40IW_Q2_BUFFER_SIZE; + init_info.host_ctx_pa = init_info.q2_pa + I40IW_Q2_BUFFER_SIZE; + + err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_qps, iwdev->max_qp, + &qp_num, &iwdev->next_qp); + if (err_code) { + i40iw_pr_err("qp resource\n"); + goto error; + } + + iwqp->iwdev = iwdev; + iwqp->iwpd = iwpd; + iwqp->ibqp.qp_num = qp_num; + qp = &iwqp->sc_qp; + iwqp->iwscq = to_iwcq(init_attr->send_cq); + iwqp->iwrcq = to_iwcq(init_attr->recv_cq); + + iwqp->host_ctx.va = init_info.host_ctx; + iwqp->host_ctx.pa = init_info.host_ctx_pa; + iwqp->host_ctx.size = I40IW_QP_CTX_SIZE; + + init_info.pd = &iwpd->sc_pd; + init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num; + iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp; + + if (init_attr->qp_type != IB_QPT_RC) { + err_code = -EINVAL; + goto error; + } + if (iwdev->push_mode) + i40iw_alloc_push_page(iwdev, qp); + if (udata) { + err_code = ib_copy_from_udata(&req, udata, sizeof(req)); + if (err_code) { + i40iw_pr_err("ib_copy_from_data\n"); + goto error; + } + iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx; + if (ibpd->uobject && ibpd->uobject->context) { + iwqp->user_mode = 1; + ucontext = to_ucontext(ibpd->uobject->context); + + if (req.user_wqe_buffers) { + struct i40iw_pbl *iwpbl; + + spin_lock_irqsave( + &ucontext->qp_reg_mem_list_lock, flags); + iwpbl = i40iw_get_pbl( + (unsigned long)req.user_wqe_buffers, + &ucontext->qp_reg_mem_list); + spin_unlock_irqrestore( + &ucontext->qp_reg_mem_list_lock, flags); + + if (!iwpbl) { + err_code = -ENODATA; + i40iw_pr_err("no pbl info\n"); + goto error; + } + memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl)); + } + } + err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info); + } else { + err_code = i40iw_setup_kmode_qp(iwdev, iwqp, &init_info); + } + + if (err_code) { + i40iw_pr_err("setup qp failed\n"); + goto error; + } + + init_info.type = I40IW_QP_TYPE_IWARP; + ret = dev->iw_priv_qp_ops->qp_init(qp, &init_info); + if (ret) { + err_code = -EPROTO; + i40iw_pr_err("qp_init fail\n"); + goto error; + } + ctx_info = &iwqp->ctx_info; + iwarp_info = &iwqp->iwarp_info; + iwarp_info->rd_enable = true; + iwarp_info->wr_rdresp_en = true; + if (!iwqp->user_mode) { + iwarp_info->fast_reg_en = true; + iwarp_info->priv_mode_en = true; + } + iwarp_info->ddp_ver = 1; + iwarp_info->rdmap_ver = 1; + + ctx_info->iwarp_info_valid = true; + ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id; + ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id; + if (qp->push_idx == I40IW_INVALID_PUSH_PAGE_INDEX) { + ctx_info->push_mode_en = false; + } else { + ctx_info->push_mode_en = true; + ctx_info->push_idx = qp->push_idx; + } + + ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp, + (u64 *)iwqp->host_ctx.va, + ctx_info); + ctx_info->iwarp_info_valid = false; + cqp_request = i40iw_get_cqp_request(iwcqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto error; + } + cqp_info = 
&cqp_request->info; + qp_info = &cqp_request->info.in.u.qp_create.info; + + memset(qp_info, 0, sizeof(*qp_info)); + + qp_info->cq_num_valid = true; + qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE; + + cqp_info->cqp_cmd = OP_QP_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_create.qp = qp; + cqp_info->in.u.qp_create.scratch = (uintptr_t)cqp_request; + ret = i40iw_handle_cqp_op(iwdev, cqp_request); + if (ret) { + i40iw_pr_err("CQP-OP QP create fail"); + err_code = -EACCES; + goto error; + } + + i40iw_add_ref(&iwqp->ibqp); + spin_lock_init(&iwqp->lock); + iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0; + iwdev->qp_table[qp_num] = iwqp; + i40iw_add_pdusecount(iwqp->iwpd); + i40iw_add_devusecount(iwdev); + if (ibpd->uobject && udata) { + memset(&uresp, 0, sizeof(uresp)); + uresp.actual_sq_size = sq_size; + uresp.actual_rq_size = rq_size; + uresp.qp_id = qp_num; + uresp.push_idx = qp->push_idx; + err_code = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + if (err_code) { + i40iw_pr_err("copy_to_udata failed\n"); + i40iw_destroy_qp(&iwqp->ibqp); + /* let the completion of the qp destroy free the qp */ + return ERR_PTR(err_code); + } + } + init_completion(&iwqp->sq_drained); + init_completion(&iwqp->rq_drained); + + return &iwqp->ibqp; +error: + i40iw_free_qp_resources(iwdev, iwqp, qp_num); + return ERR_PTR(err_code); +} + +/** + * i40iw_query - query qp attributes + * @ibqp: qp pointer + * @attr: attributes pointer + * @attr_mask: Not used + * @init_attr: qp attributes to return + */ +static int i40iw_query_qp(struct ib_qp *ibqp, + struct ib_qp_attr *attr, + int attr_mask, + struct ib_qp_init_attr *init_attr) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + + attr->qp_state = iwqp->ibqp_state; + attr->cur_qp_state = attr->qp_state; + attr->qp_access_flags = 0; + attr->cap.max_send_wr = qp->qp_uk.sq_size; + attr->cap.max_recv_wr = qp->qp_uk.rq_size; + attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; + attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; + attr->port_num = 1; + init_attr->event_handler = iwqp->ibqp.event_handler; + init_attr->qp_context = iwqp->ibqp.qp_context; + init_attr->send_cq = iwqp->ibqp.send_cq; + init_attr->recv_cq = iwqp->ibqp.recv_cq; + init_attr->srq = iwqp->ibqp.srq; + init_attr->cap = attr->cap; + init_attr->port_num = 1; + return 0; +} + +/** + * i40iw_hw_modify_qp - setup cqp for modify qp + * @iwdev: iwarp device + * @iwqp: qp ptr (user or kernel) + * @info: info for modify qp + * @wait: flag to wait or not for modify qp completion + */ +void i40iw_hw_modify_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, + struct i40iw_modify_qp_info *info, bool wait) +{ + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_modify_qp_info *m_info; + struct i40iw_gen_ae_info ae_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, wait); + if (!cqp_request) + return; + + cqp_info = &cqp_request->info; + m_info = &cqp_info->in.u.qp_modify.info; + memcpy(m_info, info, sizeof(*m_info)); + cqp_info->cqp_cmd = OP_QP_MODIFY; + cqp_info->post_sq = 1; + cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp; + cqp_info->in.u.qp_modify.scratch = (uintptr_t)cqp_request; + if (!i40iw_handle_cqp_op(iwdev, cqp_request)) + return; + + switch (m_info->next_iwarp_state) { + case I40IW_QP_STATE_RTS: + if (iwqp->iwarp_state == I40IW_QP_STATE_IDLE) + i40iw_send_reset(iwqp->cm_node); + /* fall through */ + case I40IW_QP_STATE_IDLE: 
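/* the IDLE, TERMINATE and CLOSING transitions all fall through to
 * the bad-close AE generation below.
 */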
+ case I40IW_QP_STATE_TERMINATE: + case I40IW_QP_STATE_CLOSING: + ae_info.ae_code = I40IW_AE_BAD_CLOSE; + ae_info.ae_source = 0; + i40iw_gen_ae(iwdev, &iwqp->sc_qp, &ae_info, false); + break; + case I40IW_QP_STATE_ERROR: + default: + break; + } +} + +/** + * i40iw_modify_qp - modify qp request + * @ibqp: qp's pointer for modify + * @attr: access attributes + * @attr_mask: state mask + * @udata: user data + */ +int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + struct i40iw_device *iwdev = iwqp->iwdev; + struct i40iw_qp_host_ctx_info *ctx_info; + struct i40iwarp_offload_info *iwarp_info; + struct i40iw_modify_qp_info info; + u8 issue_modify_qp = 0; + u8 dont_wait = 0; + u32 err; + unsigned long flags; + + memset(&info, 0, sizeof(info)); + ctx_info = &iwqp->ctx_info; + iwarp_info = &iwqp->iwarp_info; + + spin_lock_irqsave(&iwqp->lock, flags); + + if (attr_mask & IB_QP_STATE) { + if (iwdev->closing && attr->qp_state != IB_QPS_ERR) { + err = -EINVAL; + goto exit; + } + + switch (attr->qp_state) { + case IB_QPS_INIT: + case IB_QPS_RTR: + if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_IDLE) { + err = -EINVAL; + goto exit; + } + if (iwqp->iwarp_state == I40IW_QP_STATE_INVALID) { + info.next_iwarp_state = I40IW_QP_STATE_IDLE; + issue_modify_qp = 1; + } + break; + case IB_QPS_RTS: + if ((iwqp->iwarp_state > (u32)I40IW_QP_STATE_RTS) || + (!iwqp->cm_id)) { + err = -EINVAL; + goto exit; + } + + issue_modify_qp = 1; + iwqp->hw_tcp_state = I40IW_TCP_STATE_ESTABLISHED; + iwqp->hte_added = 1; + info.next_iwarp_state = I40IW_QP_STATE_RTS; + info.tcp_ctx_valid = true; + info.ord_valid = true; + info.arp_cache_idx_valid = true; + info.cq_num_valid = true; + break; + case IB_QPS_SQD: + if (iwqp->hw_iwarp_state > (u32)I40IW_QP_STATE_RTS) { + err = 0; + goto exit; + } + if ((iwqp->iwarp_state == (u32)I40IW_QP_STATE_CLOSING) || + (iwqp->iwarp_state < (u32)I40IW_QP_STATE_RTS)) { + err = 0; + goto exit; + } + if (iwqp->iwarp_state > (u32)I40IW_QP_STATE_CLOSING) { + err = -EINVAL; + goto exit; + } + info.next_iwarp_state = I40IW_QP_STATE_CLOSING; + issue_modify_qp = 1; + break; + case IB_QPS_SQE: + if (iwqp->iwarp_state >= (u32)I40IW_QP_STATE_TERMINATE) { + err = -EINVAL; + goto exit; + } + info.next_iwarp_state = I40IW_QP_STATE_TERMINATE; + issue_modify_qp = 1; + break; + case IB_QPS_ERR: + case IB_QPS_RESET: + if (iwqp->iwarp_state == (u32)I40IW_QP_STATE_ERROR) { + err = -EINVAL; + goto exit; + } + if (iwqp->sc_qp.term_flags) + i40iw_terminate_del_timer(&iwqp->sc_qp); + info.next_iwarp_state = I40IW_QP_STATE_ERROR; + if ((iwqp->hw_tcp_state > I40IW_TCP_STATE_CLOSED) && + iwdev->iw_status && + (iwqp->hw_tcp_state != I40IW_TCP_STATE_TIME_WAIT)) + info.reset_tcp_conn = true; + else + dont_wait = 1; + issue_modify_qp = 1; + info.next_iwarp_state = I40IW_QP_STATE_ERROR; + break; + default: + err = -EINVAL; + goto exit; + } + + iwqp->ibqp_state = attr->qp_state; + + } + if (attr_mask & IB_QP_ACCESS_FLAGS) { + ctx_info->iwarp_info_valid = true; + if (attr->qp_access_flags & IB_ACCESS_LOCAL_WRITE) + iwarp_info->wr_rdresp_en = true; + if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) + iwarp_info->wr_rdresp_en = true; + if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ) + iwarp_info->rd_enable = true; + if (attr->qp_access_flags & IB_ACCESS_MW_BIND) + iwarp_info->bind_en = true; + + if (iwqp->user_mode) { + iwarp_info->rd_enable = true; + iwarp_info->wr_rdresp_en = true; + iwarp_info->priv_mode_en = false; + } + } + + if 
(ctx_info->iwarp_info_valid) {
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ int ret;
+
+ ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
+ ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
+ ret = dev->iw_priv_qp_ops->qp_setctx(&iwqp->sc_qp,
+ (u64 *)iwqp->host_ctx.va,
+ ctx_info);
+ if (ret) {
+ i40iw_pr_err("setting QP context\n");
+ err = -EINVAL;
+ goto exit;
+ }
+ }
+
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+
+ if (issue_modify_qp) {
+ i40iw_hw_modify_qp(iwdev, iwqp, &info, true);
+
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->iwarp_state = info.next_iwarp_state;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+
+ if (issue_modify_qp && (iwqp->ibqp_state > IB_QPS_RTS)) {
+ if (dont_wait) {
+ if (iwqp->cm_id && iwqp->hw_tcp_state) {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
+ iwqp->last_aeq = I40IW_AE_RESET_SENT;
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ i40iw_cm_disconn(iwqp);
+ }
+ } else {
+ spin_lock_irqsave(&iwqp->lock, flags);
+ if (iwqp->cm_id) {
+ if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
+ iwqp->cm_id->add_ref(iwqp->cm_id);
+ i40iw_schedule_cm_timer(iwqp->cm_node,
+ (struct i40iw_puda_buf *)iwqp,
+ I40IW_TIMER_TYPE_CLOSE, 1, 0);
+ }
+ }
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ }
+ }
+ return 0;
+exit:
+ spin_unlock_irqrestore(&iwqp->lock, flags);
+ return err;
+}
+
+/**
+ * cq_free_resources - free up resources for cq
+ * @iwdev: iwarp device
+ * @iwcq: cq ptr
+ */
+static void cq_free_resources(struct i40iw_device *iwdev, struct i40iw_cq *iwcq)
+{
+ struct i40iw_sc_cq *cq = &iwcq->sc_cq;
+
+ if (!iwcq->user_mode)
+ i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwcq->kmem);
+ i40iw_free_resource(iwdev, iwdev->allocated_cqs, cq->cq_uk.cq_id);
+}
+
+/**
+ * i40iw_cq_wq_destroy - send cq destroy cqp
+ * @iwdev: iwarp device
+ * @cq: hardware control cq
+ */
+void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq)
+{
+ enum i40iw_status_code status;
+ struct i40iw_cqp_request *cqp_request;
+ struct cqp_commands_info *cqp_info;
+
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true);
+ if (!cqp_request)
+ return;
+
+ cqp_info = &cqp_request->info;
+
+ cqp_info->cqp_cmd = OP_CQ_DESTROY;
+ cqp_info->post_sq = 1;
+ cqp_info->in.u.cq_destroy.cq = cq;
+ cqp_info->in.u.cq_destroy.scratch = (uintptr_t)cqp_request;
+ status = i40iw_handle_cqp_op(iwdev, cqp_request);
+ if (status)
+ i40iw_pr_err("CQP-OP Destroy CQ fail");
+}
+
+/**
+ * i40iw_destroy_cq - destroy cq
+ * @ib_cq: cq pointer
+ */
+static int i40iw_destroy_cq(struct ib_cq *ib_cq)
+{
+ struct i40iw_cq *iwcq;
+ struct i40iw_device *iwdev;
+ struct i40iw_sc_cq *cq;
+
+ if (!ib_cq) {
+ i40iw_pr_err("ib_cq == NULL\n");
+ return 0;
+ }
+
+ iwcq = to_iwcq(ib_cq);
+ iwdev = to_iwdev(ib_cq->device);
+ cq = &iwcq->sc_cq;
+ i40iw_cq_wq_destroy(iwdev, cq);
+ cq_free_resources(iwdev, iwcq);
+ kfree(iwcq);
+ i40iw_rem_devusecount(iwdev);
+ return 0;
+}
+
+/**
+ * i40iw_create_cq - create cq
+ * @ibdev: device pointer from stack
+ * @attr: attributes for cq
+ * @context: user context created during alloc
+ * @udata: user data
+ */
+static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev,
+ const struct ib_cq_init_attr *attr,
+ struct ib_ucontext *context,
+ struct ib_udata *udata)
+{
+ struct i40iw_device *iwdev = to_iwdev(ibdev);
+ struct i40iw_cq *iwcq;
+ struct i40iw_pbl *iwpbl;
+ u32 cq_num = 0;
+ struct i40iw_sc_cq *cq;
+ struct i40iw_sc_dev *dev = &iwdev->sc_dev;
+ struct i40iw_cq_init_info info;
+ enum
i40iw_status_code status; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + struct i40iw_cq_uk_init_info *ukinfo = &info.cq_uk_init_info; + unsigned long flags; + int err_code; + int entries = attr->cqe; + + if (iwdev->closing) + return ERR_PTR(-ENODEV); + + if (entries > iwdev->max_cqe) + return ERR_PTR(-EINVAL); + + iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL); + if (!iwcq) + return ERR_PTR(-ENOMEM); + + memset(&info, 0, sizeof(info)); + + err_code = i40iw_alloc_resource(iwdev, iwdev->allocated_cqs, + iwdev->max_cq, &cq_num, + &iwdev->next_cq); + if (err_code) + goto error; + + cq = &iwcq->sc_cq; + cq->back_cq = (void *)iwcq; + spin_lock_init(&iwcq->lock); + + info.dev = dev; + ukinfo->cq_size = max(entries, 4); + ukinfo->cq_id = cq_num; + iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size; + info.ceqe_mask = 0; + if (attr->comp_vector < iwdev->ceqs_count) + info.ceq_id = attr->comp_vector; + info.ceq_id_valid = true; + info.ceqe_mask = 1; + info.type = I40IW_CQ_TYPE_IWARP; + if (context) { + struct i40iw_ucontext *ucontext; + struct i40iw_create_cq_req req; + struct i40iw_cq_mr *cqmr; + + memset(&req, 0, sizeof(req)); + iwcq->user_mode = true; + ucontext = to_ucontext(context); + if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) { + err_code = -EFAULT; + goto cq_free_resources; + } + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer, + &ucontext->cq_reg_mem_list); + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + if (!iwpbl) { + err_code = -EPROTO; + goto cq_free_resources; + } + + iwcq->iwpbl = iwpbl; + iwcq->cq_mem_size = 0; + cqmr = &iwpbl->cq_mr; + info.shadow_area_pa = cpu_to_le64(cqmr->shadow); + if (iwpbl->pbl_allocated) { + info.virtual_map = true; + info.pbl_chunk_size = 1; + info.first_pm_pbl_idx = cqmr->cq_pbl.idx; + } else { + info.cq_base_pa = cqmr->cq_pbl.addr; + } + } else { + /* Kmode allocations */ + int rsize; + int shadow; + + rsize = info.cq_uk_init_info.cq_size * sizeof(struct i40iw_cqe); + rsize = round_up(rsize, 256); + shadow = I40IW_SHADOW_AREA_SIZE << 3; + status = i40iw_allocate_dma_mem(dev->hw, &iwcq->kmem, + rsize + shadow, 256); + if (status) { + err_code = -ENOMEM; + goto cq_free_resources; + } + ukinfo->cq_base = iwcq->kmem.va; + info.cq_base_pa = iwcq->kmem.pa; + info.shadow_area_pa = info.cq_base_pa + rsize; + ukinfo->shadow_area = iwcq->kmem.va + rsize; + } + + if (dev->iw_priv_cq_ops->cq_init(cq, &info)) { + i40iw_pr_err("init cq fail\n"); + err_code = -EPROTO; + goto cq_free_resources; + } + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) { + err_code = -ENOMEM; + goto cq_free_resources; + } + + cqp_info = &cqp_request->info; + cqp_info->cqp_cmd = OP_CQ_CREATE; + cqp_info->post_sq = 1; + cqp_info->in.u.cq_create.cq = cq; + cqp_info->in.u.cq_create.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) { + i40iw_pr_err("CQP-OP Create QP fail"); + err_code = -EPROTO; + goto cq_free_resources; + } + + if (context) { + struct i40iw_create_cq_resp resp; + + memset(&resp, 0, sizeof(resp)); + resp.cq_id = info.cq_uk_init_info.cq_id; + resp.cq_size = info.cq_uk_init_info.cq_size; + if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { + i40iw_pr_err("copy to user data\n"); + err_code = -EPROTO; + goto cq_destroy; + } + } + + i40iw_add_devusecount(iwdev); + return (struct ib_cq *)iwcq; + +cq_destroy: + i40iw_cq_wq_destroy(iwdev, cq); +cq_free_resources: + 
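/* error unwind: the HW CQ was destroyed above under cq_destroy, the
 * CQ number and kernel CQ memory are released here, and the iwcq
 * itself is freed under the error label.
 */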
cq_free_resources(iwdev, iwcq); +error: + kfree(iwcq); + return ERR_PTR(err_code); +} + +/** + * i40iw_get_user_access - get hw access from IB access + * @acc: IB access to return hw access + */ +static inline u16 i40iw_get_user_access(int acc) +{ + u16 access = 0; + + access |= (acc & IB_ACCESS_LOCAL_WRITE) ? I40IW_ACCESS_FLAGS_LOCALWRITE : 0; + access |= (acc & IB_ACCESS_REMOTE_WRITE) ? I40IW_ACCESS_FLAGS_REMOTEWRITE : 0; + access |= (acc & IB_ACCESS_REMOTE_READ) ? I40IW_ACCESS_FLAGS_REMOTEREAD : 0; + access |= (acc & IB_ACCESS_MW_BIND) ? I40IW_ACCESS_FLAGS_BIND_WINDOW : 0; + return access; +} + +/** + * i40iw_free_stag - free stag resource + * @iwdev: iwarp device + * @stag: stag to free + */ +static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag) +{ + u32 stag_idx; + + stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT; + i40iw_free_resource(iwdev, iwdev->allocated_mrs, stag_idx); + i40iw_rem_devusecount(iwdev); +} + +/** + * i40iw_create_stag - create random stag + * @iwdev: iwarp device + */ +static u32 i40iw_create_stag(struct i40iw_device *iwdev) +{ + u32 stag = 0; + u32 stag_index = 0; + u32 next_stag_index; + u32 driver_key; + u32 random; + u8 consumer_key; + int ret; + + get_random_bytes(&random, sizeof(random)); + consumer_key = (u8)random; + + driver_key = random & ~iwdev->mr_stagmask; + next_stag_index = (random & iwdev->mr_stagmask) >> 8; + next_stag_index %= iwdev->max_mr; + + ret = i40iw_alloc_resource(iwdev, + iwdev->allocated_mrs, iwdev->max_mr, + &stag_index, &next_stag_index); + if (!ret) { + stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT; + stag |= driver_key; + stag += (u32)consumer_key; + i40iw_add_devusecount(iwdev); + } + return stag; +} + +/** + * i40iw_next_pbl_addr - Get next pbl address + * @pbl: pointer to a pble + * @pinfo: info pointer + * @idx: index + */ +static inline u64 *i40iw_next_pbl_addr(u64 *pbl, + struct i40iw_pble_info **pinfo, + u32 *idx) +{ + *idx += 1; + if ((!(*pinfo)) || (*idx != (*pinfo)->cnt)) + return ++pbl; + *idx = 0; + (*pinfo)++; + return (u64 *)(*pinfo)->addr; +} + +/** + * i40iw_copy_user_pgaddrs - copy user page address to pble's os locally + * @iwmr: iwmr for IB's user page addresses + * @pbl: ple pointer to save 1 level or 0 level pble + * @level: indicated level 0, 1 or 2 + */ +static void i40iw_copy_user_pgaddrs(struct i40iw_mr *iwmr, + u64 *pbl, + enum i40iw_pble_level level) +{ + struct ib_umem *region = iwmr->region; + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + int chunk_pages, entry, i; + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_pble_info *pinfo; + struct scatterlist *sg; + u64 pg_addr = 0; + u32 idx = 0; + + pinfo = (level == I40IW_LEVEL_1) ? NULL : palloc->level2.leaf; + + for_each_sg(region->sg_head.sgl, sg, region->nmap, entry) { + chunk_pages = sg_dma_len(sg) >> region->page_shift; + if ((iwmr->type == IW_MEMREG_TYPE_QP) && + !iwpbl->qp_mr.sq_page) + iwpbl->qp_mr.sq_page = sg_page(sg); + for (i = 0; i < chunk_pages; i++) { + pg_addr = sg_dma_address(sg) + + (i << region->page_shift); + + if ((entry + i) == 0) + *pbl = cpu_to_le64(pg_addr & iwmr->page_msk); + else if (!(pg_addr & ~iwmr->page_msk)) + *pbl = cpu_to_le64(pg_addr); + else + continue; + pbl = i40iw_next_pbl_addr(pbl, &pinfo, &idx); + } + } +} + +/** + * i40iw_set_hugetlb_params - set MR pg size and mask to huge pg values. 
+ * @addr: virtual address + * @iwmr: mr pointer for this memory registration + */ +static void i40iw_set_hugetlb_values(u64 addr, struct i40iw_mr *iwmr) +{ + struct vm_area_struct *vma; + struct hstate *h; + + down_read(¤t->mm->mmap_sem); + vma = find_vma(current->mm, addr); + if (vma && is_vm_hugetlb_page(vma)) { + h = hstate_vma(vma); + if (huge_page_size(h) == 0x200000) { + iwmr->page_size = huge_page_size(h); + iwmr->page_msk = huge_page_mask(h); + } + } + up_read(¤t->mm->mmap_sem); +} + +/** + * i40iw_check_mem_contiguous - check if pbls stored in arr are contiguous + * @arr: lvl1 pbl array + * @npages: page count + * pg_size: page size + * + */ +static bool i40iw_check_mem_contiguous(u64 *arr, u32 npages, u32 pg_size) +{ + u32 pg_idx; + + for (pg_idx = 0; pg_idx < npages; pg_idx++) { + if ((*arr + (pg_size * pg_idx)) != arr[pg_idx]) + return false; + } + return true; +} + +/** + * i40iw_check_mr_contiguous - check if MR is physically contiguous + * @palloc: pbl allocation struct + * pg_size: page size + */ +static bool i40iw_check_mr_contiguous(struct i40iw_pble_alloc *palloc, u32 pg_size) +{ + struct i40iw_pble_level2 *lvl2 = &palloc->level2; + struct i40iw_pble_info *leaf = lvl2->leaf; + u64 *arr = NULL; + u64 *start_addr = NULL; + int i; + bool ret; + + if (palloc->level == I40IW_LEVEL_1) { + arr = (u64 *)palloc->level1.addr; + ret = i40iw_check_mem_contiguous(arr, palloc->total_cnt, pg_size); + return ret; + } + + start_addr = (u64 *)leaf->addr; + + for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) { + arr = (u64 *)leaf->addr; + if ((*start_addr + (i * pg_size * PBLE_PER_PAGE)) != *arr) + return false; + ret = i40iw_check_mem_contiguous(arr, leaf->cnt, pg_size); + if (!ret) + return false; + } + + return true; +} + +/** + * i40iw_setup_pbles - copy user pg address to pble's + * @iwdev: iwarp device + * @iwmr: mr pointer for this memory registration + * @use_pbles: flag if to use pble's + */ +static int i40iw_setup_pbles(struct i40iw_device *iwdev, + struct i40iw_mr *iwmr, + bool use_pbles) +{ + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_pble_info *pinfo; + u64 *pbl; + enum i40iw_status_code status; + enum i40iw_pble_level level = I40IW_LEVEL_1; + + if (use_pbles) { + mutex_lock(&iwdev->pbl_mutex); + status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); + mutex_unlock(&iwdev->pbl_mutex); + if (status) + return -ENOMEM; + + iwpbl->pbl_allocated = true; + level = palloc->level; + pinfo = (level == I40IW_LEVEL_1) ? 
&palloc->level1 : palloc->level2.leaf; + pbl = (u64 *)pinfo->addr; + } else { + pbl = iwmr->pgaddrmem; + } + + i40iw_copy_user_pgaddrs(iwmr, pbl, level); + + if (use_pbles) + iwmr->pgaddrmem[0] = *pbl; + + return 0; +} + +/** + * i40iw_handle_q_mem - handle memory for qp and cq + * @iwdev: iwarp device + * @req: information for q memory management + * @iwpbl: pble struct + * @use_pbles: flag to use pble + */ +static int i40iw_handle_q_mem(struct i40iw_device *iwdev, + struct i40iw_mem_reg_req *req, + struct i40iw_pbl *iwpbl, + bool use_pbles) +{ + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_mr *iwmr = iwpbl->iwmr; + struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr; + struct i40iw_cq_mr *cqmr = &iwpbl->cq_mr; + struct i40iw_hmc_pble *hmc_p; + u64 *arr = iwmr->pgaddrmem; + u32 pg_size; + int err; + int total; + bool ret = true; + + total = req->sq_pages + req->rq_pages + req->cq_pages; + pg_size = iwmr->page_size; + + err = i40iw_setup_pbles(iwdev, iwmr, use_pbles); + if (err) + return err; + + if (use_pbles && (palloc->level != I40IW_LEVEL_1)) { + i40iw_free_pble(iwdev->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + return -ENOMEM; + } + + if (use_pbles) + arr = (u64 *)palloc->level1.addr; + + if (iwmr->type == IW_MEMREG_TYPE_QP) { + hmc_p = &qpmr->sq_pbl; + qpmr->shadow = (dma_addr_t)arr[total]; + + if (use_pbles) { + ret = i40iw_check_mem_contiguous(arr, req->sq_pages, pg_size); + if (ret) + ret = i40iw_check_mem_contiguous(&arr[req->sq_pages], req->rq_pages, pg_size); + } + + if (!ret) { + hmc_p->idx = palloc->level1.idx; + hmc_p = &qpmr->rq_pbl; + hmc_p->idx = palloc->level1.idx + req->sq_pages; + } else { + hmc_p->addr = arr[0]; + hmc_p = &qpmr->rq_pbl; + hmc_p->addr = arr[req->sq_pages]; + } + } else { /* CQ */ + hmc_p = &cqmr->cq_pbl; + cqmr->shadow = (dma_addr_t)arr[total]; + + if (use_pbles) + ret = i40iw_check_mem_contiguous(arr, req->cq_pages, pg_size); + + if (!ret) + hmc_p->idx = palloc->level1.idx; + else + hmc_p->addr = arr[0]; + } + + if (use_pbles && ret) { + i40iw_free_pble(iwdev->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + + return err; +} + +/** + * i40iw_hw_alloc_stag - cqp command to allocate stag + * @iwdev: iwarp device + * @iwmr: iwarp mr pointer + */ +static int i40iw_hw_alloc_stag(struct i40iw_device *iwdev, struct i40iw_mr *iwmr) +{ + struct i40iw_allocate_stag_info *info; + struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + enum i40iw_status_code status; + int err = 0; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.alloc_stag.info; + memset(info, 0, sizeof(*info)); + info->page_size = PAGE_SIZE; + info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; + info->pd_id = iwpd->sc_pd.pd_id; + info->total_len = iwmr->length; + info->remote_access = true; + cqp_info->cqp_cmd = OP_ALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.alloc_stag.dev = &iwdev->sc_dev; + cqp_info->in.u.alloc_stag.scratch = (uintptr_t)cqp_request; + + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) { + err = -ENOMEM; + i40iw_pr_err("CQP-OP MR Reg fail"); + } + return err; +} + +/** + * i40iw_alloc_mr - register stag for fast memory registration + * @pd: ibpd pointer + * @mr_type: memory for stag registrion + * @max_num_sg: man number of pages + */ +static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd, + enum ib_mr_type mr_type, + u32 max_num_sg) +{ 
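+	/*
+	 * Fast-register MR path: reserve a stag and a level-1 pble chunk
+	 * here; i40iw_map_mr_sg()/i40iw_set_page() later fill that pble
+	 * with DMA addresses, and an IB_WR_REG_MR work request posted via
+	 * i40iw_post_send() programs the stag in hardware.
+	 */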
+ struct i40iw_pd *iwpd = to_iwpd(pd); + struct i40iw_device *iwdev = to_iwdev(pd->device); + struct i40iw_pble_alloc *palloc; + struct i40iw_pbl *iwpbl; + struct i40iw_mr *iwmr; + enum i40iw_status_code status; + u32 stag; + int err_code = -ENOMEM; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + + stag = i40iw_create_stag(iwdev); + if (!stag) { + err_code = -EOVERFLOW; + goto err; + } + stag &= ~I40IW_CQPSQ_STAG_KEY_MASK; + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->type = IW_MEMREG_TYPE_MEM; + palloc = &iwpbl->pble_alloc; + iwmr->page_cnt = max_num_sg; + mutex_lock(&iwdev->pbl_mutex); + status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt); + mutex_unlock(&iwdev->pbl_mutex); + if (status) + goto err1; + + if (palloc->level != I40IW_LEVEL_1) + goto err2; + err_code = i40iw_hw_alloc_stag(iwdev, iwmr); + if (err_code) + goto err2; + iwpbl->pbl_allocated = true; + i40iw_add_pdusecount(iwpd); + return &iwmr->ibmr; +err2: + i40iw_free_pble(iwdev->pble_rsrc, palloc); +err1: + i40iw_free_stag(iwdev, stag); +err: + kfree(iwmr); + return ERR_PTR(err_code); +} + +/** + * i40iw_set_page - populate pbl list for fmr + * @ibmr: ib mem to access iwarp mr pointer + * @addr: page dma address fro pbl list + */ +static int i40iw_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct i40iw_mr *iwmr = to_iwmr(ibmr); + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + u64 *pbl; + + if (unlikely(iwmr->npages == iwmr->page_cnt)) + return -ENOMEM; + + pbl = (u64 *)palloc->level1.addr; + pbl[iwmr->npages++] = cpu_to_le64(addr); + return 0; +} + +/** + * i40iw_map_mr_sg - map of sg list for fmr + * @ibmr: ib mem to access iwarp mr pointer + * @sg: scatter gather list for fmr + * @sg_nents: number of sg pages + */ +static int i40iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, + int sg_nents, unsigned int *sg_offset) +{ + struct i40iw_mr *iwmr = to_iwmr(ibmr); + + iwmr->npages = 0; + return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, i40iw_set_page); +} + +/** + * i40iw_drain_sq - drain the send queue + * @ibqp: ib qp pointer + */ +static void i40iw_drain_sq(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + + if (I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring)) + wait_for_completion(&iwqp->sq_drained); +} + +/** + * i40iw_drain_rq - drain the receive queue + * @ibqp: ib qp pointer + */ +static void i40iw_drain_rq(struct ib_qp *ibqp) +{ + struct i40iw_qp *iwqp = to_iwqp(ibqp); + struct i40iw_sc_qp *qp = &iwqp->sc_qp; + + if (I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring)) + wait_for_completion(&iwqp->rq_drained); +} + +/** + * i40iw_hwreg_mr - send cqp command for memory registration + * @iwdev: iwarp device + * @iwmr: iwarp mr pointer + * @access: access for MR + */ +static int i40iw_hwreg_mr(struct i40iw_device *iwdev, + struct i40iw_mr *iwmr, + u16 access) +{ + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + struct i40iw_reg_ns_stag_info *stag_info; + struct i40iw_pd *iwpd = to_iwpd(iwmr->ibmr.pd); + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + enum i40iw_status_code status; + int err = 0; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + stag_info = 
&cqp_info->in.u.mr_reg_non_shared.info; + memset(stag_info, 0, sizeof(*stag_info)); + stag_info->va = (void *)(unsigned long)iwpbl->user_base; + stag_info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT; + stag_info->stag_key = (u8)iwmr->stag; + stag_info->total_len = iwmr->length; + stag_info->access_rights = access; + stag_info->pd_id = iwpd->sc_pd.pd_id; + stag_info->addr_type = I40IW_ADDR_TYPE_VA_BASED; + stag_info->page_size = iwmr->page_size; + + if (iwpbl->pbl_allocated) { + if (palloc->level == I40IW_LEVEL_1) { + stag_info->first_pm_pbl_index = palloc->level1.idx; + stag_info->chunk_size = 1; + } else { + stag_info->first_pm_pbl_index = palloc->level2.root.idx; + stag_info->chunk_size = 3; + } + } else { + stag_info->reg_addr_pa = iwmr->pgaddrmem[0]; + } + + cqp_info->cqp_cmd = OP_MR_REG_NON_SHARED; + cqp_info->post_sq = 1; + cqp_info->in.u.mr_reg_non_shared.dev = &iwdev->sc_dev; + cqp_info->in.u.mr_reg_non_shared.scratch = (uintptr_t)cqp_request; + + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) { + err = -ENOMEM; + i40iw_pr_err("CQP-OP MR Reg fail"); + } + return err; +} + +/** + * i40iw_reg_user_mr - Register a user memory region + * @pd: ptr of pd + * @start: virtual start address + * @length: length of mr + * @virt: virtual address + * @acc: access of mr + * @udata: user data + */ +static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd, + u64 start, + u64 length, + u64 virt, + int acc, + struct ib_udata *udata) +{ + struct i40iw_pd *iwpd = to_iwpd(pd); + struct i40iw_device *iwdev = to_iwdev(pd->device); + struct i40iw_ucontext *ucontext; + struct i40iw_pble_alloc *palloc; + struct i40iw_pbl *iwpbl; + struct i40iw_mr *iwmr; + struct ib_umem *region; + struct i40iw_mem_reg_req req; + u64 pbl_depth = 0; + u32 stag = 0; + u16 access; + u64 region_length; + bool use_pbles = false; + unsigned long flags; + int err = -ENOSYS; + int ret; + int pg_shift; + + if (iwdev->closing) + return ERR_PTR(-ENODEV); + + if (length > I40IW_MAX_MR_SIZE) + return ERR_PTR(-EINVAL); + region = ib_umem_get(pd->uobject->context, start, length, acc, 0); + if (IS_ERR(region)) + return (struct ib_mr *)region; + + if (ib_copy_from_udata(&req, udata, sizeof(req))) { + ib_umem_release(region); + return ERR_PTR(-EFAULT); + } + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) { + ib_umem_release(region); + return ERR_PTR(-ENOMEM); + } + + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->region = region; + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + ucontext = to_ucontext(pd->uobject->context); + + iwmr->page_size = PAGE_SIZE; + iwmr->page_msk = PAGE_MASK; + + if (region->hugetlb && (req.reg_type == IW_MEMREG_TYPE_MEM)) + i40iw_set_hugetlb_values(start, iwmr); + + region_length = region->length + (start & (iwmr->page_size - 1)); + pg_shift = ffs(iwmr->page_size) - 1; + pbl_depth = region_length >> pg_shift; + pbl_depth += (region_length & (iwmr->page_size - 1)) ? 
1 : 0; + iwmr->length = region->length; + + iwpbl->user_base = virt; + palloc = &iwpbl->pble_alloc; + + iwmr->type = req.reg_type; + iwmr->page_cnt = (u32)pbl_depth; + + switch (req.reg_type) { + case IW_MEMREG_TYPE_QP: + use_pbles = ((req.sq_pages + req.rq_pages) > 2); + err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) + goto error; + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + case IW_MEMREG_TYPE_CQ: + use_pbles = (req.cq_pages > 1); + err = i40iw_handle_q_mem(iwdev, &req, iwpbl, use_pbles); + if (err) + goto error; + + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list); + iwpbl->on_list = true; + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case IW_MEMREG_TYPE_MEM: + use_pbles = (iwmr->page_cnt != 1); + access = I40IW_ACCESS_FLAGS_LOCALREAD; + + err = i40iw_setup_pbles(iwdev, iwmr, use_pbles); + if (err) + goto error; + + if (use_pbles) { + ret = i40iw_check_mr_contiguous(palloc, iwmr->page_size); + if (ret) { + i40iw_free_pble(iwdev->pble_rsrc, palloc); + iwpbl->pbl_allocated = false; + } + } + + access |= i40iw_get_user_access(acc); + stag = i40iw_create_stag(iwdev); + if (!stag) { + err = -ENOMEM; + goto error; + } + + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + + err = i40iw_hwreg_mr(iwdev, iwmr, access); + if (err) { + i40iw_free_stag(iwdev, stag); + goto error; + } + + break; + default: + goto error; + } + + iwmr->type = req.reg_type; + if (req.reg_type == IW_MEMREG_TYPE_MEM) + i40iw_add_pdusecount(iwpd); + return &iwmr->ibmr; + +error: + if (palloc->level != I40IW_LEVEL_0 && iwpbl->pbl_allocated) + i40iw_free_pble(iwdev->pble_rsrc, palloc); + ib_umem_release(region); + kfree(iwmr); + return ERR_PTR(err); +} + +/** + * i40iw_reg_phys_mr - register kernel physical memory + * @pd: ibpd pointer + * @addr: physical address of memory to register + * @size: size of memory to register + * @acc: Access rights + * @iova_start: start of virtual address for physical buffers + */ +struct ib_mr *i40iw_reg_phys_mr(struct ib_pd *pd, + u64 addr, + u64 size, + int acc, + u64 *iova_start) +{ + struct i40iw_pd *iwpd = to_iwpd(pd); + struct i40iw_device *iwdev = to_iwdev(pd->device); + struct i40iw_pbl *iwpbl; + struct i40iw_mr *iwmr; + enum i40iw_status_code status; + u32 stag; + u16 access = I40IW_ACCESS_FLAGS_LOCALREAD; + int ret; + + iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL); + if (!iwmr) + return ERR_PTR(-ENOMEM); + iwmr->ibmr.pd = pd; + iwmr->ibmr.device = pd->device; + iwpbl = &iwmr->iwpbl; + iwpbl->iwmr = iwmr; + iwmr->type = IW_MEMREG_TYPE_MEM; + iwpbl->user_base = *iova_start; + stag = i40iw_create_stag(iwdev); + if (!stag) { + ret = -EOVERFLOW; + goto err; + } + access |= i40iw_get_user_access(acc); + iwmr->stag = stag; + iwmr->ibmr.rkey = stag; + iwmr->ibmr.lkey = stag; + iwmr->page_cnt = 1; + iwmr->pgaddrmem[0] = addr; + iwmr->length = size; + status = i40iw_hwreg_mr(iwdev, iwmr, access); + if (status) { + i40iw_free_stag(iwdev, stag); + ret = -ENOMEM; + goto err; + } + + i40iw_add_pdusecount(iwpd); + return &iwmr->ibmr; + err: + kfree(iwmr); + return ERR_PTR(ret); +} + +/** + * i40iw_get_dma_mr - register physical mem + * @pd: ptr of pd + * @acc: access for memory + */ +static struct ib_mr *i40iw_get_dma_mr(struct ib_pd *pd, int acc) +{ + u64 kva = 0; + + return 
i40iw_reg_phys_mr(pd, 0, 0, acc, &kva); +} + +/** + * i40iw_del_mem_list - Deleting pbl list entries for CQ/QP + * @iwmr: iwmr for IB's user page addresses + * @ucontext: ptr to user context + */ +static void i40iw_del_memlist(struct i40iw_mr *iwmr, + struct i40iw_ucontext *ucontext) +{ + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + unsigned long flags; + + switch (iwmr->type) { + case IW_MEMREG_TYPE_CQ: + spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->cq_reg_mem_list_lock, flags); + break; + case IW_MEMREG_TYPE_QP: + spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags); + if (iwpbl->on_list) { + iwpbl->on_list = false; + list_del(&iwpbl->list); + } + spin_unlock_irqrestore(&ucontext->qp_reg_mem_list_lock, flags); + break; + default: + break; + } +} + +/** + * i40iw_dereg_mr - deregister mr + * @ib_mr: mr ptr for dereg + */ +static int i40iw_dereg_mr(struct ib_mr *ib_mr) +{ + struct ib_pd *ibpd = ib_mr->pd; + struct i40iw_pd *iwpd = to_iwpd(ibpd); + struct i40iw_mr *iwmr = to_iwmr(ib_mr); + struct i40iw_device *iwdev = to_iwdev(ib_mr->device); + enum i40iw_status_code status; + struct i40iw_dealloc_stag_info *info; + struct i40iw_pbl *iwpbl = &iwmr->iwpbl; + struct i40iw_pble_alloc *palloc = &iwpbl->pble_alloc; + struct i40iw_cqp_request *cqp_request; + struct cqp_commands_info *cqp_info; + u32 stag_idx; + + if (iwmr->region) + ib_umem_release(iwmr->region); + + if (iwmr->type != IW_MEMREG_TYPE_MEM) { + if (ibpd->uobject) { + struct i40iw_ucontext *ucontext; + + ucontext = to_ucontext(ibpd->uobject->context); + i40iw_del_memlist(iwmr, ucontext); + } + if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP) + i40iw_free_pble(iwdev->pble_rsrc, palloc); + kfree(iwmr); + return 0; + } + + cqp_request = i40iw_get_cqp_request(&iwdev->cqp, true); + if (!cqp_request) + return -ENOMEM; + + cqp_info = &cqp_request->info; + info = &cqp_info->in.u.dealloc_stag.info; + memset(info, 0, sizeof(*info)); + + info->pd_id = cpu_to_le32(iwpd->sc_pd.pd_id & 0x00007fff); + info->stag_idx = RS_64_1(ib_mr->rkey, I40IW_CQPSQ_STAG_IDX_SHIFT); + stag_idx = info->stag_idx; + info->mr = true; + if (iwpbl->pbl_allocated) + info->dealloc_pbl = true; + + cqp_info->cqp_cmd = OP_DEALLOC_STAG; + cqp_info->post_sq = 1; + cqp_info->in.u.dealloc_stag.dev = &iwdev->sc_dev; + cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request; + status = i40iw_handle_cqp_op(iwdev, cqp_request); + if (status) + i40iw_pr_err("CQP-OP dealloc failed for stag_idx = 0x%x\n", stag_idx); + i40iw_rem_pdusecount(iwpd, iwdev); + i40iw_free_stag(iwdev, iwmr->stag); + if (iwpbl->pbl_allocated) + i40iw_free_pble(iwdev->pble_rsrc, palloc); + kfree(iwmr); + return 0; +} + +/** + * i40iw_show_rev + */ +static ssize_t i40iw_show_rev(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i40iw_ib_device *iwibdev = container_of(dev, + struct i40iw_ib_device, + ibdev.dev); + u32 hw_rev = iwibdev->iwdev->sc_dev.hw_rev; + + return sprintf(buf, "%x\n", hw_rev); +} + +/** + * i40iw_show_hca + */ +static ssize_t i40iw_show_hca(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "I40IW\n"); +} + +/** + * i40iw_show_board + */ +static ssize_t i40iw_show_board(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%.*s\n", 32, "I40IW Board ID"); +} + +static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL); +static DEVICE_ATTR(hca_type, 
S_IRUGO, i40iw_show_hca, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL); + +static struct device_attribute *i40iw_dev_attributes[] = { + &dev_attr_hw_rev, + &dev_attr_hca_type, + &dev_attr_board_id +}; + +/** + * i40iw_copy_sg_list - copy sg list for qp + * @sg_list: copied into sg_list + * @sgl: copy from sgl + * @num_sges: count of sg entries + */ +static void i40iw_copy_sg_list(struct i40iw_sge *sg_list, struct ib_sge *sgl, int num_sges) +{ + unsigned int i; + + for (i = 0; (i < num_sges) && (i < I40IW_MAX_WQ_FRAGMENT_COUNT); i++) { + sg_list[i].tag_off = sgl[i].addr; + sg_list[i].len = sgl[i].length; + sg_list[i].stag = sgl[i].lkey; + } +} + +/** + * i40iw_post_send - kernel application wr + * @ibqp: qp ptr for wr + * @ib_wr: work request ptr + * @bad_wr: return of bad wr if err + */ +static int i40iw_post_send(struct ib_qp *ibqp, + const struct ib_send_wr *ib_wr, + const struct ib_send_wr **bad_wr) +{ + struct i40iw_qp *iwqp; + struct i40iw_qp_uk *ukqp; + struct i40iw_post_sq_info info; + enum i40iw_status_code ret; + int err = 0; + unsigned long flags; + bool inv_stag; + + iwqp = (struct i40iw_qp *)ibqp; + ukqp = &iwqp->sc_qp.qp_uk; + + spin_lock_irqsave(&iwqp->lock, flags); + + if (iwqp->flush_issued) { + err = -EINVAL; + goto out; + } + + while (ib_wr) { + inv_stag = false; + memset(&info, 0, sizeof(info)); + info.wr_id = (u64)(ib_wr->wr_id); + if ((ib_wr->send_flags & IB_SEND_SIGNALED) || iwqp->sig_all) + info.signaled = true; + if (ib_wr->send_flags & IB_SEND_FENCE) + info.read_fence = true; + + switch (ib_wr->opcode) { + case IB_WR_SEND: + /* fall-through */ + case IB_WR_SEND_WITH_INV: + if (ib_wr->opcode == IB_WR_SEND) { + if (ib_wr->send_flags & IB_SEND_SOLICITED) + info.op_type = I40IW_OP_TYPE_SEND_SOL; + else + info.op_type = I40IW_OP_TYPE_SEND; + } else { + if (ib_wr->send_flags & IB_SEND_SOLICITED) + info.op_type = I40IW_OP_TYPE_SEND_SOL_INV; + else + info.op_type = I40IW_OP_TYPE_SEND_INV; + } + + if (ib_wr->send_flags & IB_SEND_INLINE) { + info.op.inline_send.data = (void *)(unsigned long)ib_wr->sg_list[0].addr; + info.op.inline_send.len = ib_wr->sg_list[0].length; + ret = ukqp->ops.iw_inline_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false); + } else { + info.op.send.num_sges = ib_wr->num_sge; + info.op.send.sg_list = (struct i40iw_sge *)ib_wr->sg_list; + ret = ukqp->ops.iw_send(ukqp, &info, ib_wr->ex.invalidate_rkey, false); + } + + if (ret) { + if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED) + err = -ENOMEM; + else + err = -EINVAL; + } + break; + case IB_WR_RDMA_WRITE: + info.op_type = I40IW_OP_TYPE_RDMA_WRITE; + + if (ib_wr->send_flags & IB_SEND_INLINE) { + info.op.inline_rdma_write.data = (void *)(unsigned long)ib_wr->sg_list[0].addr; + info.op.inline_rdma_write.len = ib_wr->sg_list[0].length; + info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; + info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; + ret = ukqp->ops.iw_inline_rdma_write(ukqp, &info, false); + } else { + info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list; + info.op.rdma_write.num_lo_sges = ib_wr->num_sge; + info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; + info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey; + ret = ukqp->ops.iw_rdma_write(ukqp, &info, false); + } + + if (ret) { + if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED) + err = -ENOMEM; + else + err = -EINVAL; + } + break; + case IB_WR_RDMA_READ_WITH_INV: + inv_stag = true; + /* fall-through*/ + case IB_WR_RDMA_READ: + if (ib_wr->num_sge > I40IW_MAX_SGE_RD) { + err 
= -EINVAL; + break; + } + info.op_type = I40IW_OP_TYPE_RDMA_READ; + info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr; + info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey; + info.op.rdma_read.lo_addr.tag_off = ib_wr->sg_list->addr; + info.op.rdma_read.lo_addr.stag = ib_wr->sg_list->lkey; + info.op.rdma_read.lo_addr.len = ib_wr->sg_list->length; + ret = ukqp->ops.iw_rdma_read(ukqp, &info, inv_stag, false); + if (ret) { + if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED) + err = -ENOMEM; + else + err = -EINVAL; + } + break; + case IB_WR_LOCAL_INV: + info.op_type = I40IW_OP_TYPE_INV_STAG; + info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey; + ret = ukqp->ops.iw_stag_local_invalidate(ukqp, &info, true); + if (ret) + err = -ENOMEM; + break; + case IB_WR_REG_MR: + { + struct i40iw_mr *iwmr = to_iwmr(reg_wr(ib_wr)->mr); + int flags = reg_wr(ib_wr)->access; + struct i40iw_pble_alloc *palloc = &iwmr->iwpbl.pble_alloc; + struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev; + struct i40iw_fast_reg_stag_info info; + + memset(&info, 0, sizeof(info)); + info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD; + info.access_rights |= i40iw_get_user_access(flags); + info.stag_key = reg_wr(ib_wr)->key & 0xff; + info.stag_idx = reg_wr(ib_wr)->key >> 8; + info.page_size = reg_wr(ib_wr)->mr->page_size; + info.wr_id = ib_wr->wr_id; + + info.addr_type = I40IW_ADDR_TYPE_VA_BASED; + info.va = (void *)(uintptr_t)iwmr->ibmr.iova; + info.total_len = iwmr->ibmr.length; + info.reg_addr_pa = *(u64 *)palloc->level1.addr; + info.first_pm_pbl_index = palloc->level1.idx; + info.local_fence = ib_wr->send_flags & IB_SEND_FENCE; + info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED; + + if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR) + info.chunk_size = 1; + + ret = dev->iw_priv_qp_ops->iw_mr_fast_register(&iwqp->sc_qp, &info, true); + if (ret) + err = -ENOMEM; + break; + } + default: + err = -EINVAL; + i40iw_pr_err(" upost_send bad opcode = 0x%x\n", + ib_wr->opcode); + break; + } + + if (err) + break; + ib_wr = ib_wr->next; + } + +out: + if (err) + *bad_wr = ib_wr; + else + ukqp->ops.iw_qp_post_wr(ukqp); + spin_unlock_irqrestore(&iwqp->lock, flags); + + return err; +} + +/** + * i40iw_post_recv - post receive wr for kernel application + * @ibqp: ib qp pointer + * @ib_wr: work request for receive + * @bad_wr: bad wr caused an error + */ +static int i40iw_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *ib_wr, + const struct ib_recv_wr **bad_wr) +{ + struct i40iw_qp *iwqp; + struct i40iw_qp_uk *ukqp; + struct i40iw_post_rq_info post_recv; + struct i40iw_sge sg_list[I40IW_MAX_WQ_FRAGMENT_COUNT]; + enum i40iw_status_code ret = 0; + unsigned long flags; + int err = 0; + + iwqp = (struct i40iw_qp *)ibqp; + ukqp = &iwqp->sc_qp.qp_uk; + + memset(&post_recv, 0, sizeof(post_recv)); + spin_lock_irqsave(&iwqp->lock, flags); + + if (iwqp->flush_issued) { + err = -EINVAL; + goto out; + } + + while (ib_wr) { + post_recv.num_sges = ib_wr->num_sge; + post_recv.wr_id = ib_wr->wr_id; + i40iw_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge); + post_recv.sg_list = sg_list; + ret = ukqp->ops.iw_post_receive(ukqp, &post_recv); + if (ret) { + i40iw_pr_err(" post_recv err %d\n", ret); + if (ret == I40IW_ERR_QP_TOOMANY_WRS_POSTED) + err = -ENOMEM; + else + err = -EINVAL; + *bad_wr = ib_wr; + goto out; + } + ib_wr = ib_wr->next; + } + out: + spin_unlock_irqrestore(&iwqp->lock, flags); + return err; +} + +/** + * i40iw_poll_cq - poll cq for completion (kernel apps) + * @ibcq: cq to poll + * @num_entries: number of entries to 
poll + * @entry: wr of entry completed + */ +static int i40iw_poll_cq(struct ib_cq *ibcq, + int num_entries, + struct ib_wc *entry) +{ + struct i40iw_cq *iwcq; + int cqe_count = 0; + struct i40iw_cq_poll_info cq_poll_info; + enum i40iw_status_code ret; + struct i40iw_cq_uk *ukcq; + struct i40iw_sc_qp *qp; + struct i40iw_qp *iwqp; + unsigned long flags; + + iwcq = (struct i40iw_cq *)ibcq; + ukcq = &iwcq->sc_cq.cq_uk; + + spin_lock_irqsave(&iwcq->lock, flags); + while (cqe_count < num_entries) { + ret = ukcq->ops.iw_cq_poll_completion(ukcq, &cq_poll_info); + if (ret == I40IW_ERR_QUEUE_EMPTY) { + break; + } else if (ret == I40IW_ERR_QUEUE_DESTROYED) { + continue; + } else if (ret) { + if (!cqe_count) + cqe_count = -1; + break; + } + entry->wc_flags = 0; + entry->wr_id = cq_poll_info.wr_id; + if (cq_poll_info.error) { + entry->status = IB_WC_WR_FLUSH_ERR; + entry->vendor_err = cq_poll_info.major_err << 16 | cq_poll_info.minor_err; + } else { + entry->status = IB_WC_SUCCESS; + } + + switch (cq_poll_info.op_type) { + case I40IW_OP_TYPE_RDMA_WRITE: + entry->opcode = IB_WC_RDMA_WRITE; + break; + case I40IW_OP_TYPE_RDMA_READ_INV_STAG: + case I40IW_OP_TYPE_RDMA_READ: + entry->opcode = IB_WC_RDMA_READ; + break; + case I40IW_OP_TYPE_SEND_SOL: + case I40IW_OP_TYPE_SEND_SOL_INV: + case I40IW_OP_TYPE_SEND_INV: + case I40IW_OP_TYPE_SEND: + entry->opcode = IB_WC_SEND; + break; + case I40IW_OP_TYPE_REC: + entry->opcode = IB_WC_RECV; + break; + default: + entry->opcode = IB_WC_RECV; + break; + } + + entry->ex.imm_data = 0; + qp = (struct i40iw_sc_qp *)cq_poll_info.qp_handle; + entry->qp = (struct ib_qp *)qp->back_qp; + entry->src_qp = cq_poll_info.qp_id; + iwqp = (struct i40iw_qp *)qp->back_qp; + if (iwqp->iwarp_state > I40IW_QP_STATE_RTS) { + if (!I40IW_RING_MORE_WORK(qp->qp_uk.sq_ring)) + complete(&iwqp->sq_drained); + if (!I40IW_RING_MORE_WORK(qp->qp_uk.rq_ring)) + complete(&iwqp->rq_drained); + } + entry->byte_len = cq_poll_info.bytes_xfered; + entry++; + cqe_count++; + } + spin_unlock_irqrestore(&iwcq->lock, flags); + return cqe_count; +} + +/** + * i40iw_req_notify_cq - arm cq kernel application + * @ibcq: cq to arm + * @notify_flags: notofication flags + */ +static int i40iw_req_notify_cq(struct ib_cq *ibcq, + enum ib_cq_notify_flags notify_flags) +{ + struct i40iw_cq *iwcq; + struct i40iw_cq_uk *ukcq; + unsigned long flags; + enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT; + + iwcq = (struct i40iw_cq *)ibcq; + ukcq = &iwcq->sc_cq.cq_uk; + if (notify_flags == IB_CQ_SOLICITED) + cq_notify = IW_CQ_COMPL_SOLICITED; + spin_lock_irqsave(&iwcq->lock, flags); + ukcq->ops.iw_cq_request_notification(ukcq, cq_notify); + spin_unlock_irqrestore(&iwcq->lock, flags); + return 0; +} + +/** + * i40iw_port_immutable - return port's immutable data + * @ibdev: ib dev struct + * @port_num: port number + * @immutable: immutable data for the port return + */ +static int i40iw_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; + + err = ib_query_port(ibdev, port_num, &attr); + + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + +static const char * const i40iw_hw_stat_names[] = { + // 32bit names + [I40IW_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards", + [I40IW_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts", + [I40IW_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes", + 
[I40IW_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards", + [I40IW_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts", + [I40IW_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes", + [I40IW_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs", + [I40IW_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors", + [I40IW_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors", + // 64bit names + [I40IW_HW_STAT_INDEX_IP4RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InOctets", + [I40IW_HW_STAT_INDEX_IP4RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InPkts", + [I40IW_HW_STAT_INDEX_IP4RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InReasmRqd", + [I40IW_HW_STAT_INDEX_IP4RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4InMcastPkts", + [I40IW_HW_STAT_INDEX_IP4TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutOctets", + [I40IW_HW_STAT_INDEX_IP4TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutPkts", + [I40IW_HW_STAT_INDEX_IP4TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutSegRqd", + [I40IW_HW_STAT_INDEX_IP4TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip4OutMcastPkts", + [I40IW_HW_STAT_INDEX_IP6RXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InOctets", + [I40IW_HW_STAT_INDEX_IP6RXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InPkts", + [I40IW_HW_STAT_INDEX_IP6RXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InReasmRqd", + [I40IW_HW_STAT_INDEX_IP6RXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6InMcastPkts", + [I40IW_HW_STAT_INDEX_IP6TXOCTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutOctets", + [I40IW_HW_STAT_INDEX_IP6TXPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutPkts", + [I40IW_HW_STAT_INDEX_IP6TXFRAGS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutSegRqd", + [I40IW_HW_STAT_INDEX_IP6TXMCPKTS + I40IW_HW_STAT_INDEX_MAX_32] = + "ip6OutMcastPkts", + [I40IW_HW_STAT_INDEX_TCPRXSEGS + I40IW_HW_STAT_INDEX_MAX_32] = + "tcpInSegs", + [I40IW_HW_STAT_INDEX_TCPTXSEG + I40IW_HW_STAT_INDEX_MAX_32] = + "tcpOutSegs", + [I40IW_HW_STAT_INDEX_RDMARXRDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwInRdmaReads", + [I40IW_HW_STAT_INDEX_RDMARXSNDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwInRdmaSends", + [I40IW_HW_STAT_INDEX_RDMARXWRS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwInRdmaWrites", + [I40IW_HW_STAT_INDEX_RDMATXRDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwOutRdmaReads", + [I40IW_HW_STAT_INDEX_RDMATXSNDS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwOutRdmaSends", + [I40IW_HW_STAT_INDEX_RDMATXWRS + I40IW_HW_STAT_INDEX_MAX_32] = + "iwOutRdmaWrites", + [I40IW_HW_STAT_INDEX_RDMAVBND + I40IW_HW_STAT_INDEX_MAX_32] = + "iwRdmaBnd", + [I40IW_HW_STAT_INDEX_RDMAVINV + I40IW_HW_STAT_INDEX_MAX_32] = + "iwRdmaInv" +}; + +static void i40iw_get_dev_fw_str(struct ib_device *dev, char *str) +{ + u32 firmware_version = I40IW_FW_VERSION; + + snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u", firmware_version, + (firmware_version & 0x000000ff)); +} + +/** + * i40iw_alloc_hw_stats - Allocate a hw stats structure + * @ibdev: device pointer from stack + * @port_num: port number + */ +static struct rdma_hw_stats *i40iw_alloc_hw_stats(struct ib_device *ibdev, + u8 port_num) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + int num_counters = I40IW_HW_STAT_INDEX_MAX_32 + + I40IW_HW_STAT_INDEX_MAX_64; + unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN; + + BUILD_BUG_ON(ARRAY_SIZE(i40iw_hw_stat_names) != + (I40IW_HW_STAT_INDEX_MAX_32 + + I40IW_HW_STAT_INDEX_MAX_64)); + + /* + * PFs get the default update lifespan, but VFs only update once + * per second + */ + if (!dev->is_pf) + lifespan = 1000; + return rdma_alloc_hw_stats_struct(i40iw_hw_stat_names, num_counters, + lifespan); +} + +/** 
+ * i40iw_get_hw_stats - Populates the rdma_hw_stats structure + * @ibdev: device pointer from stack + * @stats: stats pointer from stack + * @port_num: port number + * @index: which hw counter the stack is requesting we update + */ +static int i40iw_get_hw_stats(struct ib_device *ibdev, + struct rdma_hw_stats *stats, + u8 port_num, int index) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_vsi_pestat *devstat = iwdev->vsi.pestat; + struct i40iw_dev_hw_stats *hw_stats = &devstat->hw_stats; + + if (dev->is_pf) { + i40iw_hw_stats_read_all(devstat, &devstat->hw_stats); + } else { + if (i40iw_vchnl_vf_get_pe_stats(dev, &devstat->hw_stats)) + return -ENOSYS; + } + + memcpy(&stats->value[0], hw_stats, sizeof(*hw_stats)); + + return stats->num_counters; +} + +/** + * i40iw_query_gid - Query port GID + * @ibdev: device pointer from stack + * @port: port number + * @index: Entry index + * @gid: Global ID + */ +static int i40iw_query_gid(struct ib_device *ibdev, + u8 port, + int index, + union ib_gid *gid) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + + memset(gid->raw, 0, sizeof(gid->raw)); + ether_addr_copy(gid->raw, iwdev->netdev->dev_addr); + return 0; +} + +/** + * i40iw_query_pkey - Query partition key + * @ibdev: device pointer from stack + * @port: port number + * @index: index of pkey + * @pkey: pointer to store the pkey + */ +static int i40iw_query_pkey(struct ib_device *ibdev, + u8 port, + u16 index, + u16 *pkey) +{ + *pkey = 0; + return 0; +} + +/** + * i40iw_get_vector_affinity - report IRQ affinity mask + * @ibdev: IB device + * @comp_vector: completion vector index + */ +static const struct cpumask *i40iw_get_vector_affinity(struct ib_device *ibdev, + int comp_vector) +{ + struct i40iw_device *iwdev = to_iwdev(ibdev); + struct i40iw_msix_vector *msix_vec; + + if (iwdev->msix_shared) + msix_vec = &iwdev->iw_msixtbl[comp_vector]; + else + msix_vec = &iwdev->iw_msixtbl[comp_vector + 1]; + + return irq_get_affinity_mask(msix_vec->irq); +} + +/** + * i40iw_init_rdma_device - initialization of iwarp device + * @iwdev: iwarp device + */ +static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev) +{ + struct i40iw_ib_device *iwibdev; + struct net_device *netdev = iwdev->netdev; + struct pci_dev *pcidev = (struct pci_dev *)iwdev->hw.dev_context; + + iwibdev = (struct i40iw_ib_device *)ib_alloc_device(sizeof(*iwibdev)); + if (!iwibdev) { + i40iw_pr_err("iwdev == NULL\n"); + return NULL; + } + strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX); + iwibdev->ibdev.owner = THIS_MODULE; + iwdev->iwibdev = iwibdev; + iwibdev->iwdev = iwdev; + + iwibdev->ibdev.node_type = RDMA_NODE_RNIC; + ether_addr_copy((u8 *)&iwibdev->ibdev.node_guid, netdev->dev_addr); + + iwibdev->ibdev.uverbs_cmd_mask = + (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | + (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | + (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | + (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | + (1ull << IB_USER_VERBS_CMD_REG_MR) | + (1ull << IB_USER_VERBS_CMD_DEREG_MR) | + (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | + (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | + (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | + (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_QP) | + (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | + (1ull << IB_USER_VERBS_CMD_QUERY_QP) | + (1ull << IB_USER_VERBS_CMD_POLL_CQ) | + (1ull << IB_USER_VERBS_CMD_CREATE_AH) | + (1ull << 
IB_USER_VERBS_CMD_DESTROY_AH) | + (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | + (1ull << IB_USER_VERBS_CMD_POST_RECV) | + (1ull << IB_USER_VERBS_CMD_POST_SEND); + iwibdev->ibdev.phys_port_cnt = 1; + iwibdev->ibdev.num_comp_vectors = iwdev->ceqs_count; + iwibdev->ibdev.dev.parent = &pcidev->dev; + iwibdev->ibdev.query_port = i40iw_query_port; + iwibdev->ibdev.query_pkey = i40iw_query_pkey; + iwibdev->ibdev.query_gid = i40iw_query_gid; + iwibdev->ibdev.alloc_ucontext = i40iw_alloc_ucontext; + iwibdev->ibdev.dealloc_ucontext = i40iw_dealloc_ucontext; + iwibdev->ibdev.mmap = i40iw_mmap; + iwibdev->ibdev.alloc_pd = i40iw_alloc_pd; + iwibdev->ibdev.dealloc_pd = i40iw_dealloc_pd; + iwibdev->ibdev.create_qp = i40iw_create_qp; + iwibdev->ibdev.modify_qp = i40iw_modify_qp; + iwibdev->ibdev.query_qp = i40iw_query_qp; + iwibdev->ibdev.destroy_qp = i40iw_destroy_qp; + iwibdev->ibdev.create_cq = i40iw_create_cq; + iwibdev->ibdev.destroy_cq = i40iw_destroy_cq; + iwibdev->ibdev.get_dma_mr = i40iw_get_dma_mr; + iwibdev->ibdev.reg_user_mr = i40iw_reg_user_mr; + iwibdev->ibdev.dereg_mr = i40iw_dereg_mr; + iwibdev->ibdev.alloc_hw_stats = i40iw_alloc_hw_stats; + iwibdev->ibdev.get_hw_stats = i40iw_get_hw_stats; + iwibdev->ibdev.query_device = i40iw_query_device; + iwibdev->ibdev.drain_sq = i40iw_drain_sq; + iwibdev->ibdev.drain_rq = i40iw_drain_rq; + iwibdev->ibdev.alloc_mr = i40iw_alloc_mr; + iwibdev->ibdev.map_mr_sg = i40iw_map_mr_sg; + iwibdev->ibdev.iwcm = kzalloc(sizeof(*iwibdev->ibdev.iwcm), GFP_KERNEL); + if (!iwibdev->ibdev.iwcm) { + ib_dealloc_device(&iwibdev->ibdev); + return NULL; + } + + iwibdev->ibdev.iwcm->add_ref = i40iw_add_ref; + iwibdev->ibdev.iwcm->rem_ref = i40iw_rem_ref; + iwibdev->ibdev.iwcm->get_qp = i40iw_get_qp; + iwibdev->ibdev.iwcm->connect = i40iw_connect; + iwibdev->ibdev.iwcm->accept = i40iw_accept; + iwibdev->ibdev.iwcm->reject = i40iw_reject; + iwibdev->ibdev.iwcm->create_listen = i40iw_create_listen; + iwibdev->ibdev.iwcm->destroy_listen = i40iw_destroy_listen; + memcpy(iwibdev->ibdev.iwcm->ifname, netdev->name, + sizeof(iwibdev->ibdev.iwcm->ifname)); + iwibdev->ibdev.get_port_immutable = i40iw_port_immutable; + iwibdev->ibdev.get_dev_fw_str = i40iw_get_dev_fw_str; + iwibdev->ibdev.poll_cq = i40iw_poll_cq; + iwibdev->ibdev.req_notify_cq = i40iw_req_notify_cq; + iwibdev->ibdev.post_send = i40iw_post_send; + iwibdev->ibdev.post_recv = i40iw_post_recv; + iwibdev->ibdev.get_vector_affinity = i40iw_get_vector_affinity; + + return iwibdev; +} + +/** + * i40iw_port_ibevent - indicate port event + * @iwdev: iwarp device + */ +void i40iw_port_ibevent(struct i40iw_device *iwdev) +{ + struct i40iw_ib_device *iwibdev = iwdev->iwibdev; + struct ib_event event; + + event.device = &iwibdev->ibdev; + event.element.port_num = 1; + event.event = iwdev->iw_status ? 
IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + ib_dispatch_event(&event); +} + +/** + * i40iw_unregister_rdma_device - unregister of iwarp from IB + * @iwibdev: rdma device ptr + */ +static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) + device_remove_file(&iwibdev->ibdev.dev, + i40iw_dev_attributes[i]); + ib_unregister_device(&iwibdev->ibdev); +} + +/** + * i40iw_destroy_rdma_device - destroy rdma device and free resources + * @iwibdev: IB device ptr + */ +void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev) +{ + if (!iwibdev) + return; + + i40iw_unregister_rdma_device(iwibdev); + kfree(iwibdev->ibdev.iwcm); + iwibdev->ibdev.iwcm = NULL; + wait_event_timeout(iwibdev->iwdev->close_wq, + !atomic64_read(&iwibdev->iwdev->use_count), + I40IW_EVENT_TIMEOUT); + ib_dealloc_device(&iwibdev->ibdev); +} + +/** + * i40iw_register_rdma_device - register iwarp device to IB + * @iwdev: iwarp device + */ +int i40iw_register_rdma_device(struct i40iw_device *iwdev) +{ + int i, ret; + struct i40iw_ib_device *iwibdev; + + iwdev->iwibdev = i40iw_init_rdma_device(iwdev); + if (!iwdev->iwibdev) + return -ENOMEM; + iwibdev = iwdev->iwibdev; + + iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW; + ret = ib_register_device(&iwibdev->ibdev, NULL); + if (ret) + goto error; + + for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) { + ret = + device_create_file(&iwibdev->ibdev.dev, + i40iw_dev_attributes[i]); + if (ret) { + while (i > 0) { + i--; + device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]); + } + ib_unregister_device(&iwibdev->ibdev); + goto error; + } + } + return 0; +error: + kfree(iwdev->iwibdev->ibdev.iwcm); + iwdev->iwibdev->ibdev.iwcm = NULL; + ib_dealloc_device(&iwdev->iwibdev->ibdev); + return ret; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h new file mode 100644 index 000000000..76cf17337 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h @@ -0,0 +1,180 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_VERBS_H +#define I40IW_VERBS_H + +struct i40iw_ucontext { + struct ib_ucontext ibucontext; + struct i40iw_device *iwdev; + struct list_head cq_reg_mem_list; + spinlock_t cq_reg_mem_list_lock; /* memory list for cq's */ + struct list_head qp_reg_mem_list; + spinlock_t qp_reg_mem_list_lock; /* memory list for qp's */ + int abi_ver; +}; + +struct i40iw_pd { + struct ib_pd ibpd; + struct i40iw_sc_pd sc_pd; + atomic_t usecount; +}; + +struct i40iw_hmc_pble { + union { + u32 idx; + dma_addr_t addr; + }; +}; + +struct i40iw_cq_mr { + struct i40iw_hmc_pble cq_pbl; + dma_addr_t shadow; +}; + +struct i40iw_qp_mr { + struct i40iw_hmc_pble sq_pbl; + struct i40iw_hmc_pble rq_pbl; + dma_addr_t shadow; + struct page *sq_page; +}; + +struct i40iw_pbl { + struct list_head list; + union { + struct i40iw_qp_mr qp_mr; + struct i40iw_cq_mr cq_mr; + }; + + bool pbl_allocated; + bool on_list; + u64 user_base; + struct i40iw_pble_alloc pble_alloc; + struct i40iw_mr *iwmr; +}; + +#define MAX_SAVE_PAGE_ADDRS 4 +struct i40iw_mr { + union { + struct ib_mr ibmr; + struct ib_mw ibmw; + struct ib_fmr ibfmr; + }; + struct ib_umem *region; + u16 type; + u32 page_cnt; + u32 page_size; + u64 page_msk; + u32 npages; + u32 stag; + u64 length; + u64 pgaddrmem[MAX_SAVE_PAGE_ADDRS]; + struct i40iw_pbl iwpbl; +}; + +struct i40iw_cq { + struct ib_cq ibcq; + struct i40iw_sc_cq sc_cq; + u16 cq_head; + u16 cq_size; + u16 cq_number; + bool user_mode; + u32 polled_completions; + u32 cq_mem_size; + struct i40iw_dma_mem kmem; + spinlock_t lock; /* for poll cq */ + struct i40iw_pbl *iwpbl; +}; + +struct disconn_work { + struct work_struct work; + struct i40iw_qp *iwqp; +}; + +struct iw_cm_id; +struct ietf_mpa_frame; +struct i40iw_ud_file; + +struct i40iw_qp_kmode { + struct i40iw_dma_mem dma_mem; + u64 *wrid_mem; +}; + +struct i40iw_qp { + struct ib_qp ibqp; + struct i40iw_sc_qp sc_qp; + struct i40iw_device *iwdev; + struct i40iw_cq *iwscq; + struct i40iw_cq *iwrcq; + struct i40iw_pd *iwpd; + struct i40iw_qp_host_ctx_info ctx_info; + struct i40iwarp_offload_info iwarp_info; + void *allocated_buffer; + atomic_t refcount; + struct iw_cm_id *cm_id; + void *cm_node; + struct ib_mr *lsmm_mr; + struct work_struct work; + enum ib_qp_state ibqp_state; + u32 iwarp_state; + u32 qp_mem_size; + u32 last_aeq; + atomic_t close_timer_started; + spinlock_t lock; /* for post work requests */ + struct i40iw_qp_context *iwqp_context; + void *pbl_vbase; + dma_addr_t pbl_pbase; + struct page *page; + u8 active_conn:1; + u8 user_mode:1; + u8 hte_added:1; + u8 flush_issued:1; + u8 destroyed:1; + u8 sig_all:1; + u8 pau_mode:1; + u8 rsvd:1; + u16 term_sq_flush_code; + u16 term_rq_flush_code; + u8 hw_iwarp_state; + u8 hw_tcp_state; + struct i40iw_qp_kmode kqp; + struct i40iw_dma_mem host_ctx; + struct timer_list terminate_timer; + struct i40iw_pbl iwpbl; + struct i40iw_dma_mem q2_ctx_mem; + struct i40iw_dma_mem ietf_mem; + struct completion sq_drained; + struct completion rq_drained; +}; +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.c b/drivers/infiniband/hw/i40iw/i40iw_vf.c new file mode 100644 index 000000000..e33d48109 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_vf.c @@ -0,0 +1,85 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_vf.h" + +/** + * i40iw_manage_vf_pble_bp - manage vf pble + * @cqp: cqp for cqp' sq wqe + * @info: pble info + * @scratch: pointer for completion + * @post_sq: to post and ring + */ +enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp, + struct i40iw_manage_vf_pble_info *info, + u64 scratch, + bool post_sq) +{ + u64 *wqe; + u64 temp, header, pd_pl_pba = 0; + + wqe = i40iw_sc_cqp_get_next_send_wqe(cqp, scratch); + if (!wqe) + return I40IW_ERR_RING_FULL; + + temp = LS_64(info->pd_entry_cnt, I40IW_CQPSQ_MVPBP_PD_ENTRY_CNT) | + LS_64(info->first_pd_index, I40IW_CQPSQ_MVPBP_FIRST_PD_INX) | + LS_64(info->sd_index, I40IW_CQPSQ_MVPBP_SD_INX); + set_64bit_val(wqe, 16, temp); + + header = LS_64((info->inv_pd_ent ? 1 : 0), I40IW_CQPSQ_MVPBP_INV_PD_ENT) | + LS_64(I40IW_CQP_OP_MANAGE_VF_PBLE_BP, I40IW_CQPSQ_OPCODE) | + LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); + set_64bit_val(wqe, 24, header); + + pd_pl_pba = LS_64(info->pd_pl_pba >> 3, I40IW_CQPSQ_MVPBP_PD_PLPBA); + set_64bit_val(wqe, 32, pd_pl_pba); + + i40iw_debug_buf(cqp->dev, I40IW_DEBUG_WQE, "MANAGE VF_PBLE_BP WQE", wqe, I40IW_CQP_WQE_SIZE * 8); + + if (post_sq) + i40iw_sc_cqp_post_sq(cqp); + return 0; +} + +const struct i40iw_vf_cqp_ops iw_vf_cqp_ops = { + i40iw_manage_vf_pble_bp +}; diff --git a/drivers/infiniband/hw/i40iw/i40iw_vf.h b/drivers/infiniband/hw/i40iw/i40iw_vf.h new file mode 100644 index 000000000..4359559ec --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_vf.h @@ -0,0 +1,62 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. 
You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#ifndef I40IW_VF_H +#define I40IW_VF_H + +struct i40iw_sc_cqp; + +struct i40iw_manage_vf_pble_info { + u32 sd_index; + u16 first_pd_index; + u16 pd_entry_cnt; + u8 inv_pd_ent; + u64 pd_pl_pba; +}; + +struct i40iw_vf_cqp_ops { + enum i40iw_status_code (*manage_vf_pble_bp)(struct i40iw_sc_cqp *, + struct i40iw_manage_vf_pble_info *, + u64, + bool); +}; + +enum i40iw_status_code i40iw_manage_vf_pble_bp(struct i40iw_sc_cqp *cqp, + struct i40iw_manage_vf_pble_info *info, + u64 scratch, + bool post_sq); + +extern const struct i40iw_vf_cqp_ops iw_vf_cqp_ops; + +#endif diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c new file mode 100644 index 000000000..48fd327f8 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c @@ -0,0 +1,756 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +*******************************************************************************/ + +#include "i40iw_osdep.h" +#include "i40iw_register.h" +#include "i40iw_status.h" +#include "i40iw_hmc.h" +#include "i40iw_d.h" +#include "i40iw_type.h" +#include "i40iw_p.h" +#include "i40iw_virtchnl.h" + +/** + * vchnl_vf_send_get_ver_req - Request Channel version + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + */ +static enum i40iw_status_code vchnl_vf_send_get_ver_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + + if (!dev->vchnl_up) + return ret_code; + + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg); + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_VER; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_VER_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_get_hmc_fcn_req - Request HMC Function from VF + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + */ +static enum i40iw_status_code vchnl_vf_send_get_hmc_fcn_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + + if (!dev->vchnl_up) + return ret_code; + + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg); + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_HMC_FCN; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_HMC_FCN_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_get_pe_stats_req - Request PE stats from VF + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + */ +static enum i40iw_status_code vchnl_vf_send_get_pe_stats_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + + if (!dev->vchnl_up) + return ret_code; + + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_dev_hw_stats) - 1; + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_GET_STATS; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_GET_STATS_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_add_hmc_objs_req - Add HMC objects + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message 
request pointer + */ +static enum i40iw_status_code vchnl_vf_send_add_hmc_objs_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj; + + if (!dev->vchnl_up) + return ret_code; + + add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + memset(add_hmc_obj, 0, sizeof(*add_hmc_obj)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1; + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0; + add_hmc_obj->obj_type = (u16)rsrc_type; + add_hmc_obj->start_index = start_index; + add_hmc_obj->obj_count = rsrc_count; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_vf_send_del_hmc_objs_req - del HMC objects + * @dev: IWARP device pointer + * @vchnl_req: Virtual channel message request pointer + * @ rsrc_type - resource type to delete + * @ start_index - starting index for resource + * @ rsrc_count - number of resource type to delete + */ +static enum i40iw_status_code vchnl_vf_send_del_hmc_objs_req(struct i40iw_sc_dev *dev, + struct i40iw_virtchnl_req *vchnl_req, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + enum i40iw_status_code ret_code = I40IW_ERR_NOT_READY; + struct i40iw_virtchnl_op_buf *vchnl_msg = vchnl_req->vchnl_msg; + struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj; + + if (!dev->vchnl_up) + return ret_code; + + add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + memset(vchnl_msg, 0, sizeof(*vchnl_msg)); + memset(add_hmc_obj, 0, sizeof(*add_hmc_obj)); + vchnl_msg->iw_chnl_op_ctx = (uintptr_t)vchnl_req; + vchnl_msg->iw_chnl_buf_len = sizeof(*vchnl_msg) + sizeof(struct i40iw_virtchnl_hmc_obj_range) - 1; + vchnl_msg->iw_op_code = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE; + vchnl_msg->iw_op_ver = I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0; + add_hmc_obj->obj_type = (u16)rsrc_type; + add_hmc_obj->start_index = start_index; + add_hmc_obj->obj_count = rsrc_count; + ret_code = dev->vchnl_if.vchnl_send(dev, 0, (u8 *)vchnl_msg, vchnl_msg->iw_chnl_buf_len); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); + return ret_code; +} + +/** + * vchnl_pf_send_get_ver_resp - Send channel version to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + */ +static void vchnl_pf_send_get_ver_resp(struct i40iw_sc_dev *dev, + u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u32) - 1]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(*resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS; + *((u32 
*)vchnl_msg_resp->iw_chnl_buf) = I40IW_VCHNL_CHNL_VER_V0; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * vchnl_pf_send_get_hmc_fcn_resp - Send HMC Function to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + */ +static void vchnl_pf_send_get_hmc_fcn_resp(struct i40iw_sc_dev *dev, + u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg, + u16 hmc_fcn) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(u16) - 1]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(*resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS; + *((u16 *)vchnl_msg_resp->iw_chnl_buf) = hmc_fcn; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * vchnl_pf_send_get_pe_stats_resp - Send PE Stats to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + * @hw_stats: HW Stats struct + */ + +static void vchnl_pf_send_get_pe_stats_resp(struct i40iw_sc_dev *dev, + u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg, + struct i40iw_dev_hw_stats *hw_stats) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(*resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = I40IW_SUCCESS; + *((struct i40iw_dev_hw_stats *)vchnl_msg_resp->iw_chnl_buf) = *hw_stats; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * vchnl_pf_send_error_resp - Send an error response to VF + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @vchnl_msg: Virtual channel message buffer pointer + */ +static void vchnl_pf_send_error_resp(struct i40iw_sc_dev *dev, u32 vf_id, + struct i40iw_virtchnl_op_buf *vchnl_msg, + u16 op_ret_code) +{ + enum i40iw_status_code ret_code; + u8 resp_buffer[sizeof(struct i40iw_virtchnl_resp_buf)]; + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)resp_buffer; + + memset(resp_buffer, 0, sizeof(resp_buffer)); + vchnl_msg_resp->iw_chnl_op_ctx = vchnl_msg->iw_chnl_op_ctx; + vchnl_msg_resp->iw_chnl_buf_len = sizeof(resp_buffer); + vchnl_msg_resp->iw_op_ret_code = (u16)op_ret_code; + ret_code = dev->vchnl_if.vchnl_send(dev, vf_id, resp_buffer, sizeof(resp_buffer)); + if (ret_code) + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: virt channel send failed 0x%x\n", __func__, ret_code); +} + +/** + * pf_cqp_get_hmc_fcn_callback - Callback for Get HMC Fcn + * @cqp_req_param: CQP Request param value + * @not_used: 
unused CQP callback parameter + */ +static void pf_cqp_get_hmc_fcn_callback(struct i40iw_sc_dev *dev, void *callback_param, + struct i40iw_ccq_cqe_info *cqe_info) +{ + struct i40iw_vfdev *vf_dev = callback_param; + struct i40iw_virt_mem vf_dev_mem; + + if (cqe_info->error) { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "CQP Completion Error on Get HMC Function. Maj = 0x%04x, Minor = 0x%04x\n", + cqe_info->maj_err_code, cqe_info->min_err_code); + dev->vf_dev[vf_dev->iw_vf_idx] = NULL; + vchnl_pf_send_error_resp(dev, vf_dev->vf_id, &vf_dev->vf_msg_buffer.vchnl_msg, + (u16)I40IW_ERR_CQP_COMPL_ERROR); + vf_dev_mem.va = vf_dev; + vf_dev_mem.size = sizeof(*vf_dev); + i40iw_free_virt_mem(dev->hw, &vf_dev_mem); + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "CQP Completion Operation Return information = 0x%08x\n", + cqe_info->op_ret_val); + vf_dev->pmf_index = (u16)cqe_info->op_ret_val; + vf_dev->msg_count--; + vchnl_pf_send_get_hmc_fcn_resp(dev, + vf_dev->vf_id, + &vf_dev->vf_msg_buffer.vchnl_msg, + vf_dev->pmf_index); + } +} + +/** + * pf_add_hmc_obj - Callback for Add HMC Object + * @vf_dev: pointer to the VF Device + */ +static void pf_add_hmc_obj_callback(void *work_vf_dev) +{ + struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev; + struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info; + struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg; + struct i40iw_hmc_create_obj_info info; + struct i40iw_virtchnl_hmc_obj_range *add_hmc_obj; + enum i40iw_status_code ret_code; + + if (!vf_dev->pf_hmc_initialized) { + ret_code = i40iw_pf_init_vfhmc(vf_dev->pf_dev, (u8)vf_dev->pmf_index, NULL); + if (ret_code) + goto add_out; + vf_dev->pf_hmc_initialized = true; + } + + add_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + + memset(&info, 0, sizeof(info)); + info.hmc_info = hmc_info; + info.is_pf = false; + info.rsrc_type = (u32)add_hmc_obj->obj_type; + info.entry_type = (info.rsrc_type == I40IW_HMC_IW_PBLE) ? I40IW_SD_TYPE_PAGED : I40IW_SD_TYPE_DIRECT; + info.start_idx = add_hmc_obj->start_index; + info.count = add_hmc_obj->obj_count; + i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT, + "I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE. Add %u type %u objects\n", + info.count, info.rsrc_type); + ret_code = i40iw_sc_create_hmc_obj(vf_dev->pf_dev, &info); + if (!ret_code) + vf_dev->hmc_info.hmc_obj[add_hmc_obj->obj_type].cnt = add_hmc_obj->obj_count; +add_out: + vf_dev->msg_count--; + vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code); +} + +/** + * pf_del_hmc_obj_callback - Callback for delete HMC Object + * @work_vf_dev: pointer to the VF Device + */ +static void pf_del_hmc_obj_callback(void *work_vf_dev) +{ + struct i40iw_vfdev *vf_dev = (struct i40iw_vfdev *)work_vf_dev; + struct i40iw_hmc_info *hmc_info = &vf_dev->hmc_info; + struct i40iw_virtchnl_op_buf *vchnl_msg = &vf_dev->vf_msg_buffer.vchnl_msg; + struct i40iw_hmc_del_obj_info info; + struct i40iw_virtchnl_hmc_obj_range *del_hmc_obj; + enum i40iw_status_code ret_code = I40IW_SUCCESS; + + if (!vf_dev->pf_hmc_initialized) + goto del_out; + + del_hmc_obj = (struct i40iw_virtchnl_hmc_obj_range *)vchnl_msg->iw_chnl_buf; + + memset(&info, 0, sizeof(info)); + info.hmc_info = hmc_info; + info.is_pf = false; + info.rsrc_type = (u32)del_hmc_obj->obj_type; + info.start_idx = del_hmc_obj->start_index; + info.count = del_hmc_obj->obj_count; + i40iw_debug(vf_dev->pf_dev, I40IW_DEBUG_VIRT, + "I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE. 
Delete %u type %u objects\n", + info.count, info.rsrc_type); + ret_code = i40iw_sc_del_hmc_obj(vf_dev->pf_dev, &info, false); +del_out: + vf_dev->msg_count--; + vchnl_pf_send_error_resp(vf_dev->pf_dev, vf_dev->vf_id, vchnl_msg, (u16)ret_code); +} + +/** + * i40iw_vf_init_pestat - Initialize stats for VF + * @devL pointer to the VF Device + * @stats: Statistics structure pointer + * @index: Stats index + */ +static void i40iw_vf_init_pestat(struct i40iw_sc_dev *dev, struct i40iw_vsi_pestat *stats, u16 index) +{ + stats->hw = dev->hw; + i40iw_hw_stats_init(stats, (u8)index, false); + spin_lock_init(&stats->lock); +} + +/** + * i40iw_vchnl_recv_pf - Receive PF virtual channel messages + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @msg: Virtual channel message buffer pointer + * @len: Length of the virtual channels message + */ +enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_virtchnl_op_buf *vchnl_msg = (struct i40iw_virtchnl_op_buf *)msg; + struct i40iw_vfdev *vf_dev = NULL; + struct i40iw_hmc_fcn_info hmc_fcn_info; + u16 iw_vf_idx; + u16 first_avail_iw_vf = I40IW_MAX_PE_ENABLED_VF_COUNT; + struct i40iw_virt_mem vf_dev_mem; + struct i40iw_virtchnl_work_info work_info; + struct i40iw_vsi_pestat *stats; + enum i40iw_status_code ret_code; + + if (!dev || !msg || !len) + return I40IW_ERR_PARAM; + + if (!dev->vchnl_up) + return I40IW_ERR_NOT_READY; + if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { + vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); + return I40IW_SUCCESS; + } + for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { + if (!dev->vf_dev[iw_vf_idx]) { + if (first_avail_iw_vf == I40IW_MAX_PE_ENABLED_VF_COUNT) + first_avail_iw_vf = iw_vf_idx; + continue; + } + if (dev->vf_dev[iw_vf_idx]->vf_id == vf_id) { + vf_dev = dev->vf_dev[iw_vf_idx]; + break; + } + } + if (vf_dev) { + if (!vf_dev->msg_count) { + vf_dev->msg_count++; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "VF%u already has a channel message in progress.\n", + vf_id); + return I40IW_SUCCESS; + } + } + switch (vchnl_msg->iw_op_code) { + case I40IW_VCHNL_OP_GET_HMC_FCN: + if (!vf_dev && + (first_avail_iw_vf != I40IW_MAX_PE_ENABLED_VF_COUNT)) { + ret_code = i40iw_allocate_virt_mem(dev->hw, &vf_dev_mem, sizeof(struct i40iw_vfdev) + + (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX)); + if (!ret_code) { + vf_dev = vf_dev_mem.va; + vf_dev->stats_initialized = false; + vf_dev->pf_dev = dev; + vf_dev->msg_count = 1; + vf_dev->vf_id = vf_id; + vf_dev->iw_vf_idx = first_avail_iw_vf; + vf_dev->pf_hmc_initialized = false; + vf_dev->hmc_info.hmc_obj = (struct i40iw_hmc_obj_info *)(&vf_dev[1]); + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "vf_dev %p, hmc_info %p, hmc_obj %p\n", + vf_dev, &vf_dev->hmc_info, vf_dev->hmc_info.hmc_obj); + dev->vf_dev[first_avail_iw_vf] = vf_dev; + iw_vf_idx = first_avail_iw_vf; + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "VF%u Unable to allocate a VF device structure.\n", + vf_id); + vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, (u16)I40IW_ERR_NO_MEMORY); + return I40IW_SUCCESS; + } + memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len); + hmc_fcn_info.callback_fcn = pf_cqp_get_hmc_fcn_callback; + hmc_fcn_info.vf_id = vf_id; + hmc_fcn_info.iw_vf_idx = vf_dev->iw_vf_idx; + hmc_fcn_info.cqp_callback_param = vf_dev; + hmc_fcn_info.free_fcn = false; + ret_code = i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info); + if (ret_code) + i40iw_debug(dev, 
I40IW_DEBUG_VIRT, + "VF%u error CQP HMC Function operation.\n", + vf_id); + i40iw_vf_init_pestat(dev, &vf_dev->pestat, vf_dev->pmf_index); + vf_dev->stats_initialized = true; + } else { + if (vf_dev) { + vf_dev->msg_count--; + vchnl_pf_send_get_hmc_fcn_resp(dev, vf_id, vchnl_msg, vf_dev->pmf_index); + } else { + vchnl_pf_send_error_resp(dev, vf_id, vchnl_msg, + (u16)I40IW_ERR_NO_MEMORY); + } + } + break; + case I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE: + if (!vf_dev) + return I40IW_ERR_BAD_PTR; + work_info.worker_vf_dev = vf_dev; + work_info.callback_fcn = pf_add_hmc_obj_callback; + memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len); + i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx); + break; + case I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE: + if (!vf_dev) + return I40IW_ERR_BAD_PTR; + work_info.worker_vf_dev = vf_dev; + work_info.callback_fcn = pf_del_hmc_obj_callback; + memcpy(&vf_dev->vf_msg_buffer.vchnl_msg, vchnl_msg, len); + i40iw_cqp_spawn_worker(dev, &work_info, vf_dev->iw_vf_idx); + break; + case I40IW_VCHNL_OP_GET_STATS: + if (!vf_dev) + return I40IW_ERR_BAD_PTR; + stats = &vf_dev->pestat; + i40iw_hw_stats_read_all(stats, &stats->hw_stats); + vf_dev->msg_count--; + vchnl_pf_send_get_pe_stats_resp(dev, vf_id, vchnl_msg, &stats->hw_stats); + break; + default: + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "40iw_vchnl_recv_pf: Invalid OpCode 0x%x\n", + vchnl_msg->iw_op_code); + vchnl_pf_send_error_resp(dev, vf_id, + vchnl_msg, (u16)I40IW_ERR_NOT_IMPLEMENTED); + } + return I40IW_SUCCESS; +} + +/** + * i40iw_vchnl_recv_vf - Receive VF virtual channel messages + * @dev: IWARP device pointer + * @vf_id: Virtual function ID associated with the message + * @msg: Virtual channel message buffer pointer + * @len: Length of the virtual channels message + */ +enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len) +{ + struct i40iw_virtchnl_resp_buf *vchnl_msg_resp = (struct i40iw_virtchnl_resp_buf *)msg; + struct i40iw_virtchnl_req *vchnl_req; + + vchnl_req = (struct i40iw_virtchnl_req *)(uintptr_t)vchnl_msg_resp->iw_chnl_op_ctx; + vchnl_req->ret_code = (enum i40iw_status_code)vchnl_msg_resp->iw_op_ret_code; + if (len == (sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)) { + if (vchnl_req->parm_len && vchnl_req->parm) + memcpy(vchnl_req->parm, vchnl_msg_resp->iw_chnl_buf, vchnl_req->parm_len); + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: Got response, data size %u\n", __func__, + vchnl_req->parm_len); + } else { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s: error length on response, Got %u, expected %u\n", __func__, + len, (u32)(sizeof(*vchnl_msg_resp) + vchnl_req->parm_len - 1)); + } + + return I40IW_SUCCESS; +} + +/** + * i40iw_vchnl_vf_get_ver - Request Channel version + * @dev: IWARP device pointer + * @vchnl_ver: Virtual channel message version pointer + */ +enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev, + u32 *vchnl_ver) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + if (!i40iw_vf_clear_to_send(dev)) + return I40IW_ERR_TIMEOUT; + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.parm = vchnl_ver; + vchnl_req.parm_len = sizeof(*vchnl_ver); + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + + ret_code = vchnl_vf_send_get_ver_req(dev, &vchnl_req); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + return ret_code; + } + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (ret_code) + return ret_code; 
+ else + return vchnl_req.ret_code; +} + +/** + * i40iw_vchnl_vf_get_hmc_fcn - Request HMC Function + * @dev: IWARP device pointer + * @hmc_fcn: HMC function index pointer + */ +enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev, + u16 *hmc_fcn) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + if (!i40iw_vf_clear_to_send(dev)) + return I40IW_ERR_TIMEOUT; + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.parm = hmc_fcn; + vchnl_req.parm_len = sizeof(*hmc_fcn); + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + + ret_code = vchnl_vf_send_get_hmc_fcn_req(dev, &vchnl_req); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + return ret_code; + } + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (ret_code) + return ret_code; + else + return vchnl_req.ret_code; +} + +/** + * i40iw_vchnl_vf_add_hmc_objs - Add HMC Object + * @dev: IWARP device pointer + * @rsrc_type: HMC Resource type + * @start_index: Starting index of the objects to be added + * @rsrc_count: Number of resources to be added + */ +enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + if (!i40iw_vf_clear_to_send(dev)) + return I40IW_ERR_TIMEOUT; + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + + ret_code = vchnl_vf_send_add_hmc_objs_req(dev, + &vchnl_req, + rsrc_type, + start_index, + rsrc_count); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + return ret_code; + } + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (ret_code) + return ret_code; + else + return vchnl_req.ret_code; +} + +/** + * i40iw_vchnl_vf_del_hmc_obj - del HMC obj + * @dev: IWARP device pointer + * @rsrc_type: HMC Resource type + * @start_index: Starting index of the object to delete + * @rsrc_count: Number of resources to be delete + */ +enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + if (!i40iw_vf_clear_to_send(dev)) + return I40IW_ERR_TIMEOUT; + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + + ret_code = vchnl_vf_send_del_hmc_objs_req(dev, + &vchnl_req, + rsrc_type, + start_index, + rsrc_count); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + return ret_code; + } + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (ret_code) + return ret_code; + else + return vchnl_req.ret_code; +} + +/** + * i40iw_vchnl_vf_get_pe_stats - Get PE stats + * @dev: IWARP device pointer + * @hw_stats: HW stats struct + */ +enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev, + struct i40iw_dev_hw_stats *hw_stats) +{ + struct i40iw_virtchnl_req vchnl_req; + enum i40iw_status_code ret_code; + + if (!i40iw_vf_clear_to_send(dev)) + return I40IW_ERR_TIMEOUT; + memset(&vchnl_req, 0, sizeof(vchnl_req)); + vchnl_req.dev = dev; + vchnl_req.parm = hw_stats; + vchnl_req.parm_len = sizeof(*hw_stats); + vchnl_req.vchnl_msg = &dev->vchnl_vf_msg_buf.vchnl_msg; + + ret_code = 
vchnl_vf_send_get_pe_stats_req(dev, &vchnl_req); + if (ret_code) { + i40iw_debug(dev, I40IW_DEBUG_VIRT, + "%s Send message failed 0x%0x\n", __func__, ret_code); + return ret_code; + } + ret_code = i40iw_vf_wait_vchnl_resp(dev); + if (ret_code) + return ret_code; + else + return vchnl_req.ret_code; +} diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h new file mode 100644 index 000000000..24886ef08 --- /dev/null +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.h @@ -0,0 +1,124 @@ +/******************************************************************************* +* +* Copyright (c) 2015-2016 Intel Corporation. All rights reserved. +* +* This software is available to you under a choice of one of two +* licenses. You may choose to be licensed under the terms of the GNU +* General Public License (GPL) Version 2, available from the file +* COPYING in the main directory of this source tree, or the +* OpenFabrics.org BSD license below: +* +* Redistribution and use in source and binary forms, with or +* without modification, are permitted provided that the following +* conditions are met: +* +* - Redistributions of source code must retain the above +* copyright notice, this list of conditions and the following +* disclaimer. +* +* - Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials +* provided with the distribution. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. 
+* +*******************************************************************************/ + +#ifndef I40IW_VIRTCHNL_H +#define I40IW_VIRTCHNL_H + +#include "i40iw_hmc.h" + +#pragma pack(push, 1) + +struct i40iw_virtchnl_op_buf { + u16 iw_op_code; + u16 iw_op_ver; + u16 iw_chnl_buf_len; + u16 rsvd; + u64 iw_chnl_op_ctx; + /* Member alignment MUST be maintained above this location */ + u8 iw_chnl_buf[1]; +}; + +struct i40iw_virtchnl_resp_buf { + u64 iw_chnl_op_ctx; + u16 iw_chnl_buf_len; + s16 iw_op_ret_code; + /* Member alignment MUST be maintained above this location */ + u16 rsvd[2]; + u8 iw_chnl_buf[1]; +}; + +enum i40iw_virtchnl_ops { + I40IW_VCHNL_OP_GET_VER = 0, + I40IW_VCHNL_OP_GET_HMC_FCN, + I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE, + I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE, + I40IW_VCHNL_OP_GET_STATS +}; + +#define I40IW_VCHNL_OP_GET_VER_V0 0 +#define I40IW_VCHNL_OP_GET_HMC_FCN_V0 0 +#define I40IW_VCHNL_OP_ADD_HMC_OBJ_RANGE_V0 0 +#define I40IW_VCHNL_OP_DEL_HMC_OBJ_RANGE_V0 0 +#define I40IW_VCHNL_OP_GET_STATS_V0 0 +#define I40IW_VCHNL_CHNL_VER_V0 0 + +struct i40iw_dev_hw_stats; + +struct i40iw_virtchnl_hmc_obj_range { + u16 obj_type; + u16 rsvd; + u32 start_index; + u32 obj_count; +}; + +enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len); + +enum i40iw_status_code i40iw_vchnl_recv_vf(struct i40iw_sc_dev *dev, + u32 vf_id, + u8 *msg, + u16 len); + +struct i40iw_virtchnl_req { + struct i40iw_sc_dev *dev; + struct i40iw_virtchnl_op_buf *vchnl_msg; + void *parm; + u32 vf_id; + u16 parm_len; + s16 ret_code; +}; + +#pragma pack(pop) + +enum i40iw_status_code i40iw_vchnl_vf_get_ver(struct i40iw_sc_dev *dev, + u32 *vchnl_ver); + +enum i40iw_status_code i40iw_vchnl_vf_get_hmc_fcn(struct i40iw_sc_dev *dev, + u16 *hmc_fcn); + +enum i40iw_status_code i40iw_vchnl_vf_add_hmc_objs(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count); + +enum i40iw_status_code i40iw_vchnl_vf_del_hmc_obj(struct i40iw_sc_dev *dev, + enum i40iw_hmc_rsrc_type rsrc_type, + u32 start_index, + u32 rsrc_count); + +enum i40iw_status_code i40iw_vchnl_vf_get_pe_stats(struct i40iw_sc_dev *dev, + struct i40iw_dev_hw_stats *hw_stats); +#endif |
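Editor's note on the wire format above: both i40iw_virtchnl_op_buf and i40iw_virtchnl_resp_buf end in a one-byte iw_chnl_buf[1] tail, which is why every sender in i40iw_virtchnl.c sizes its buffer as sizeof(header) + payload - 1, and why i40iw_vchnl_recv_vf checks that same arithmetic before copying the payload into vchnl_req->parm; the opaque iw_chnl_op_ctx cookie is what lets the VF recover its pending request when the response comes back. The standalone userspace sketch below illustrates that sizing convention and the cookie round trip. It is a minimal sketch only: the demo_* structures are simplified local stand-ins for the driver's packed headers, and the file name and identifiers are invented for illustration, not part of the driver.

/* standalone_vchnl_demo.c - illustrative only; build with: cc -std=c99 -Wall standalone_vchnl_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified, packed copies of the two wire formats, with the 1-byte payload tail. */
#pragma pack(push, 1)
struct demo_op_buf {
        uint16_t iw_op_code;
        uint16_t iw_op_ver;
        uint16_t iw_chnl_buf_len;
        uint16_t rsvd;
        uint64_t iw_chnl_op_ctx;   /* opaque cookie, echoed back in the response */
        uint8_t  iw_chnl_buf[1];   /* payload starts here */
};

struct demo_resp_buf {
        uint64_t iw_chnl_op_ctx;
        uint16_t iw_chnl_buf_len;
        int16_t  iw_op_ret_code;
        uint16_t rsvd[2];
        uint8_t  iw_chnl_buf[1];
};
#pragma pack(pop)

/* Pending-request bookkeeping, in the spirit of struct i40iw_virtchnl_req. */
struct demo_req {
        void    *parm;
        uint16_t parm_len;
        int16_t  ret_code;
};

#define DEMO_OP_GET_VER 0

int main(void)
{
        /* "VF" side: build a GET_VER request; the op carries no payload, so the
         * length is just the header (the declared 1-byte tail is already counted). */
        uint32_t version = 0xffffffff;
        struct demo_req req = { .parm = &version, .parm_len = sizeof(version) };
        uint8_t req_buf[sizeof(struct demo_op_buf)];
        struct demo_op_buf *msg = (struct demo_op_buf *)req_buf;

        memset(req_buf, 0, sizeof(req_buf));
        msg->iw_op_code = DEMO_OP_GET_VER;
        msg->iw_chnl_buf_len = sizeof(*msg);
        msg->iw_chnl_op_ctx = (uintptr_t)&req;

        /* "PF" side: answer with a u32 payload; the buffer is header + payload - 1. */
        uint32_t ver0 = 0;                      /* stands in for I40IW_VCHNL_CHNL_VER_V0 */
        uint8_t resp_buf[sizeof(struct demo_resp_buf) + sizeof(uint32_t) - 1];
        struct demo_resp_buf *resp = (struct demo_resp_buf *)resp_buf;

        memset(resp_buf, 0, sizeof(resp_buf));
        resp->iw_chnl_op_ctx = msg->iw_chnl_op_ctx;   /* echo the cookie */
        resp->iw_chnl_buf_len = sizeof(resp_buf);
        resp->iw_op_ret_code = 0;
        memcpy(resp->iw_chnl_buf, &ver0, sizeof(ver0));

        /* VF receive path, mirroring the length check in i40iw_vchnl_recv_vf:
         * the total length must equal header + parm_len - 1 before copying out. */
        struct demo_req *pending = (struct demo_req *)(uintptr_t)resp->iw_chnl_op_ctx;
        uint16_t len = resp->iw_chnl_buf_len;

        pending->ret_code = resp->iw_op_ret_code;
        if (len == sizeof(*resp) + pending->parm_len - 1)
                memcpy(pending->parm, resp->iw_chnl_buf, pending->parm_len);
        else
                fprintf(stderr, "bad length %u\n", (unsigned)len);

        printf("channel version %u, ret_code %d\n", version, pending->ret_code);
        return 0;
}

The subtraction of 1 in every size calculation accounts for the single payload byte already declared inside the header struct: a GET_VER request therefore sends exactly sizeof(struct i40iw_virtchnl_op_buf) bytes, while, for example, a stats response sends sizeof(struct i40iw_virtchnl_resp_buf) + sizeof(struct i40iw_dev_hw_stats) - 1.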
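A second point worth calling out is the dispatch bookkeeping in i40iw_vchnl_recv_pf: the PF scans dev->vf_dev[] for the caller's vf_id, remembers the first free slot in case the VF has no device structure yet, and refuses a new request while msg_count is nonzero, so each VF has at most one channel message in flight. The short standalone sketch below models that slot search and in-flight guard with a plain array; claim_vf_slot, MAX_VF and the other names are invented for the example, and it is an assumption-laden illustration of the bookkeeping, not the driver's implementation.

/* vf_slot_demo.c - illustrative only; build with: cc -std=c99 -Wall vf_slot_demo.c */
#include <stdio.h>
#include <stdbool.h>

#define MAX_VF 4    /* stand-in for I40IW_MAX_PE_ENABLED_VF_COUNT */

struct vf_slot {
        bool in_use;
        unsigned int vf_id;
        int msg_count;   /* only one channel message may be in flight per VF */
};

static struct vf_slot vf_table[MAX_VF];

/* Find (or create) the slot for vf_id: remember the first free slot while
 * searching for an existing one, and refuse a new message if one is pending. */
static struct vf_slot *claim_vf_slot(unsigned int vf_id)
{
        int first_free = MAX_VF;
        int i;

        for (i = 0; i < MAX_VF; i++) {
                if (!vf_table[i].in_use) {
                        if (first_free == MAX_VF)
                                first_free = i;
                        continue;
                }
                if (vf_table[i].vf_id == vf_id) {
                        if (vf_table[i].msg_count)
                                return NULL;    /* message already in progress */
                        vf_table[i].msg_count++;
                        return &vf_table[i];
                }
        }
        if (first_free == MAX_VF)
                return NULL;                    /* no slots left */
        vf_table[first_free].in_use = true;
        vf_table[first_free].vf_id = vf_id;
        vf_table[first_free].msg_count = 1;
        return &vf_table[first_free];
}

int main(void)
{
        struct vf_slot *a = claim_vf_slot(7);
        struct vf_slot *b = claim_vf_slot(7);   /* rejected: still in flight */

        printf("first claim %s, second claim %s\n",
               a ? "ok" : "rejected", b ? "ok" : "rejected");
        if (a)
                a->msg_count--;                 /* work done, allow the next message */
        return 0;
}

In the driver the new slot is only created on I40IW_VCHNL_OP_GET_HMC_FCN, and msg_count is decremented again once the CQP callback or the spawned work item has sent its response, which is what re-opens the channel for the next request from that VF.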