author      Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit      2c3c1048746a4622d8c89a29670120dc8fab93c4
tree        848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/scsi/bnx2fc
parent      Initial commit.
download    linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
            linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/scsi/bnx2fc')
-rw-r--r--   drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h     1004
-rw-r--r--   drivers/scsi/bnx2fc/Kconfig                 14
-rw-r--r--   drivers/scsi/bnx2fc/Makefile                 5
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc.h               608
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_constants.h     288
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_debug.c          84
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_debug.h          47
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_els.c           950
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_fcoe.c         2989
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_hwi.c          2199
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_io.c           2102
-rw-r--r--   drivers/scsi/bnx2fc/bnx2fc_tgt.c           896
12 files changed, 11186 insertions, 0 deletions
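
The added code builds as the bnx2fc module when CONFIG_SCSI_BNX2X_FCOE is enabled (see the Kconfig and Makefile introduced in the diff below). As a minimal sketch, not part of this commit, the driver could be built as a module and loaded as follows, assuming a kernel configured from this tree on a system with a supported Broadcom/QLogic 57xx adapter:

    # kernel .config fragment: build the FCoE offload driver as a module
    CONFIG_SCSI_BNX2X_FCOE=m

    # after building and installing the modules, load it
    modprobe bnx2fc
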
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h new file mode 100644 index 000000000..698f5ebaa --- /dev/null +++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h @@ -0,0 +1,1004 @@ +/* 57xx_hsi_bnx2fc.h: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#ifndef __57XX_FCOE_HSI_LINUX_LE__ +#define __57XX_FCOE_HSI_LINUX_LE__ + +/* + * common data for all protocols + */ +struct b577xx_doorbell_hdr { + u8 header; +#define B577XX_DOORBELL_HDR_RX (0x1<<0) +#define B577XX_DOORBELL_HDR_RX_SHIFT 0 +#define B577XX_DOORBELL_HDR_DB_TYPE (0x1<<1) +#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT 1 +#define B577XX_DOORBELL_HDR_DPM_SIZE (0x3<<2) +#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT 2 +#define B577XX_DOORBELL_HDR_CONN_TYPE (0xF<<4) +#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT 4 +}; + +/* + * doorbell message sent to the chip + */ +struct b577xx_doorbell { +#if defined(__BIG_ENDIAN) + u16 zero_fill2; + u8 zero_fill1; + struct b577xx_doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct b577xx_doorbell_hdr header; + u8 zero_fill1; + u16 zero_fill2; +#endif +}; + + + +/* + * doorbell message sent to the chip + */ +struct b577xx_doorbell_set_prod { +#if defined(__BIG_ENDIAN) + u16 prod; + u8 zero_fill1; + struct b577xx_doorbell_hdr header; +#elif defined(__LITTLE_ENDIAN) + struct b577xx_doorbell_hdr header; + u8 zero_fill1; + u16 prod; +#endif +}; + + +struct regpair { + __le32 lo; + __le32 hi; +}; + + +/* + * ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_info { + __le16 aborted_task_id; + __le16 reserved0; + __le32 reserved1; +}; + + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_abts_rsp_union { + u8 r_ctl; + u8 rsrv[3]; + __le32 abts_rsp_payload[7]; +}; + + +/* + * 4 regs size $$KEEP_ENDIANNESS$$ + */ +struct fcoe_bd_ctx { + __le32 buf_addr_hi; + __le32 buf_addr_lo; + __le16 buf_len; + __le16 rsrv0; + __le16 flags; + __le16 rsrv1; +}; + + +/* + * FCoE cached sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cached_sge_ctx { + struct regpair cur_buf_addr; + __le16 cur_buf_rem; + __le16 second_buf_rem; + struct regpair second_buf_addr; +}; + + +/* + * Cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cleanup_info { + __le16 cleaned_task_id; + __le16 rolled_tx_seq_cnt; + __le32 rolled_tx_data_offset; +}; + + +/* + * Fcp RSP flags $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_flags { + u8 flags; +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0) +#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0 +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1) +#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2 +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3) +#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3 +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4) +#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4 +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5) +#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5 +}; + +/* + * Fcp RSP payload 
$$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_payload { + struct regpair reserved0; + __le32 fcp_resid; + u8 scsi_status_code; + struct fcoe_fcp_rsp_flags fcp_flags; + __le16 retry_delay_timer; + __le32 fcp_rsp_len; + __le32 fcp_sns_len; +}; + +/* + * Fixed size structure in order to plant it in Union structure + * $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_rsp_union { + struct fcoe_fcp_rsp_payload payload; + struct regpair reserved0; +}; + +/* + * FC header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_hdr { + u8 s_id[3]; + u8 cs_ctl; + u8 d_id[3]; + u8 r_ctl; + __le16 seq_cnt; + u8 df_ctl; + u8 seq_id; + u8 f_ctl[3]; + u8 type; + __le32 parameters; + __le16 rx_id; + __le16 ox_id; +}; + +/* + * FC header union $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mp_rsp_union { + struct fcoe_fc_hdr fc_hdr; + __le32 mp_payload_len; + __le32 rsrv; +}; + +/* + * Completion information $$KEEP_ENDIANNESS$$ + */ +union fcoe_comp_flow_info { + struct fcoe_fcp_rsp_union fcp_rsp; + struct fcoe_abts_rsp_union abts_rsp; + struct fcoe_mp_rsp_union mp_rsp; + __le32 opaque[8]; +}; + + +/* + * External ABTS info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_abts_info { + __le32 rsrv0[6]; + struct fcoe_abts_info ctx; +}; + + +/* + * External cleanup info $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_cleanup_info { + __le32 rsrv0[6]; + struct fcoe_cleanup_info ctx; +}; + + +/* + * Fcoe FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fw_tx_seq_ctx { + __le32 data_offset; + __le16 seq_cnt; + __le16 rsrv0; +}; + +/* + * Fcoe external FW Tx sequence context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_fw_tx_seq_ctx { + __le32 rsrv0[6]; + struct fcoe_fw_tx_seq_ctx ctx; +}; + + +/* + * FCoE multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_mul_sges_ctx { + struct regpair cur_sge_addr; + __le16 cur_sge_off; + u8 cur_sge_idx; + u8 sgl_size; +}; + +/* + * FCoE external multiple sges context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_ext_mul_sges_ctx { + struct fcoe_mul_sges_ctx mul_sgl; + struct regpair rsrv0; +}; + + +/* + * FCP CMD payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_cmd_payload { + __le32 opaque[8]; +}; + + + + + +/* + * Fcp xfr rdy payload $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fcp_xfr_rdy_payload { + __le32 burst_len; + __le32 data_ro; +}; + + +/* + * FC frame $$KEEP_ENDIANNESS$$ + */ +struct fcoe_fc_frame { + struct fcoe_fc_hdr fc_hdr; + __le32 reserved0[2]; +}; + + + + +/* + * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$ + */ +union fcoe_kcqe_params { + __le32 reserved0[4]; +}; + +/* + * FCoE KCQ CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kcqe { + __le32 fcoe_conn_id; + __le32 completion_status; + __le32 fcoe_conn_context_id; + union fcoe_kcqe_params params; + __le16 qe_self_seq; + u8 op_code; + u8 flags; +#define FCOE_KCQE_RESERVED0 (0x7<<0) +#define FCOE_KCQE_RESERVED0_SHIFT 0 +#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3) +#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3 +#define FCOE_KCQE_LAYER_CODE (0x7<<4) +#define FCOE_KCQE_LAYER_CODE_SHIFT 4 +#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7) +#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7 +}; + + + +/* + * FCoE KWQE header $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_header { + u8 op_code; + u8 flags; +#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0) +#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0 +#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4) +#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4 +#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7) +#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init1 { + __le16 
num_tasks; + struct fcoe_kwqe_header hdr; + __le32 task_list_pbl_addr_lo; + __le32 task_list_pbl_addr_hi; + __le32 dummy_buffer_addr_lo; + __le32 dummy_buffer_addr_hi; + __le16 sq_num_wqes; + __le16 rq_num_wqes; + __le16 rq_buffer_log_size; + __le16 cq_num_wqes; + __le16 mtu; + u8 num_sessions_log; + u8 flags; +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0) +#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0 +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4) +#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4 +#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7) +#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7 +}; + +/* + * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init2 { + u8 hsi_major_version; + u8 hsi_minor_version; + struct fcoe_kwqe_header hdr; + __le32 hash_tbl_pbl_addr_lo; + __le32 hash_tbl_pbl_addr_hi; + __le32 t2_hash_tbl_addr_lo; + __le32 t2_hash_tbl_addr_hi; + __le32 t2_ptr_hash_tbl_addr_lo; + __le32 t2_ptr_hash_tbl_addr_hi; + __le32 free_list_count; +}; + +/* + * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_init3 { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 error_bit_map_lo; + __le32 error_bit_map_hi; + u8 perf_config; + u8 reserved21[3]; + __le32 reserved2[4]; +}; + +/* + * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload1 { + __le16 fcoe_conn_id; + struct fcoe_kwqe_header hdr; + __le32 sq_addr_lo; + __le32 sq_addr_hi; + __le32 rq_pbl_addr_lo; + __le32 rq_pbl_addr_hi; + __le32 rq_first_pbe_addr_lo; + __le32 rq_first_pbe_addr_hi; + __le16 rq_prod; + __le16 reserved0; +}; + +/* + * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload2 { + __le16 tx_max_fc_pay_len; + struct fcoe_kwqe_header hdr; + __le32 cq_addr_lo; + __le32 cq_addr_hi; + __le32 xferq_addr_lo; + __le32 xferq_addr_hi; + __le32 conn_db_addr_lo; + __le32 conn_db_addr_hi; + __le32 reserved1; +}; + +/* + * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload3 { + __le16 vlan_tag; +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12) +#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13 + struct fcoe_kwqe_header hdr; + u8 s_id[3]; + u8 tx_max_conc_seqs_c3; + u8 d_id[3]; + u8 flags; +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0) +#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0 +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1) +#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2 +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3) +#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3 +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4) +#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4 +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5) +#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5 +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6) +#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6 +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7) +#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7 + __le32 reserved; + __le32 confq_first_pbe_addr_lo; + __le32 confq_first_pbe_addr_hi; + __le16 tx_total_conc_seqs; + __le16 rx_max_fc_pay_len; + __le16 rx_total_conc_seqs; + u8 rx_max_conc_seqs_c3; + u8 
rx_open_seqs_exch_c3; +}; + +/* + * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_offload4 { + u8 e_d_tov_timer_val; + u8 reserved2; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u8 dst_mac_addr_hi[2]; + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + __le32 lcq_addr_lo; + __le32 lcq_addr_hi; + __le32 confq_pbl_base_addr_lo; + __le32 confq_pbl_base_addr_hi; +}; + +/* + * FCoE connection enable request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_enable_disable { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + u8 src_mac_addr_lo[2]; + u8 src_mac_addr_mid[2]; + u8 src_mac_addr_hi[2]; + u16 vlan_tag; +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12 +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13) +#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13 + u8 dst_mac_addr_lo[2]; + u8 dst_mac_addr_mid[2]; + u8 dst_mac_addr_hi[2]; + __le16 reserved1; + u8 s_id[3]; + u8 vlan_flag; + u8 d_id[3]; + u8 reserved3; + __le32 context_id; + __le32 conn_id; + __le32 reserved4; +}; + +/* + * FCoE connection destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_conn_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 context_id; + __le32 conn_id; + __le32 reserved1[5]; +}; + +/* + * FCoe destroy request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_destroy { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 reserved1[7]; +}; + +/* + * FCoe statistics request $$KEEP_ENDIANNESS$$ + */ +struct fcoe_kwqe_stat { + __le16 reserved0; + struct fcoe_kwqe_header hdr; + __le32 stat_params_addr_lo; + __le32 stat_params_addr_hi; + __le32 reserved1[5]; +}; + +/* + * FCoE KWQ WQE $$KEEP_ENDIANNESS$$ + */ +union fcoe_kwqe { + struct fcoe_kwqe_init1 init1; + struct fcoe_kwqe_init2 init2; + struct fcoe_kwqe_init3 init3; + struct fcoe_kwqe_conn_offload1 conn_offload1; + struct fcoe_kwqe_conn_offload2 conn_offload2; + struct fcoe_kwqe_conn_offload3 conn_offload3; + struct fcoe_kwqe_conn_offload4 conn_offload4; + struct fcoe_kwqe_conn_enable_disable conn_enable_disable; + struct fcoe_kwqe_conn_destroy conn_destroy; + struct fcoe_kwqe_destroy destroy; + struct fcoe_kwqe_stat statistics; +}; + + + + + + + + + + + + + + + + +/* + * TX SGL context $$KEEP_ENDIANNESS$$ + */ +union fcoe_sgl_union_ctx { + struct fcoe_cached_sge_ctx cached_sge; + struct fcoe_ext_mul_sges_ctx sgl; + __le32 opaque[5]; +}; + +/* + * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$ + */ +struct fcoe_read_flow_info { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0[3]; +}; + + +/* + * Fcoe stat context $$KEEP_ENDIANNESS$$ + */ +struct fcoe_s_stat_ctx { + u8 flags; +#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0) +#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0 +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1) +#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1 +#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2) +#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2 +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3) +#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3 +#define FCOE_S_STAT_CTX_P_RJT (0x1<<4) +#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4 +#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5) +#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5 +#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6) +#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6 +}; + +/* + * Fcoe rx seq context $$KEEP_ENDIANNESS$$ + */ +struct 
fcoe_rx_seq_ctx { + u8 seq_id; + struct fcoe_s_stat_ctx s_stat; + __le16 seq_cnt; + __le32 low_exp_ro; + __le32 high_exp_ro; +}; + + +/* + * Fcoe rx_wr union context $$KEEP_ENDIANNESS$$ + */ +union fcoe_rx_wr_union_ctx { + struct fcoe_read_flow_info read_info; + union fcoe_comp_flow_info comp_info; + __le32 opaque[8]; +}; + + + +/* + * FCoE SQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_sqe { + __le16 wqe; +#define FCOE_SQE_TASK_ID (0x7FFF<<0) +#define FCOE_SQE_TASK_ID_SHIFT 0 +#define FCOE_SQE_TOGGLE_BIT (0x1<<15) +#define FCOE_SQE_TOGGLE_BIT_SHIFT 15 +}; + + + +/* + * 14 regs $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_only { + union fcoe_sgl_union_ctx sgl_ctx; + __le32 rsrv0; +}; + +/* + * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$ + */ +union fcoe_tx_wr_rx_rd_union_ctx { + struct fcoe_fc_frame tx_frame; + struct fcoe_fcp_cmd_payload fcp_cmd; + struct fcoe_ext_cleanup_info cleanup; + struct fcoe_ext_abts_info abts; + struct fcoe_ext_fw_tx_seq_ctx tx_seq; + __le32 opaque[8]; +}; + +/* + * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd_const { + u8 init_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3) +#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4 +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7 + u8 tx_flags; +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5 +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6) +#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6 +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7) +#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7 + __le16 rsrv3; + __le32 verify_tx_seq; +}; + +/* + * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_tx_wr_rx_rd { + union fcoe_tx_wr_rx_rd_union_ctx union_ctx; + struct fcoe_tce_tx_wr_rx_rd_const const_ctx; +}; + +/* + * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_const { + __le32 data_2_trns; + __le32 init_flags; +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0) +#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24) +#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24 +}; + +/* + * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd_var { + __le16 rx_flags; +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0 +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4) +#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4 +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7) +#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8 +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12) +#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13 
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14 +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15) +#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15 + __le16 rx_id; + struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy; +}; + +/* + * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_wr_tx_rd { + struct fcoe_tce_rx_wr_tx_rd_const const_ctx; + struct fcoe_tce_rx_wr_tx_rd_var var_ctx; +}; + +/* + * tce_rx_only $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tce_rx_only { + struct fcoe_rx_seq_ctx rx_seq_ctx; + union fcoe_rx_wr_union_ctx union_ctx; +}; + +/* + * task_ctx_entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_task_ctx_entry { + struct fcoe_tce_tx_only txwr_only; + struct fcoe_tce_tx_wr_rx_rd txwr_rxrd; + struct fcoe_tce_rx_wr_tx_rd rxwr_txrd; + struct fcoe_tce_rx_only rxwr_only; +}; + + + + + + + + + + +/* + * FCoE XFRQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_xfrqe { + __le16 wqe; +#define FCOE_XFRQE_TASK_ID (0x7FFF<<0) +#define FCOE_XFRQE_TASK_ID_SHIFT 0 +#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15) +#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * fcoe rx doorbell message sent to the chip $$KEEP_ENDIANNESS$$ + */ +struct b577xx_fcoe_rx_doorbell { + struct b577xx_doorbell_hdr hdr; + u8 params; +#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM (0x1F<<0) +#define B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT 0 +#define B577XX_FCOE_RX_DOORBELL_OPCODE (0x7<<5) +#define B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT 5 + __le16 doorbell_cq_cons; +}; + + +/* + * FCoE CONFQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_confqe { + __le16 ox_id; + __le16 rx_id; + __le32 param; +}; + + +/* + * FCoE connection data base + */ +struct fcoe_conn_db { +#if defined(__BIG_ENDIAN) + u16 rsrv0; + u16 rq_prod; +#elif defined(__LITTLE_ENDIAN) + u16 rq_prod; + u16 rsrv0; +#endif + u32 rsrv1; + struct regpair cq_arm; +}; + + +/* + * FCoE CQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_cqe { + __le16 wqe; +#define FCOE_CQE_CQE_INFO (0x3FFF<<0) +#define FCOE_CQE_CQE_INFO_SHIFT 0 +#define FCOE_CQE_CQE_TYPE (0x1<<14) +#define FCOE_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_partial_err_report_entry { + __le32 err_warn_bitmap_lo; + __le32 err_warn_bitmap_hi; + __le32 tx_buf_off; + __le32 rx_buf_off; +}; + +/* + * FCoE error/warning reporting entry $$KEEP_ENDIANNESS$$ + */ +struct fcoe_err_report_entry { + struct fcoe_partial_err_report_entry data; + struct fcoe_fc_hdr fc_hdr; +}; + + +/* + * FCoE hash table entry (32 bytes) $$KEEP_ENDIANNESS$$ + */ +struct fcoe_hash_table_entry { + u8 s_id_0; + u8 s_id_1; + u8 s_id_2; + u8 d_id_0; + u8 d_id_1; + u8 d_id_2; + __le16 dst_mac_addr_hi; + __le16 dst_mac_addr_mid; + __le16 dst_mac_addr_lo; + __le16 src_mac_addr_hi; + __le16 vlan_id; + __le16 src_mac_addr_lo; + __le16 src_mac_addr_mid; + u8 vlan_flag; + u8 reserved0; + __le16 reserved1; + __le32 reserved2; + __le32 field_id; +#define FCOE_HASH_TABLE_ENTRY_CID (0xFFFFFF<<0) +#define FCOE_HASH_TABLE_ENTRY_CID_SHIFT 0 +#define FCOE_HASH_TABLE_ENTRY_RESERVED3 (0x7F<<24) +#define FCOE_HASH_TABLE_ENTRY_RESERVED3_SHIFT 24 +#define FCOE_HASH_TABLE_ENTRY_VALID (0x1<<31) +#define FCOE_HASH_TABLE_ENTRY_VALID_SHIFT 31 +}; + + +/* + * FCoE LCQ element $$KEEP_ENDIANNESS$$ + */ +struct fcoe_lcqe { + __le32 wqe; +#define FCOE_LCQE_TASK_ID (0xFFFF<<0) +#define FCOE_LCQE_TASK_ID_SHIFT 0 +#define FCOE_LCQE_LCQE_TYPE (0xFF<<16) +#define FCOE_LCQE_LCQE_TYPE_SHIFT 
16 +#define FCOE_LCQE_RESERVED (0xFF<<24) +#define FCOE_LCQE_RESERVED_SHIFT 24 +}; + + + +/* + * FCoE pending work request CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_pend_wq_cqe { + __le16 wqe; +#define FCOE_PEND_WQ_CQE_TASK_ID (0x3FFF<<0) +#define FCOE_PEND_WQ_CQE_TASK_ID_SHIFT 0 +#define FCOE_PEND_WQ_CQE_CQE_TYPE (0x1<<14) +#define FCOE_PEND_WQ_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_PEND_WQ_CQE_TOGGLE_BIT_SHIFT 15 +}; + + +/* + * FCoE RX statistics parameters section#0 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_stat_params_section0 { + __le32 fcoe_rx_pkt_cnt; + __le32 fcoe_rx_byte_cnt; +}; + + +/* + * FCoE RX statistics parameters section#1 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_stat_params_section1 { + __le32 fcoe_ver_cnt; + __le32 fcoe_rx_drop_pkt_cnt; +}; + + +/* + * FCoE RX statistics parameters section#2 $$KEEP_ENDIANNESS$$ + */ +struct fcoe_rx_stat_params_section2 { + __le32 fc_crc_cnt; + __le32 eofa_del_cnt; + __le32 miss_frame_cnt; + __le32 seq_timeout_cnt; + __le32 drop_seq_cnt; + __le32 fcoe_rx_drop_pkt_cnt; + __le32 fcp_rx_pkt_cnt; + __le32 reserved0; +}; + + +/* + * FCoE TX statistics parameters $$KEEP_ENDIANNESS$$ + */ +struct fcoe_tx_stat_params { + __le32 fcoe_tx_pkt_cnt; + __le32 fcoe_tx_byte_cnt; + __le32 fcp_tx_pkt_cnt; + __le32 reserved0; +}; + +/* + * FCoE statistics parameters $$KEEP_ENDIANNESS$$ + */ +struct fcoe_statistics_params { + struct fcoe_tx_stat_params tx_stat; + struct fcoe_rx_stat_params_section0 rx_stat0; + struct fcoe_rx_stat_params_section1 rx_stat1; + struct fcoe_rx_stat_params_section2 rx_stat2; +}; + + +/* + * FCoE t2 hash table entry (64 bytes) $$KEEP_ENDIANNESS$$ + */ +struct fcoe_t2_hash_table_entry { + struct fcoe_hash_table_entry data; + struct regpair next; + struct regpair reserved0[3]; +}; + + + +/* + * FCoE unsolicited CQE $$KEEP_ENDIANNESS$$ + */ +struct fcoe_unsolicited_cqe { + __le16 wqe; +#define FCOE_UNSOLICITED_CQE_SUBTYPE (0x3<<0) +#define FCOE_UNSOLICITED_CQE_SUBTYPE_SHIFT 0 +#define FCOE_UNSOLICITED_CQE_PKT_LEN (0xFFF<<2) +#define FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT 2 +#define FCOE_UNSOLICITED_CQE_CQE_TYPE (0x1<<14) +#define FCOE_UNSOLICITED_CQE_CQE_TYPE_SHIFT 14 +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT (0x1<<15) +#define FCOE_UNSOLICITED_CQE_TOGGLE_BIT_SHIFT 15 +}; + +#endif /* __57XX_FCOE_HSI_LINUX_LE__ */ diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig new file mode 100644 index 000000000..ecdc0f0f4 --- /dev/null +++ b/drivers/scsi/bnx2fc/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-only +config SCSI_BNX2X_FCOE + tristate "QLogic FCoE offload support" + depends on PCI + depends on (IPV6 || IPV6=n) + depends on LIBFC + depends on LIBFCOE + depends on MMU + select NETDEVICES + select ETHERNET + select NET_VENDOR_BROADCOM + select CNIC + help + This driver supports FCoE offload for the QLogic devices. diff --git a/drivers/scsi/bnx2fc/Makefile b/drivers/scsi/bnx2fc/Makefile new file mode 100644 index 000000000..1d72e279a --- /dev/null +++ b/drivers/scsi/bnx2fc/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SCSI_BNX2X_FCOE) += bnx2fc.o + +bnx2fc-y := bnx2fc_els.o bnx2fc_fcoe.o bnx2fc_hwi.o bnx2fc_io.o bnx2fc_tgt.o \ + bnx2fc_debug.o diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h new file mode 100644 index 000000000..046247420 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -0,0 +1,608 @@ +/* bnx2fc.h: QLogic Linux FCoE offload driver. 
+ * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#ifndef _BNX2FC_H_ +#define _BNX2FC_H_ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/kernel.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/kthread.h> +#include <linux/crc32.h> +#include <linux/cpu.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/errno.h> +#include <linux/pci.h> +#include <linux/init.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/bitops.h> +#include <linux/log2.h> +#include <linux/interrupt.h> +#include <linux/sched/signal.h> +#include <linux/io.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_eh.h> +#include <scsi/scsi_tcq.h> +#include <scsi/libfc.h> +#include <scsi/libfcoe.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/fc/fc_fip.h> +#include <scsi/fc/fc_fc2.h> +#include <scsi/fc_frame.h> +#include <scsi/fc/fc_fcoe.h> +#include <scsi/fc/fc_fcp.h> + +#include "57xx_hsi_bnx2fc.h" +#include "../../net/ethernet/broadcom/cnic_if.h" +#include "../../net/ethernet/broadcom/bnx2x/bnx2x_mfw_req.h" +#include "bnx2fc_constants.h" + +#define BNX2FC_NAME "bnx2fc" +#define BNX2FC_VERSION "2.12.13" + +#define PFX "bnx2fc: " + +#define BCM_CHIP_LEN 16 + +#define BNX2X_DOORBELL_PCI_BAR 2 + +#define BNX2FC_MAX_BD_LEN 0xffff +#define BNX2FC_BD_SPLIT_SZ 0xffff +#define BNX2FC_MAX_BDS_PER_CMD 255 +#define BNX2FC_FW_MAX_BDS_PER_CMD 255 + +#define BNX2FC_SQ_WQES_MAX 256 + +#define BNX2FC_SCSI_MAX_SQES ((3 * BNX2FC_SQ_WQES_MAX) / 8) +#define BNX2FC_TM_MAX_SQES ((BNX2FC_SQ_WQES_MAX) / 2) +#define BNX2FC_ELS_MAX_SQES (BNX2FC_TM_MAX_SQES - 1) + +#define BNX2FC_RQ_WQES_MAX 16 +#define BNX2FC_CQ_WQES_MAX (BNX2FC_SQ_WQES_MAX + BNX2FC_RQ_WQES_MAX) + +#define BNX2FC_NUM_MAX_SESS 1024 +#define BNX2FC_NUM_MAX_SESS_LOG (ilog2(BNX2FC_NUM_MAX_SESS)) + +#define BNX2FC_MAX_NPIV 256 + +#define BNX2FC_MIN_PAYLOAD 256 +#define BNX2FC_MAX_PAYLOAD 2048 +#define BNX2FC_MFS \ + (BNX2FC_MAX_PAYLOAD + sizeof(struct fc_frame_header)) +#define BNX2FC_MINI_JUMBO_MTU 2500 + + +#define BNX2FC_RQ_BUF_SZ 256 +#define BNX2FC_RQ_BUF_LOG_SZ (ilog2(BNX2FC_RQ_BUF_SZ)) + +#define BNX2FC_SQ_WQE_SIZE (sizeof(struct fcoe_sqe)) +#define BNX2FC_CQ_WQE_SIZE (sizeof(struct fcoe_cqe)) +#define BNX2FC_RQ_WQE_SIZE (BNX2FC_RQ_BUF_SZ) +#define BNX2FC_XFERQ_WQE_SIZE (sizeof(struct fcoe_xfrqe)) +#define BNX2FC_CONFQ_WQE_SIZE (sizeof(struct fcoe_confqe)) +#define BNX2X_DB_SHIFT 3 + +#define BNX2FC_TASK_SIZE 128 +#define BNX2FC_TASKS_PER_PAGE (PAGE_SIZE/BNX2FC_TASK_SIZE) + +#define BNX2FC_MAX_ROWS_IN_HASH_TBL 8 +#define BNX2FC_HASH_TBL_CHUNK_SIZE (16 * 1024) + +#define BNX2FC_MAX_SEQS 255 +#define BNX2FC_MAX_RETRY_CNT 3 +#define BNX2FC_MAX_RPORT_RETRY_CNT 255 + +#define BNX2FC_READ (1 << 1) +#define BNX2FC_WRITE (1 << 0) + +#define BNX2FC_MIN_XID 0 +#define 
FCOE_MAX_NUM_XIDS 0x2000 +#define FCOE_MAX_XID_OFFSET (FCOE_MAX_NUM_XIDS - 1) +#define FCOE_XIDS_PER_CPU_OFFSET ((512 * nr_cpu_ids) - 1) +#define BNX2FC_MAX_LUN 0xFFFF +#define BNX2FC_MAX_FCP_TGT 256 +#define BNX2FC_MAX_CMD_LEN 16 + +#define BNX2FC_TM_TIMEOUT 60 /* secs */ +#define BNX2FC_IO_TIMEOUT 20000UL /* msecs */ + +#define BNX2FC_WAIT_CNT 1200 +#define BNX2FC_FW_TIMEOUT (3 * HZ) +#define PORT_MAX 2 + +/* FC FCP Status */ +#define FC_GOOD 0 + +#define BNX2FC_RNID_HBA 0x7 + +#define SRR_RETRY_COUNT 5 +#define REC_RETRY_COUNT 1 +#define BNX2FC_NUM_ERR_BITS 63 + +#define BNX2FC_RELOGIN_WAIT_TIME 200 +#define BNX2FC_RELOGIN_WAIT_CNT 10 + +#define BNX2FC_STATS(hba, stat, cnt) \ + do { \ + u32 val; \ + \ + val = fw_stats->stat.cnt; \ + if (hba->prev_stats.stat.cnt <= val) \ + val -= hba->prev_stats.stat.cnt; \ + else \ + val += (0xfffffff - hba->prev_stats.stat.cnt); \ + hba->bfw_stats.cnt += val; \ + } while (0) + +/* bnx2fc driver uses only one instance of fcoe_percpu_s */ +extern struct fcoe_percpu_s bnx2fc_global; + +extern struct workqueue_struct *bnx2fc_wq; + +struct bnx2fc_percpu_s { + struct task_struct *iothread; + struct list_head work_list; + spinlock_t fp_work_lock; +}; + +struct bnx2fc_fw_stats { + u64 fc_crc_cnt; + u64 fcoe_tx_pkt_cnt; + u64 fcoe_rx_pkt_cnt; + u64 fcoe_tx_byte_cnt; + u64 fcoe_rx_byte_cnt; +}; + +struct bnx2fc_hba { + struct list_head list; + struct cnic_dev *cnic; + struct pci_dev *pcidev; + struct net_device *phys_dev; + unsigned long reg_with_cnic; + #define BNX2FC_CNIC_REGISTERED 1 + struct bnx2fc_cmd_mgr *cmd_mgr; + spinlock_t hba_lock; + struct mutex hba_mutex; + struct mutex hba_stats_mutex; + unsigned long adapter_state; + #define ADAPTER_STATE_UP 0 + #define ADAPTER_STATE_GOING_DOWN 1 + #define ADAPTER_STATE_LINK_DOWN 2 + #define ADAPTER_STATE_READY 3 + unsigned long flags; + #define BNX2FC_FLAG_FW_INIT_DONE 0 + #define BNX2FC_FLAG_DESTROY_CMPL 1 + u32 next_conn_id; + + /* xid resources */ + u16 max_xid; + u32 max_tasks; + u32 max_outstanding_cmds; + u32 elstm_xids; + + struct fcoe_task_ctx_entry **task_ctx; + dma_addr_t *task_ctx_dma; + struct regpair *task_ctx_bd_tbl; + dma_addr_t task_ctx_bd_dma; + + int hash_tbl_segment_count; + void **hash_tbl_segments; + void *hash_tbl_pbl; + dma_addr_t hash_tbl_pbl_dma; + struct fcoe_t2_hash_table_entry *t2_hash_tbl; + dma_addr_t t2_hash_tbl_dma; + char *t2_hash_tbl_ptr; + dma_addr_t t2_hash_tbl_ptr_dma; + + char *dummy_buffer; + dma_addr_t dummy_buf_dma; + + /* Active list of offloaded sessions */ + struct bnx2fc_rport **tgt_ofld_list; + + /* statistics */ + struct bnx2fc_fw_stats bfw_stats; + struct fcoe_statistics_params prev_stats; + struct fcoe_statistics_params *stats_buffer; + dma_addr_t stats_buf_dma; + struct completion stat_req_done; + struct fcoe_capabilities fcoe_cap; + + /*destroy handling */ + struct timer_list destroy_timer; + wait_queue_head_t destroy_wait; + + /* linkdown handling */ + wait_queue_head_t shutdown_wait; + int wait_for_link_down; + int num_ofld_sess; + struct list_head vports; + + char chip_num[BCM_CHIP_LEN]; +}; + +struct bnx2fc_interface { + struct list_head list; + unsigned long if_flags; + #define BNX2FC_CTLR_INIT_DONE 0 + struct bnx2fc_hba *hba; + struct net_device *netdev; + struct packet_type fcoe_packet_type; + struct packet_type fip_packet_type; + struct workqueue_struct *timer_work_queue; + struct kref kref; + u8 vlan_enabled; + int vlan_id; + bool enabled; + u8 tm_timeout; +}; + +#define bnx2fc_from_ctlr(x) \ + ((struct bnx2fc_interface *)((x) + 1)) + +#define 
bnx2fc_to_ctlr(x) \ + ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)) + +struct bnx2fc_lport { + struct list_head list; + struct fc_lport *lport; +}; + +struct bnx2fc_cmd_mgr { + struct bnx2fc_hba *hba; + u16 next_idx; + struct list_head *free_list; + spinlock_t *free_list_lock; + struct io_bdt **io_bdt_pool; + struct bnx2fc_cmd **cmds; +}; + +struct bnx2fc_rport { + struct fcoe_port *port; + struct fc_rport *rport; + struct fc_rport_priv *rdata; + void __iomem *ctx_base; +#define DPM_TRIGER_TYPE 0x40 + u32 io_timeout; + u32 fcoe_conn_id; + u32 context_id; + u32 sid; + int dev_type; + + unsigned long flags; +#define BNX2FC_FLAG_SESSION_READY 0x1 +#define BNX2FC_FLAG_OFFLOADED 0x2 +#define BNX2FC_FLAG_DISABLED 0x3 +#define BNX2FC_FLAG_DESTROYED 0x4 +#define BNX2FC_FLAG_OFLD_REQ_CMPL 0x5 +#define BNX2FC_FLAG_CTX_ALLOC_FAILURE 0x6 +#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7 +#define BNX2FC_FLAG_DISABLE_FAILED 0x9 +#define BNX2FC_FLAG_ENABLED 0xa + + u8 src_addr[ETH_ALEN]; + u32 max_sqes; + u32 max_rqes; + u32 max_cqes; + atomic_t free_sqes; + + struct b577xx_doorbell_set_prod sq_db; + struct b577xx_fcoe_rx_doorbell rx_db; + + struct fcoe_sqe *sq; + dma_addr_t sq_dma; + u16 sq_prod_idx; + u8 sq_curr_toggle_bit; + u32 sq_mem_size; + + struct fcoe_cqe *cq; + dma_addr_t cq_dma; + u16 cq_cons_idx; + u8 cq_curr_toggle_bit; + u32 cq_mem_size; + + void *rq; + dma_addr_t rq_dma; + u32 rq_prod_idx; + u32 rq_cons_idx; + u32 rq_mem_size; + + void *rq_pbl; + dma_addr_t rq_pbl_dma; + u32 rq_pbl_size; + + struct fcoe_xfrqe *xferq; + dma_addr_t xferq_dma; + u32 xferq_mem_size; + + struct fcoe_confqe *confq; + dma_addr_t confq_dma; + u32 confq_mem_size; + + void *confq_pbl; + dma_addr_t confq_pbl_dma; + u32 confq_pbl_size; + + struct fcoe_conn_db *conn_db; + dma_addr_t conn_db_dma; + u32 conn_db_mem_size; + + struct fcoe_sqe *lcq; + dma_addr_t lcq_dma; + u32 lcq_mem_size; + + void *ofld_req[4]; + dma_addr_t ofld_req_dma[4]; + void *enbl_req; + dma_addr_t enbl_req_dma; + + spinlock_t tgt_lock; + spinlock_t cq_lock; + atomic_t num_active_ios; + u32 flush_in_prog; + unsigned long timestamp; + unsigned long retry_delay_timestamp; + struct list_head free_task_list; + struct bnx2fc_cmd *pending_queue[BNX2FC_SQ_WQES_MAX+1]; + struct list_head active_cmd_queue; + struct list_head els_queue; + struct list_head io_retire_queue; + struct list_head active_tm_queue; + + struct timer_list ofld_timer; + wait_queue_head_t ofld_wait; + + struct timer_list upld_timer; + wait_queue_head_t upld_wait; +}; + +struct bnx2fc_mp_req { + u8 tm_flags; + + u32 req_len; + void *req_buf; + dma_addr_t req_buf_dma; + struct fcoe_bd_ctx *mp_req_bd; + dma_addr_t mp_req_bd_dma; + struct fc_frame_header req_fc_hdr; + + u32 resp_len; + void *resp_buf; + dma_addr_t resp_buf_dma; + struct fcoe_bd_ctx *mp_resp_bd; + dma_addr_t mp_resp_bd_dma; + struct fc_frame_header resp_fc_hdr; +}; + +struct bnx2fc_els_cb_arg { + struct bnx2fc_cmd *aborted_io_req; + struct bnx2fc_cmd *io_req; + u16 l2_oxid; + u32 offset; + enum fc_rctl r_ctl; +}; + +/* bnx2fc command structure */ +struct bnx2fc_cmd { + struct list_head link; + u8 on_active_queue; + u8 on_tmf_queue; + u8 cmd_type; +#define BNX2FC_SCSI_CMD 1 +#define BNX2FC_TASK_MGMT_CMD 2 +#define BNX2FC_ABTS 3 +#define BNX2FC_ELS 4 +#define BNX2FC_CLEANUP 5 +#define BNX2FC_SEQ_CLEANUP 6 + u8 io_req_flags; + struct kref refcount; + struct fcoe_port *port; + struct bnx2fc_rport *tgt; + struct scsi_cmnd *sc_cmd; + struct bnx2fc_cmd_mgr *cmd_mgr; + struct bnx2fc_mp_req mp_req; + void (*cb_func)(struct 
bnx2fc_els_cb_arg *cb_arg); + struct bnx2fc_els_cb_arg *cb_arg; + struct delayed_work timeout_work; /* timer for ULP timeouts */ + struct completion abts_done; + struct completion cleanup_done; + int wait_for_abts_comp; + int wait_for_cleanup_comp; + u16 xid; + struct fcoe_err_report_entry err_entry; + struct fcoe_task_ctx_entry *task; + struct io_bdt *bd_tbl; + struct fcp_rsp *rsp; + size_t data_xfer_len; + unsigned long req_flags; +#define BNX2FC_FLAG_ISSUE_RRQ 0x1 +#define BNX2FC_FLAG_ISSUE_ABTS 0x2 +#define BNX2FC_FLAG_ABTS_DONE 0x3 +#define BNX2FC_FLAG_TM_COMPL 0x4 +#define BNX2FC_FLAG_TM_TIMEOUT 0x5 +#define BNX2FC_FLAG_IO_CLEANUP 0x6 +#define BNX2FC_FLAG_RETIRE_OXID 0x7 +#define BNX2FC_FLAG_EH_ABORT 0x8 +#define BNX2FC_FLAG_IO_COMPL 0x9 +#define BNX2FC_FLAG_ELS_DONE 0xa +#define BNX2FC_FLAG_ELS_TIMEOUT 0xb +#define BNX2FC_FLAG_CMD_LOST 0xc +#define BNX2FC_FLAG_SRR_SENT 0xd +#define BNX2FC_FLAG_ISSUE_CLEANUP_REQ 0xe + u8 rec_retry; + u8 srr_retry; + u32 srr_offset; + u8 srr_rctl; + u32 fcp_resid; + u32 fcp_rsp_len; + u32 fcp_sns_len; + u8 cdb_status; /* SCSI IO status */ + u8 fcp_status; /* FCP IO status */ + u8 fcp_rsp_code; + u8 scsi_comp_flags; +}; + +struct io_bdt { + struct bnx2fc_cmd *io_req; + struct fcoe_bd_ctx *bd_tbl; + dma_addr_t bd_tbl_dma; + u16 bd_valid; +}; + +struct bnx2fc_work { + struct list_head list; + struct bnx2fc_rport *tgt; + struct fcoe_task_ctx_entry *task; + unsigned char rq_data[BNX2FC_RQ_BUF_SZ]; + u16 wqe; + u8 num_rq; +}; +struct bnx2fc_unsol_els { + struct fc_lport *lport; + struct fc_frame *fp; + struct bnx2fc_hba *hba; + struct work_struct unsol_els_work; +}; + +struct bnx2fc_priv { + struct bnx2fc_cmd *io_req; +}; + +static inline struct bnx2fc_priv *bnx2fc_priv(struct scsi_cmnd *cmd) +{ + return scsi_cmd_priv(cmd); +} + +struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt); +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type); +void bnx2fc_cmd_release(struct kref *ref); +int bnx2fc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd); +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba); +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_enable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_disable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt); +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt); +void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], + u32 num_cqe); +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba); +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba); +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba); +void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba); +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba); +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr); +void bnx2fc_get_link_state(struct bnx2fc_hba *hba); +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items); +void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items); +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen); +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req); +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp); +int bnx2fc_initiate_cleanup(struct bnx2fc_cmd 
*io_req); +int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req); +void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, + unsigned int timer_msec); +int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req); +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid); +void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnup_req, + struct fcoe_task_ctx_entry *task, + struct bnx2fc_cmd *orig_io_req, + u32 offset); +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task); +void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid); +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt); +int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd); +int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd); +void bnx2fc_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rport, + enum fc_rport_event event); +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq, unsigned char *rq_data); +void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq, unsigned char *rq_data); +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq); +void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, + struct fcp_cmnd *fcp_cmnd); + + + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt); +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout); +void bnx2fc_arm_cq(struct bnx2fc_rport *tgt); +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt); +void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, + unsigned char *rq_data, u8 num_rq, + struct fcoe_task_ctx_entry *task); +struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, + u32 port_id); +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid); +int bnx2fc_send_stat_req(struct bnx2fc_hba *hba); +int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, struct bnx2fc_cmd *io_req); +int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req); +int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl); +void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnup_req, + struct fcoe_task_ctx_entry *task, + u8 rx_state); +int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, + enum fc_rctl r_ctl); + + +#include "bnx2fc_debug.h" + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_constants.h b/drivers/scsi/bnx2fc/bnx2fc_constants.h new file mode 100644 index 000000000..9ed150307 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_constants.h @@ -0,0 +1,288 @@ +/* bnx2fc_constants.h: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#ifndef __BNX2FC_CONSTANTS_H_ +#define __BNX2FC_CONSTANTS_H_ + +/** + * This file defines HSI constants for the FCoE flows + */ + +/* Current FCoE HSI version number composed of two fields (16 bit) */ +/* Implies on a change broken previous HSI */ +#define FCOE_HSI_MAJOR_VERSION (2) +/* Implies on a change which does not broken previous HSI */ +#define FCOE_HSI_MINOR_VERSION (1) + +/* KWQ/KCQ FCoE layer code */ +#define FCOE_KWQE_LAYER_CODE (7) + +/* KWQ (kernel work queue) request op codes */ +#define FCOE_KWQE_OPCODE_INIT1 (0) +#define FCOE_KWQE_OPCODE_INIT2 (1) +#define FCOE_KWQE_OPCODE_INIT3 (2) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5) +#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6) +#define FCOE_KWQE_OPCODE_ENABLE_CONN (7) +#define FCOE_KWQE_OPCODE_DISABLE_CONN (8) +#define FCOE_KWQE_OPCODE_DESTROY_CONN (9) +#define FCOE_KWQE_OPCODE_DESTROY (10) +#define FCOE_KWQE_OPCODE_STAT (11) + +/* KCQ (kernel completion queue) response op codes */ +#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10) +#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11) +#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12) +#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15) +#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16) +#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17) +#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18) +#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20) +#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21) + +/* KCQ (kernel completion queue) completion status */ +#define FCOE_KCQE_COMPLETION_STATUS_SUCCESS (0x0) +#define FCOE_KCQE_COMPLETION_STATUS_ERROR (0x1) +#define FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE (0x2) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3) +#define FCOE_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE (0x4) +#define FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR (0x5) +#define FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION (0x6) +#define FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR (0x81) + +/* CQE type */ +#define FCOE_PENDING_CQE_TYPE 0 +#define FCOE_UNSOLIC_CQE_TYPE 1 + +/* Unsolicited CQE type */ +#define FCOE_UNSOLICITED_FRAME_CQE_TYPE 0 +#define FCOE_ERROR_DETECTION_CQE_TYPE 1 +#define FCOE_WARNING_DETECTION_CQE_TYPE 2 + +/* E_D_TOV timer resolution in ms */ +#define FCOE_E_D_TOV_TIMER_RESOLUTION_MS (20) + +/* E_D_TOV timer resolution for SDM (4 micro) */ +#define FCOE_E_D_TOV_SDM_TIMER_RESOLUTION \ + (FCOE_E_D_TOV_TIMER_RESOLUTION_MS * 1000 / 4) + +/* REC timer resolution in ms */ +#define FCOE_REC_TIMER_RESOLUTION_MS (20) + +/* REC timer resolution for SDM (4 micro) */ +#define FCOE_REC_SDM_TIMER_RESOLUTION (FCOE_REC_TIMER_RESOLUTION_MS * 1000 / 4) + +/* E_D_TOV timer default wraparound value (2 sec) in 20 ms resolution */ +#define FCOE_E_D_TOV_DEFAULT_WRAPAROUND_VAL \ + (2000 / FCOE_E_D_TOV_TIMER_RESOLUTION_MS) + +/* REC_TOV timer default wraparound value (3 sec) in 20 ms resolution */ +#define FCOE_REC_TOV_DEFAULT_WRAPAROUND_VAL \ + (3000 / FCOE_REC_TIMER_RESOLUTION_MS) + +#define FCOE_NUM_OF_TIMER_TASKS (8 * 1024) + +#define FCOE_NUM_OF_CACHED_TASKS_TIMER (8) + +/* Task context constants */ +/******** Remove FCP_CMD write tce sleep ***********************/ +/* In case timer services are required then shall be updated by Xstorm after + * start processing the task. 
In case no timer facilities are required then the + * driver would initialize the state to this value + * +#define FCOE_TASK_TX_STATE_NORMAL 0 + * After driver has initialize the task in case timer services required * +#define FCOE_TASK_TX_STATE_INIT 1 +******** Remove FCP_CMD write tce sleep ***********************/ +/* After driver has initialize the task in case timer services required */ +#define FCOE_TASK_TX_STATE_INIT 0 +/* In case timer services are required then shall be updated by Xstorm after + * start processing the task. In case no timer facilities are required then the + * driver would initialize the state to this value + */ +#define FCOE_TASK_TX_STATE_NORMAL 1 +/* Task is under abort procedure. Updated in order to stop processing of + * pending WQEs on this task + */ +#define FCOE_TASK_TX_STATE_ABORT 2 +/* For E_D_T_TOV timer expiration in Xstorm (Class 2 only) */ +#define FCOE_TASK_TX_STATE_ERROR 3 +/* For REC_TOV timer expiration indication received from Xstorm */ +#define FCOE_TASK_TX_STATE_WARNING 4 +/* For completed unsolicited task */ +#define FCOE_TASK_TX_STATE_UNSOLICITED_COMPLETED 5 +/* For exchange cleanup request task */ +#define FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP 6 +/* For sequence cleanup request task */ +#define FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP 7 +/* For completion the ABTS task. */ +#define FCOE_TASK_TX_STATE_ABTS_TX 8 + +#define FCOE_TASK_RX_STATE_NORMAL 0 +#define FCOE_TASK_RX_STATE_COMPLETED 1 +/* Obsolete: Intermediate completion (middle path with local completion) */ +#define FCOE_TASK_RX_STATE_INTER_COMP 2 +/* For REC_TOV timer expiration indication received from Xstorm */ +#define FCOE_TASK_RX_STATE_WARNING 3 +/* For E_D_T_TOV timer expiration in Ustorm */ +#define FCOE_TASK_RX_STATE_ERROR 4 +/* FW only: First visit at rx-path, part of the abts round trip */ +#define FCOE_TASK_RX_STATE_ABTS_IN_PROCESS 5 +/* FW only: Second visit at rx-path, after ABTS frame transmitted */ +#define FCOE_TASK_RX_STATE_ABTS_TRANSMITTED 6 +/* Special completion indication in case of task was aborted. */ +#define FCOE_TASK_RX_STATE_ABTS_COMPLETED 7 +/* FW only: First visit at rx-path, part of the cleanup round trip */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_IN_PROCESS 8 +/* FW only: Special completion indication in case of task was cleaned. */ +#define FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED 9 +/* Not in used: Special completion indication (in task requested the exchange + * cleanup) in case cleaned task is in non-valid. + */ +#define FCOE_TASK_RX_STATE_ABORT_CLEANUP_COMPLETED 10 +/* Special completion indication (in task requested the sequence cleanup) in + * case cleaned task was already returned to normal. 
+ */ +#define FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP 11 + + +#define FCOE_TASK_TYPE_WRITE 0 +#define FCOE_TASK_TYPE_READ 1 +#define FCOE_TASK_TYPE_MIDPATH 2 +#define FCOE_TASK_TYPE_UNSOLICITED 3 +#define FCOE_TASK_TYPE_ABTS 4 +#define FCOE_TASK_TYPE_EXCHANGE_CLEANUP 5 +#define FCOE_TASK_TYPE_SEQUENCE_CLEANUP 6 + +#define FCOE_TASK_DEV_TYPE_DISK 0 +#define FCOE_TASK_DEV_TYPE_TAPE 1 + +#define FCOE_TASK_CLASS_TYPE_3 0 +#define FCOE_TASK_CLASS_TYPE_2 1 + +/* FCoE/FC packet fields */ +#define FCOE_ETH_TYPE 0x8906 + +/* FCoE maximum elements in hash table */ +#define FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW 8 + +/* FCoE half of the elements in hash table */ +#define FCOE_HALF_ELEMENTS_IN_HASH_TABLE_ROW \ + (FCOE_MAX_ELEMENTS_IN_HASH_TABLE_ROW / 2) + +/* FcoE number of cached T2 entries */ +#define T_FCOE_NUMBER_OF_CACHED_T2_ENTRIES (4) + +/* FCoE maximum elements in hash table */ +#define FCOE_HASH_TBL_CHUNK_SIZE 16384 + +/* Everest FCoE connection type */ +#define B577XX_FCOE_CONNECTION_TYPE 4 + +/* FCoE number of rows (in log). This number derives + * from the maximum connections supported which is 2048. + * TBA: Need a different constant for E2 + */ +#define FCOE_MAX_NUM_SESSIONS_LOG 11 + +#define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 + +/* Error codes for Error Reporting in slow path flows */ +#define FCOE_SLOW_PATH_ERROR_CODE_TOO_MANY_FUNCS 0 +#define FCOE_SLOW_PATH_ERROR_CODE_NO_LICENSE 1 + +/* Error codes for Error Reporting in fast path flows + * XFER error codes + */ +#define FCOE_ERROR_CODE_XFER_OOO_RO 0 +#define FCOE_ERROR_CODE_XFER_RO_NOT_ALIGNED 1 +#define FCOE_ERROR_CODE_XFER_NULL_BURST_LEN 2 +#define FCOE_ERROR_CODE_XFER_RO_GREATER_THAN_DATA2TRNS 3 +#define FCOE_ERROR_CODE_XFER_INVALID_PAYLOAD_SIZE 4 +#define FCOE_ERROR_CODE_XFER_TASK_TYPE_NOT_WRITE 5 +#define FCOE_ERROR_CODE_XFER_PEND_XFER_SET 6 +#define FCOE_ERROR_CODE_XFER_OPENED_SEQ 7 +#define FCOE_ERROR_CODE_XFER_FCTL 8 + +/* FCP RSP error codes */ +#define FCOE_ERROR_CODE_FCP_RSP_BIDI_FLAGS_SET 9 +#define FCOE_ERROR_CODE_FCP_RSP_UNDERFLOW 10 +#define FCOE_ERROR_CODE_FCP_RSP_OVERFLOW 11 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_LENGTH_FIELD 12 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_SNS_FIELD 13 +#define FCOE_ERROR_CODE_FCP_RSP_INVALID_PAYLOAD_SIZE 14 +#define FCOE_ERROR_CODE_FCP_RSP_PEND_XFER_SET 15 +#define FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ 16 +#define FCOE_ERROR_CODE_FCP_RSP_FCTL 17 +#define FCOE_ERROR_CODE_FCP_RSP_LAST_SEQ_RESET 18 +#define FCOE_ERROR_CODE_FCP_RSP_CONF_REQ_NOT_SUPPORTED_YET 19 + +/* FCP DATA error codes */ +#define FCOE_ERROR_CODE_DATA_OOO_RO 20 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DEFINED_MAX_FRAME_SIZE 21 +#define FCOE_ERROR_CODE_DATA_EXCEEDS_DATA2TRNS 22 +#define FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET 23 +#define FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET 24 +#define FCOE_ERROR_CODE_DATA_EOFN_END_SEQ_SET 25 +#define FCOE_ERROR_CODE_DATA_EOFT_END_SEQ_RESET 26 +#define FCOE_ERROR_CODE_DATA_TASK_TYPE_NOT_READ 27 +#define FCOE_ERROR_CODE_DATA_FCTL 28 + +/* Middle path error codes */ +#define FCOE_ERROR_CODE_MIDPATH_INVALID_TYPE 29 +#define FCOE_ERROR_CODE_MIDPATH_SOFI3_SEQ_ACTIVE_SET 30 +#define FCOE_ERROR_CODE_MIDPATH_SOFN_SEQ_ACTIVE_RESET 31 +#define FCOE_ERROR_CODE_MIDPATH_EOFN_END_SEQ_SET 32 +#define FCOE_ERROR_CODE_MIDPATH_EOFT_END_SEQ_RESET 33 +#define FCOE_ERROR_CODE_MIDPATH_REPLY_FCTL 34 +#define FCOE_ERROR_CODE_MIDPATH_INVALID_REPLY 35 +#define FCOE_ERROR_CODE_MIDPATH_ELS_REPLY_RCTL 36 + +/* ABTS error codes */ +#define FCOE_ERROR_CODE_ABTS_REPLY_F_CTL 37 +#define 
FCOE_ERROR_CODE_ABTS_REPLY_DDF_RCTL_FIELD 38 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_BLS_RCTL 39 +#define FCOE_ERROR_CODE_ABTS_REPLY_INVALID_RCTL 40 +#define FCOE_ERROR_CODE_ABTS_REPLY_RCTL_GENERAL_MISMATCH 41 + +/* Common error codes */ +#define FCOE_ERROR_CODE_COMMON_MIDDLE_FRAME_WITH_PAD 42 +#define FCOE_ERROR_CODE_COMMON_SEQ_INIT_IN_TCE 43 +#define FCOE_ERROR_CODE_COMMON_FC_HDR_RX_ID_MISMATCH 44 +#define FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT 45 +#define FCOE_ERROR_CODE_COMMON_DATA_FC_HDR_FCP_TYPE_MISMATCH 46 +#define FCOE_ERROR_CODE_COMMON_DATA_NO_MORE_SGES 47 +#define FCOE_ERROR_CODE_COMMON_OPTIONAL_FC_HDR 48 +#define FCOE_ERROR_CODE_COMMON_READ_TCE_OX_ID_TOO_BIG 49 +#define FCOE_ERROR_CODE_COMMON_DATA_WAS_NOT_TRANSMITTED 50 + +/* Unsolicited Rx error codes */ +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_ELS 51 +#define FCOE_ERROR_CODE_UNSOLICITED_TYPE_NOT_BLS 52 +#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_ELS 53 +#define FCOE_ERROR_CODE_UNSOLICITED_FCTL_BLS 54 +#define FCOE_ERROR_CODE_UNSOLICITED_R_CTL 55 + +#define FCOE_ERROR_CODE_RW_TASK_DDF_RCTL_INFO_FIELD 56 +#define FCOE_ERROR_CODE_RW_TASK_INVALID_RCTL 57 +#define FCOE_ERROR_CODE_RW_TASK_RCTL_GENERAL_MISMATCH 58 + +/* Timer error codes */ +#define FCOE_ERROR_CODE_E_D_TOV_TIMER_EXPIRATION 60 +#define FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION 61 + + +#endif /* BNX2FC_CONSTANTS_H_ */ diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.c b/drivers/scsi/bnx2fc/bnx2fc_debug.c new file mode 100644 index 000000000..47ba3ba1e --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.c @@ -0,0 +1,84 @@ +/* bnx2fc_debug.c: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#include "bnx2fc.h" + +void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_IO))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (io_req && io_req->port && io_req->port->lport && + io_req->port->lport->host) + shost_printk(KERN_INFO, io_req->port->lport->host, + PFX "xid:0x%x %pV", + io_req->xid, &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} + +void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_TGT))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host && + tgt->rport) + shost_printk(KERN_INFO, tgt->port->lport->host, + PFX "port:%x %pV", + tgt->rport->port_id, &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} + +void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...) 
+{ + struct va_format vaf; + va_list args; + + if (likely(!(bnx2fc_debug_level & LOG_HBA))) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (lport && lport->host) + shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf); + else + pr_info("NULL %pV", &vaf); + + va_end(args); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_debug.h b/drivers/scsi/bnx2fc/bnx2fc_debug.h new file mode 100644 index 000000000..76717acee --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_debug.h @@ -0,0 +1,47 @@ +/* bnx2fc_debug.h: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + */ + +#ifndef __BNX2FC_DEBUG__ +#define __BNX2FC_DEBUG__ + +/* Log level bit mask */ +#define LOG_IO 0x01 /* scsi cmd error, cleanup */ +#define LOG_TGT 0x02 /* Session setup, cleanup, etc' */ +#define LOG_HBA 0x04 /* lport events, link, mtu, etc' */ +#define LOG_ELS 0x08 /* ELS logs */ +#define LOG_MISC 0x10 /* fcoe L2 frame related logs*/ +#define LOG_ALL 0xff /* LOG all messages */ + +extern unsigned int bnx2fc_debug_level; + +#define BNX2FC_ELS_DBG(fmt, ...) \ +do { \ + if (unlikely(bnx2fc_debug_level & LOG_ELS)) \ + pr_info(fmt, ##__VA_ARGS__); \ +} while (0) + +#define BNX2FC_MISC_DBG(fmt, ...) \ +do { \ + if (unlikely(bnx2fc_debug_level & LOG_MISC)) \ + pr_info(fmt, ##__VA_ARGS__); \ +} while (0) + +__printf(2, 3) +void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...); +__printf(2, 3) +void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...); +__printf(2, 3) +void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...); + +#endif diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c new file mode 100644 index 000000000..754f2e82d --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -0,0 +1,950 @@ +/* + * bnx2fc_els.c: QLogic Linux FCoE offload driver. + * This file contains helper routines that handle ELS requests + * and responses. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg); +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec); + +static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *orig_io_req; + struct bnx2fc_cmd *rrq_req; + int rc = 0; + + BUG_ON(!cb_arg); + rrq_req = cb_arg->io_req; + orig_io_req = cb_arg->aborted_io_req; + BUG_ON(!orig_io_req); + BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n", + orig_io_req->xid, rrq_req->xid); + + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. Remove from active_cmd_queue. + */ + BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n", + rrq_req->xid); + + if (rrq_req->on_active_queue) { + list_del_init(&rrq_req->link); + rrq_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(rrq_req); + BUG_ON(rc); + } + } + kfree(cb_arg); +} +int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req) +{ + + struct fc_els_rrq rrq; + struct bnx2fc_rport *tgt = aborted_io_req->tgt; + struct fc_lport *lport = NULL; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 sid = 0; + u32 r_a_tov = 0; + unsigned long start = jiffies; + int rc; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) + return -EINVAL; + + lport = tgt->rdata->local_port; + sid = tgt->sid; + r_a_tov = lport->r_a_tov; + + BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n", + aborted_io_req->xid); + memset(&rrq, 0, sizeof(rrq)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n"); + rc = -ENOMEM; + goto rrq_err; + } + + cb_arg->aborted_io_req = aborted_io_req; + + rrq.rrq_cmd = ELS_RRQ; + hton24(rrq.rrq_s_id, sid); + rrq.rrq_ox_id = htons(aborted_io_req->xid); + rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id); + +retry_rrq: + rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq), + bnx2fc_rrq_compl, cb_arg, + r_a_tov); + if (rc == -ENOMEM) { + if (time_after(jiffies, start + (10 * HZ))) { + BNX2FC_ELS_DBG("rrq Failed\n"); + rc = FAILED; + goto rrq_err; + } + msleep(20); + goto retry_rrq; + } +rrq_err: + if (rc) { + BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n", + aborted_io_req->xid); + kfree(cb_arg); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + } + return rc; +} + +static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *els_req; + struct bnx2fc_rport *tgt; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + unsigned char *buf; + void *resp_buf; + u32 resp_len, hdr_len; + u16 l2_oxid; + int frame_len; + int rc = 0; + + l2_oxid = cb_arg->l2_oxid; + BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid); + + els_req = cb_arg->io_req; + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) { + /* + * els req is timed out. cleanup the IO with FW and + * drop the completion. 
libfc will handle the els timeout + */ + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + rc = bnx2fc_initiate_cleanup(els_req); + BUG_ON(rc); + } + goto free_arg; + } + + tgt = els_req->tgt; + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "Unable to alloc mp buf\n"); + goto free_arg; + } + hdr_len = sizeof(*fc_hdr); + if (hdr_len + resp_len > PAGE_SIZE) { + printk(KERN_ERR PFX "l2_els_compl: resp len is " + "beyond page size\n"); + goto free_buf; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + frame_len = hdr_len + resp_len; + + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid); + +free_buf: + kfree(buf); +free_arg: + kfree(cb_arg); +} + +int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_adisc *adisc; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + adisc = fc_frame_payload_get(fp, sizeof(*adisc)); + /* adisc is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_logo *logo; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid); + logo = fc_frame_payload_get(fp, sizeof(*logo)); + /* logo is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp) +{ + struct fc_els_rls *rls; + struct fc_frame_header *fh; + struct bnx2fc_els_cb_arg *cb_arg; + struct fc_lport *lport = tgt->rdata->local_port; + u32 r_a_tov = lport->r_a_tov; + int rc; + + fh = fc_frame_header_get(fp); + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n"); + return -ENOMEM; + } + + cb_arg->l2_oxid = ntohs(fh->fh_ox_id); + + rls = fc_frame_payload_get(fp, sizeof(*rls)); + /* rls is initialized by libfc */ + rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls), + bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov); + if (rc) + kfree(cb_arg); + return rc; +} + +static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr, *fh; + struct bnx2fc_cmd *srr_req; + struct bnx2fc_cmd *orig_io_req; + struct fc_frame *fp; + unsigned char *buf; + void *resp_buf; + u32 resp_len, 
hdr_len; + u8 opcode; + int rc = 0; + + orig_io_req = cb_arg->aborted_io_req; + srr_req = cb_arg->io_req; + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) { + /* SRR timedout */ + BNX2FC_IO_DBG(srr_req, "srr timed out, abort " + "orig_io - 0x%x\n", + orig_io_req->xid); + rc = bnx2fc_initiate_abts(srr_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed. issue cleanup\n"); + bnx2fc_initiate_cleanup(srr_req); + } + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) || + test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx", + orig_io_req->xid, orig_io_req->req_flags); + goto srr_compl_done; + } + orig_io_req->srr_retry++; + if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) { + struct bnx2fc_rport *tgt = orig_io_req->tgt; + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, + orig_io_req->srr_offset, + orig_io_req->srr_rctl); + spin_lock_bh(&tgt->tgt_lock); + if (!rc) + goto srr_compl_done; + } + + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed xid = 0x%x. issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + goto srr_compl_done; + } + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) || + test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx", + orig_io_req->xid, orig_io_req->req_flags); + goto srr_compl_done; + } + mp_req = &(srr_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + resp_buf = mp_req->resp_buf; + + hdr_len = sizeof(*fc_hdr); + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "srr buf: mem alloc failure\n"); + goto srr_compl_done; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + + fp = fc_frame_alloc(NULL, resp_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + goto free_buf; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, hdr_len + resp_len); + + opcode = fc_frame_payload_op(fp); + switch (opcode) { + case ELS_LS_ACC: + BNX2FC_IO_DBG(srr_req, "SRR success\n"); + break; + case ELS_LS_RJT: + BNX2FC_IO_DBG(srr_req, "SRR rejected\n"); + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts " + "failed xid = 0x%x. 
issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + break; + default: + BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n", + opcode); + break; + } + fc_frame_free(fp); +free_buf: + kfree(buf); +srr_compl_done: + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); +} + +static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) +{ + struct bnx2fc_cmd *orig_io_req, *new_io_req; + struct bnx2fc_cmd *rec_req; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr, *fh; + struct fc_els_ls_rjt *rjt; + struct fc_els_rec_acc *acc; + struct bnx2fc_rport *tgt; + struct fcoe_err_report_entry *err_entry; + struct scsi_cmnd *sc_cmd; + enum fc_rctl r_ctl; + unsigned char *buf; + void *resp_buf; + struct fc_frame *fp; + u8 opcode; + u32 offset; + u32 e_stat; + u32 resp_len, hdr_len; + int rc = 0; + bool send_seq_clnp = false; + bool abort_io = false; + + BNX2FC_MISC_DBG("Entered rec_compl callback\n"); + rec_req = cb_arg->io_req; + orig_io_req = cb_arg->aborted_io_req; + BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid); + tgt = orig_io_req->tgt; + + /* Handle REC timeout case */ + if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "timed out, abort " + "orig_io - 0x%x\n", + orig_io_req->xid); + /* els req is timed out. send abts for els */ + rc = bnx2fc_initiate_abts(rec_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed. issue cleanup\n"); + bnx2fc_initiate_cleanup(rec_req); + } + orig_io_req->rec_retry++; + /* REC timedout. send ABTS to the orig IO req */ + if (orig_io_req->rec_retry <= REC_RETRY_COUNT) { + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_rec(orig_io_req); + spin_lock_bh(&tgt->tgt_lock); + if (!rc) + goto rec_compl_done; + } + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed xid = 0x%x. 
issue cleanup\n", + orig_io_req->xid); + bnx2fc_initiate_cleanup(orig_io_req); + } + goto rec_compl_done; + } + + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "completed" + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "abts in prog " + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + + mp_req = &(rec_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + resp_len = mp_req->resp_len; + acc = resp_buf = mp_req->resp_buf; + + hdr_len = sizeof(*fc_hdr); + + buf = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!buf) { + printk(KERN_ERR PFX "rec buf: mem alloc failure\n"); + goto rec_compl_done; + } + memcpy(buf, fc_hdr, hdr_len); + memcpy(buf + hdr_len, resp_buf, resp_len); + + fp = fc_frame_alloc(NULL, resp_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + goto free_buf; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, hdr_len + resp_len); + + opcode = fc_frame_payload_op(fp); + if (opcode == ELS_LS_RJT) { + BNX2FC_IO_DBG(rec_req, "opcode is RJT\n"); + rjt = fc_frame_payload_get(fp, sizeof(*rjt)); + if ((rjt->er_reason == ELS_RJT_LOGIC || + rjt->er_reason == ELS_RJT_UNAB) && + rjt->er_explan == ELS_EXPL_OXID_RXID) { + BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n"); + new_io_req = bnx2fc_cmd_alloc(tgt); + if (!new_io_req) + goto abort_io; + new_io_req->sc_cmd = orig_io_req->sc_cmd; + /* cleanup orig_io_req that is with the FW */ + set_bit(BNX2FC_FLAG_CMD_LOST, + &orig_io_req->req_flags); + bnx2fc_initiate_cleanup(orig_io_req); + /* Post a new IO req with the same sc_cmd */ + BNX2FC_IO_DBG(rec_req, "Post IO request again\n"); + rc = bnx2fc_post_io_req(tgt, new_io_req); + if (!rc) + goto free_frame; + BNX2FC_IO_DBG(rec_req, "REC: io post err\n"); + } +abort_io: + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts " + "failed. 
issue cleanup\n"); + bnx2fc_initiate_cleanup(orig_io_req); + } + } else if (opcode == ELS_LS_ACC) { + /* REVISIT: Check if the exchange is already aborted */ + offset = ntohl(acc->reca_fc4value); + e_stat = ntohl(acc->reca_e_stat); + if (e_stat & ESB_ST_SEQ_INIT) { + BNX2FC_IO_DBG(rec_req, "target has the seq init\n"); + goto free_frame; + } + BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n", + e_stat, offset); + /* Seq initiative is with us */ + err_entry = (struct fcoe_err_report_entry *) + &orig_io_req->err_entry; + sc_cmd = orig_io_req->sc_cmd; + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + /* SCSI WRITE command */ + if (offset == orig_io_req->data_xfer_len) { + BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n"); + /* FCP_RSP lost */ + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + /* start transmitting from offset */ + BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n"); + send_seq_clnp = true; + r_ctl = FC_RCTL_DD_DATA_DESC; + if (bnx2fc_initiate_seq_cleanup(orig_io_req, + offset, r_ctl)) + abort_io = true; + /* XFER_RDY */ + } + } else { + /* SCSI READ command */ + if (err_entry->data.rx_buf_off == + orig_io_req->data_xfer_len) { + /* FCP_RSP lost */ + BNX2FC_IO_DBG(rec_req, "READ - resp lost\n"); + r_ctl = FC_RCTL_DD_CMD_STATUS; + offset = 0; + } else { + /* request retransmission from this offset */ + send_seq_clnp = true; + offset = err_entry->data.rx_buf_off; + BNX2FC_IO_DBG(rec_req, "RD DATA lost\n"); + /* FCP_DATA lost */ + r_ctl = FC_RCTL_DD_SOL_DATA; + if (bnx2fc_initiate_seq_cleanup(orig_io_req, + offset, r_ctl)) + abort_io = true; + } + } + if (abort_io) { + rc = bnx2fc_initiate_abts(orig_io_req); + if (rc != SUCCESS) { + BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts" + " failed. issue cleanup\n"); + bnx2fc_initiate_cleanup(orig_io_req); + } + } else if (!send_seq_clnp) { + BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n"); + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) { + BNX2FC_IO_DBG(rec_req, "Unable to send SRR" + " IO will abort\n"); + } + } + } +free_frame: + fc_frame_free(fp); +free_buf: + kfree(buf); +rec_compl_done: + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + kfree(cb_arg); +} + +int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req) +{ + struct fc_els_rec rec; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 sid = tgt->sid; + u32 r_a_tov = lport->r_a_tov; + int rc; + + BNX2FC_IO_DBG(orig_io_req, "Sending REC\n"); + memset(&rec, 0, sizeof(rec)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n"); + rc = -ENOMEM; + goto rec_err; + } + kref_get(&orig_io_req->refcount); + + cb_arg->aborted_io_req = orig_io_req; + + rec.rec_cmd = ELS_REC; + hton24(rec.rec_s_id, sid); + rec.rec_ox_id = htons(orig_io_req->xid); + rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); + + rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec), + bnx2fc_rec_compl, cb_arg, + r_a_tov); + if (rc) { + BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + kfree(cb_arg); + } +rec_err: + return rc; +} + +int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl) +{ + struct fcp_srr srr; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct 
fc_lport *lport = tgt->rdata->local_port; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + u32 r_a_tov = lport->r_a_tov; + int rc; + + BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n"); + memset(&srr, 0, sizeof(srr)); + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n"); + rc = -ENOMEM; + goto srr_err; + } + kref_get(&orig_io_req->refcount); + + cb_arg->aborted_io_req = orig_io_req; + + srr.srr_op = ELS_SRR; + srr.srr_ox_id = htons(orig_io_req->xid); + srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id); + srr.srr_rel_off = htonl(offset); + srr.srr_r_ctl = r_ctl; + orig_io_req->srr_offset = offset; + orig_io_req->srr_rctl = r_ctl; + + rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr), + bnx2fc_srr_compl, cb_arg, + r_a_tov); + if (rc) { + BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + kfree(cb_arg); + } else + set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags); + +srr_err: + return rc; +} + +static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op, + void *data, u32 data_len, + void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg), + struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct fc_rport *rport = tgt->rport; + struct fc_lport *lport = port->lport; + struct bnx2fc_cmd *els_req; + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + int rc = 0; + int task_idx, index; + u32 did, sid; + u16 xid; + + rc = fc_remote_port_chkready(rport); + if (rc) { + printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op); + rc = -EINVAL; + goto els_err; + } + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op); + rc = -EINVAL; + goto els_err; + } + els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS); + if (!els_req) { + rc = -ENOMEM; + goto els_err; + } + + els_req->sc_cmd = NULL; + els_req->port = port; + els_req->tgt = tgt; + els_req->cb_func = cb_func; + cb_arg->io_req = els_req; + els_req->cb_arg = cb_arg; + els_req->data_xfer_len = data_len; + + mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req); + rc = bnx2fc_init_mp_req(els_req); + if (rc == FAILED) { + printk(KERN_ERR PFX "ELS MP request init failed\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -ENOMEM; + goto els_err; + } else { + /* rc SUCCESS */ + rc = 0; + } + + /* Set the data_xfer_len to the size of ELS payload */ + mp_req->req_len = data_len; + els_req->data_xfer_len = mp_req->req_len; + + /* Fill ELS Payload */ + if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) { + memcpy(mp_req->req_buf, data, data_len); + } else { + printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + spin_lock_bh(&tgt->tgt_lock); + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + rc = -EINVAL; + } + + if (rc) + goto els_err; + + /* Fill FC header */ + fc_hdr = &(mp_req->req_fc_hdr); + + did = tgt->rport->port_id; + sid = tgt->sid; + + if (op == ELS_SRR) + 
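+	/*
+	 * Note: SRR is an FCP FC-4 link service rather than a plain ELS,
+	 * which is why it is framed with the FC-4 ELS request R_CTL and
+	 * FC_TYPE_FCP here, instead of FC_RCTL_ELS_REQ/FC_TYPE_ELS as in
+	 * the else branch below.
+	 */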
__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + else + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid, + FC_TYPE_ELS, FC_FC_FIRST_SEQ | + FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0); + + /* Obtain exchange id */ + xid = els_req->xid; + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(els_req, task); + + spin_lock_bh(&tgt->tgt_lock); + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "initiate_els.. session not ready\n"); + els_req->cb_func = NULL; + els_req->cb_arg = NULL; + kref_put(&els_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EINVAL; + } + + if (timer_msec) + bnx2fc_cmd_timer_set(els_req, timer_msec); + bnx2fc_add_2_sq(tgt, xid); + + els_req->on_active_queue = 1; + list_add_tail(&els_req->link, &tgt->els_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + +els_err: + return rc; +} + +void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req, + struct fcoe_task_ctx_entry *task, u8 num_rq) +{ + struct bnx2fc_mp_req *mp_req; + struct fc_frame_header *fc_hdr; + u64 *hdr; + u64 *temp_hdr; + + BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x" + "cmd_type = %d\n", els_req->xid, els_req->cmd_type); + + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &els_req->req_flags)) { + BNX2FC_ELS_DBG("Timer context finished processing this " + "els - 0x%x\n", els_req->xid); + /* This IO doesn't receive cleanup completion */ + kref_put(&els_req->refcount, bnx2fc_cmd_release); + return; + } + + /* Cancel the timeout_work, as we received the response */ + if (cancel_delayed_work(&els_req->timeout_work)) + kref_put(&els_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if (els_req->on_active_queue) { + list_del_init(&els_req->link); + els_req->on_active_queue = 0; + } + + mp_req = &(els_req->mp_req); + fc_hdr = &(mp_req->resp_fc_hdr); + + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + mp_req->resp_len = + task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; + + /* Parse ELS response */ + if ((els_req->cb_func) && (els_req->cb_arg)) { + els_req->cb_func(els_req->cb_arg); + els_req->cb_arg = NULL; + } + + kref_put(&els_req->refcount, bnx2fc_cmd_release); +} + +#define BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC 1 +#define BNX2FC_FCOE_MAC_METHOD_FCF_MAP 2 +#define BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC 3 +static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + + struct fc_frame_header *fh; + u8 *granted_mac; + u8 fcoe_mac[6]; + u8 fc_map[3]; + int method; + + if (IS_ERR(fp)) + goto done; + + fh = fc_frame_header_get(fp); + granted_mac = fr_cb(fp)->granted_mac; + + /* + * We set the source MAC for FCoE traffic based on the Granted MAC + * address from the switch. + * + * If granted_mac is non-zero, we use that. + * If the granted_mac is zeroed out, create the FCoE MAC based on + * the sel_fcf->fc_map and the d_id fo the FLOGI frame. 
+ * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the + * d_id of the FLOGI frame. + */ + if (!is_zero_ether_addr(granted_mac)) { + ether_addr_copy(fcoe_mac, granted_mac); + method = BNX2FC_FCOE_MAC_METHOD_GRANGED_MAC; + } else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) { + hton24(fc_map, fip->sel_fcf->fc_map); + fcoe_mac[0] = fc_map[0]; + fcoe_mac[1] = fc_map[1]; + fcoe_mac[2] = fc_map[2]; + fcoe_mac[3] = fh->fh_d_id[0]; + fcoe_mac[4] = fh->fh_d_id[1]; + fcoe_mac[5] = fh->fh_d_id[2]; + method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP; + } else { + fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id); + method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC; + } + + BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method); + fip->update_mac(lport, fcoe_mac); +done: + fc_lport_flogi_resp(seq, fp, lport); +} + +static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp, + void *arg) +{ + struct fcoe_ctlr *fip = arg; + struct fc_exch *exch = fc_seq_exch(seq); + struct fc_lport *lport = exch->lp; + static u8 zero_mac[ETH_ALEN] = { 0 }; + + if (!IS_ERR(fp)) + fip->update_mac(lport, zero_mac); + fc_lport_logo_resp(seq, fp, lport); +} + +struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did, + struct fc_frame *fp, unsigned int op, + void (*resp)(struct fc_seq *, + struct fc_frame *, + void *), + void *arg, u32 timeout) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface); + struct fc_frame_header *fh = fc_frame_header_get(fp); + + switch (op) { + case ELS_FLOGI: + case ELS_FDISC: + return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp, + fip, timeout); + case ELS_LOGO: + /* only hook onto fabric logouts, not port logouts */ + if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI) + break; + return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp, + fip, timeout); + } + return fc_elsct_send(lport, did, fp, op, resp, arg, timeout); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c new file mode 100644 index 000000000..451a58e0f --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -0,0 +1,2989 @@ +/* bnx2fc_fcoe.c: QLogic Linux FCoE offload driver. + * This file contains the code that interacts with libfc, libfcoe, + * cnic modules to create FCoE instances, send/receive non-offloaded + * FIP/FCoE packets, listen to link events etc. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. 
+ * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +#include <linux/ethtool.h> + +static struct list_head adapter_list; +static struct list_head if_list; +static u32 adapter_count; +static DEFINE_MUTEX(bnx2fc_dev_lock); +DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); + +#define DRV_MODULE_NAME "bnx2fc" +#define DRV_MODULE_VERSION BNX2FC_VERSION +#define DRV_MODULE_RELDATE "October 15, 2015" + + +static char version[] = + "QLogic FCoE Driver " DRV_MODULE_NAME \ + " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; + + +MODULE_AUTHOR("Bhanu Prakash Gollapudi <bprakash@broadcom.com>"); +MODULE_DESCRIPTION("QLogic FCoE Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +#define BNX2FC_MAX_QUEUE_DEPTH 256 +#define BNX2FC_MIN_QUEUE_DEPTH 32 +#define FCOE_WORD_TO_BYTE 4 + +static struct scsi_transport_template *bnx2fc_transport_template; +static struct scsi_transport_template *bnx2fc_vport_xport_template; + +struct workqueue_struct *bnx2fc_wq; + +/* bnx2fc structure needs only one instance of the fcoe_percpu_s structure. + * Here the io threads are per cpu but the l2 thread is just one + */ +struct fcoe_percpu_s bnx2fc_global; +static DEFINE_SPINLOCK(bnx2fc_global_lock); + +static struct cnic_ulp_ops bnx2fc_cnic_cb; +static struct libfc_function_template bnx2fc_libfc_fcn_templ; +static struct scsi_host_template bnx2fc_shost_template; +static struct fc_function_template bnx2fc_transport_function; +static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ; +static struct fc_function_template bnx2fc_vport_xport_function; +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode); +static void __bnx2fc_destroy(struct bnx2fc_interface *interface); +static int bnx2fc_destroy(struct net_device *net_device); +static int bnx2fc_enable(struct net_device *netdev); +static int bnx2fc_disable(struct net_device *netdev); + +/* fcoe_syfs control interface handlers */ +static int bnx2fc_ctlr_alloc(struct net_device *netdev); +static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev); + +static void bnx2fc_recv_frame(struct sk_buff *skb); + +static void bnx2fc_start_disc(struct bnx2fc_interface *interface); +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); +static int bnx2fc_lport_config(struct fc_lport *lport); +static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba); +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba); +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba); +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba); +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, + struct device *parent, int npiv); +static void bnx2fc_port_destroy(struct fcoe_port *port); + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); +static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device + *phys_dev); +static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface); +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba); +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba); + +static void bnx2fc_port_shutdown(struct fc_lport *lport); +static void bnx2fc_stop(struct bnx2fc_interface *interface); +static int __init bnx2fc_mod_init(void); +static void __exit bnx2fc_mod_exit(void); + +unsigned int bnx2fc_debug_level; 
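+/*
+ * bnx2fc_debug_level is the bitmask consulted by the BNX2FC_*_DBG()
+ * helpers (see bnx2fc_debug.h). Because the module parameter below is
+ * declared with S_IWUSR, the mask can also be changed at runtime, e.g.
+ * by writing a value such as 0xff to
+ * /sys/module/bnx2fc/parameters/debug_logging.
+ */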
+module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(debug_logging, + "Option to enable extended logging,\n" + "\t\tDefault is 0 - no logging.\n" + "\t\t0x01 - SCSI cmd error, cleanup.\n" + "\t\t0x02 - Session setup, cleanup, etc.\n" + "\t\t0x04 - lport events, link, mtu, etc.\n" + "\t\t0x08 - ELS logs.\n" + "\t\t0x10 - fcoe L2 fame related logs.\n" + "\t\t0xff - LOG all messages."); + +static uint bnx2fc_devloss_tmo; +module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO); +MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports " + "attached via bnx2fc."); + +static uint bnx2fc_max_luns = BNX2FC_MAX_LUN; +module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO); +MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default " + "0xffff."); + +static uint bnx2fc_queue_depth; +module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO); +MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices " + "attached via bnx2fc."); + +static uint bnx2fc_log_fka; +module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is " + "initiating a FIP keep alive when debug logging is enabled."); + +static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) +{ + return ((struct bnx2fc_interface *) + ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; +} + +static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev) +{ + struct fcoe_ctlr_device *ctlr_dev = + fcoe_fcf_dev_to_ctlr_dev(fcf_dev); + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev); + struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr); + + fcf_dev->vlan_id = fcoe->vlan_id; +} + +static void bnx2fc_clean_rx_queue(struct fc_lport *lp) +{ + struct fcoe_percpu_s *bg; + struct fcoe_rcv_info *fr; + struct sk_buff_head *list; + struct sk_buff *skb, *next; + + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + list = &bg->fcoe_rx_list; + skb_queue_walk_safe(list, skb, next) { + fr = fcoe_dev_from_skb(skb); + if (fr->fr_dev == lp) { + __skb_unlink(skb, list); + kfree_skb(skb); + } + } + spin_unlock_bh(&bg->fcoe_rx_list.lock); +} + +int bnx2fc_get_paged_crc_eof(struct sk_buff *skb, int tlen) +{ + int rc; + spin_lock(&bnx2fc_global_lock); + rc = fcoe_get_paged_crc_eof(skb, tlen, &bnx2fc_global); + spin_unlock(&bnx2fc_global_lock); + + return rc; +} + +static void bnx2fc_abort_io(struct fc_lport *lport) +{ + /* + * This function is no-op for bnx2fc, but we do + * not want to leave it as NULL either, as libfc + * can call the default function which is + * fc_fcp_abort_io. 
+ */ +} + +static void bnx2fc_cleanup(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct bnx2fc_rport *tgt; + int i; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + mutex_lock(&hba->hba_mutex); + spin_lock_bh(&hba->hba_lock); + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + tgt = hba->tgt_ofld_list[i]; + if (tgt) { + /* Cleanup IOs belonging to requested vport */ + if (tgt->port == port) { + spin_unlock_bh(&hba->hba_lock); + BNX2FC_TGT_DBG(tgt, "flush/cleanup\n"); + bnx2fc_flush_active_ios(tgt); + spin_lock_bh(&hba->hba_lock); + } + } + } + spin_unlock_bh(&hba->hba_lock); + mutex_unlock(&hba->hba_mutex); +} + +static int bnx2fc_xmit_l2_frame(struct bnx2fc_rport *tgt, + struct fc_frame *fp) +{ + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_frame_header *fh; + int rc = 0; + + fh = fc_frame_header_get(fp); + BNX2FC_TGT_DBG(tgt, "Xmit L2 frame rport = 0x%x, oxid = 0x%x, " + "r_ctl = 0x%x\n", rdata->ids.port_id, + ntohs(fh->fh_ox_id), fh->fh_r_ctl); + if ((fh->fh_type == FC_TYPE_ELS) && + (fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + + switch (fc_frame_payload_op(fp)) { + case ELS_ADISC: + rc = bnx2fc_send_adisc(tgt, fp); + break; + case ELS_LOGO: + rc = bnx2fc_send_logo(tgt, fp); + break; + case ELS_RLS: + rc = bnx2fc_send_rls(tgt, fp); + break; + default: + break; + } + } else if ((fh->fh_type == FC_TYPE_BLS) && + (fh->fh_r_ctl == FC_RCTL_BA_ABTS)) + BNX2FC_TGT_DBG(tgt, "ABTS frame\n"); + else { + BNX2FC_TGT_DBG(tgt, "Send L2 frame type 0x%x " + "rctl 0x%x thru non-offload path\n", + fh->fh_type, fh->fh_r_ctl); + return -ENODEV; + } + if (rc) + return -ENOMEM; + else + return 0; +} + +/** + * bnx2fc_xmit - bnx2fc's FCoE frame transmit function + * + * @lport: the associated local port + * @fp: the fc_frame to be transmitted + */ +static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp) +{ + struct ethhdr *eh; + struct fcoe_crc_eof *cp; + struct sk_buff *skb; + struct fc_frame_header *fh; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct bnx2fc_hba *hba; + struct fcoe_port *port; + struct fcoe_hdr *hp; + struct bnx2fc_rport *tgt; + u8 sof, eof; + u32 crc; + unsigned int hlen, tlen, elen; + int wlen, rc = 0; + + port = (struct fcoe_port *)lport_priv(lport); + interface = port->priv; + ctlr = bnx2fc_to_ctlr(interface); + hba = interface->hba; + + fh = fc_frame_header_get(fp); + + skb = fp_skb(fp); + if (!lport->link_up) { + BNX2FC_HBA_DBG(lport, "bnx2fc_xmit link down\n"); + kfree_skb(skb); + return 0; + } + + if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { + if (!ctlr->sel_fcf) { + BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); + kfree_skb(skb); + return -EINVAL; + } + if (fcoe_ctlr_els_send(ctlr, lport, skb)) + return 0; + } + + sof = fr_sof(fp); + eof = fr_eof(fp); + + /* + * Snoop the frame header to check if the frame is for + * an offloaded session + */ + /* + * tgt_ofld_list access is synchronized using + * both hba mutex and hba lock. Atleast hba mutex or + * hba lock needs to be held for read access. 
+ */ + + spin_lock_bh(&hba->hba_lock); + tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id)); + if (tgt && (test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + /* This frame is for offloaded session */ + BNX2FC_HBA_DBG(lport, "xmit: Frame is for offloaded session " + "port_id = 0x%x\n", ntoh24(fh->fh_d_id)); + spin_unlock_bh(&hba->hba_lock); + rc = bnx2fc_xmit_l2_frame(tgt, fp); + if (rc != -ENODEV) { + kfree_skb(skb); + return rc; + } + } else { + spin_unlock_bh(&hba->hba_lock); + } + + elen = sizeof(struct ethhdr); + hlen = sizeof(struct fcoe_hdr); + tlen = sizeof(struct fcoe_crc_eof); + wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE; + + skb->ip_summed = CHECKSUM_NONE; + crc = fcoe_fc_crc(fp); + + /* copy port crc and eof to the skb buff */ + if (skb_is_nonlinear(skb)) { + skb_frag_t *frag; + if (bnx2fc_get_paged_crc_eof(skb, tlen)) { + kfree_skb(skb); + return -ENOMEM; + } + frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); + } else { + cp = skb_put(skb, tlen); + } + + memset(cp, 0, sizeof(*cp)); + cp->fcoe_eof = eof; + cp->fcoe_crc32 = cpu_to_le32(~crc); + if (skb_is_nonlinear(skb)) { + kunmap_atomic(cp); + cp = NULL; + } + + /* adjust skb network/transport offsets to match mac/fcoe/port */ + skb_push(skb, elen + hlen); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb->mac_len = elen; + skb->protocol = htons(ETH_P_FCOE); + skb->dev = interface->netdev; + + /* fill up mac and fcoe headers */ + eh = eth_hdr(skb); + eh->h_proto = htons(ETH_P_FCOE); + if (ctlr->map_dest) + fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); + else + /* insert GW address */ + memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN); + + if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN)) + memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN); + else + memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); + + hp = (struct fcoe_hdr *)(eh + 1); + memset(hp, 0, sizeof(*hp)); + if (FC_FCOE_VER) + FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER); + hp->fcoe_sof = sof; + + /* fcoe lso, mss is in max_payload which is non-zero for FCP data */ + if (lport->seq_offload && fr_max_payload(fp)) { + skb_shinfo(skb)->gso_type = SKB_GSO_FCOE; + skb_shinfo(skb)->gso_size = fr_max_payload(fp); + } else { + skb_shinfo(skb)->gso_type = 0; + skb_shinfo(skb)->gso_size = 0; + } + + /*update tx stats */ + this_cpu_inc(lport->stats->TxFrames); + this_cpu_add(lport->stats->TxWords, wlen); + + /* send down to lld */ + fr_dev(fp) = lport; + if (port->fcoe_pending_queue.qlen) + fcoe_check_wait_queue(lport, skb); + else if (fcoe_start_io(skb)) + fcoe_check_wait_queue(lport, skb); + + return 0; +} + +/** + * bnx2fc_rcv - This is bnx2fc's receive function called by NET_RX_SOFTIRQ + * + * @skb: the receive socket buffer + * @dev: associated net device + * @ptype: context + * @olddev: last device + * + * This function receives the packet and builds FC frame and passes it up + */ +static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *olddev) +{ + struct fc_lport *lport; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct fcoe_rcv_info *fr; + struct fcoe_percpu_s *bg; + + interface = container_of(ptype, struct bnx2fc_interface, + fcoe_packet_type); + ctlr = bnx2fc_to_ctlr(interface); + lport = ctlr->lp; + + if (unlikely(lport == NULL)) { + printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n"); + goto err; + } + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return -1; + + if 
(unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { + printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); + goto err; + } + + /* + * Check for minimum frame length, and make sure required FCoE + * and FC headers are pulled into the linear data area. + */ + if (unlikely((skb->len < FCOE_MIN_FRAME) || + !pskb_may_pull(skb, FCOE_HEADER_LEN))) + goto err; + + skb_set_transport_header(skb, sizeof(struct fcoe_hdr)); + + fr = fcoe_dev_from_skb(skb); + fr->fr_dev = lport; + + bg = &bnx2fc_global; + spin_lock(&bg->fcoe_rx_list.lock); + + __skb_queue_tail(&bg->fcoe_rx_list, skb); + if (bg->fcoe_rx_list.qlen == 1) + wake_up_process(bg->kthread); + + spin_unlock(&bg->fcoe_rx_list.lock); + + return 0; +err: + kfree_skb(skb); + return -1; +} + +static int bnx2fc_l2_rcv_thread(void *arg) +{ + struct fcoe_percpu_s *bg = arg; + struct sk_buff *skb; + + set_user_nice(current, MIN_NICE); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + spin_lock_bh(&bg->fcoe_rx_list.lock); + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) { + spin_unlock_bh(&bg->fcoe_rx_list.lock); + bnx2fc_recv_frame(skb); + spin_lock_bh(&bg->fcoe_rx_list.lock); + } + __set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&bg->fcoe_rx_list.lock); + } + __set_current_state(TASK_RUNNING); + return 0; +} + + +static void bnx2fc_recv_frame(struct sk_buff *skb) +{ + u64 crc_err; + u32 fr_len, fr_crc; + struct fc_lport *lport; + struct fcoe_rcv_info *fr; + struct fc_frame_header *fh; + struct fcoe_crc_eof crc_eof; + struct fc_frame *fp; + struct fc_lport *vn_port; + struct fcoe_port *port, *phys_port; + u8 *mac = NULL; + u8 *dest_mac = NULL; + struct fcoe_hdr *hp; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + + fr = fcoe_dev_from_skb(skb); + lport = fr->fr_dev; + if (unlikely(lport == NULL)) { + printk(KERN_ERR PFX "Invalid lport struct\n"); + kfree_skb(skb); + return; + } + + if (skb_is_nonlinear(skb)) + skb_linearize(skb); + mac = eth_hdr(skb)->h_source; + dest_mac = eth_hdr(skb)->h_dest; + + /* Pull the header */ + hp = (struct fcoe_hdr *) skb_network_header(skb); + fh = (struct fc_frame_header *) skb_transport_header(skb); + skb_pull(skb, sizeof(struct fcoe_hdr)); + fr_len = skb->len - sizeof(struct fcoe_crc_eof); + + this_cpu_inc(lport->stats->RxFrames); + this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE); + + fp = (struct fc_frame *)skb; + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = hp->fcoe_sof; + if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + kfree_skb(skb); + return; + } + fr_eof(fp) = crc_eof.fcoe_eof; + fr_crc(fp) = crc_eof.fcoe_crc32; + if (pskb_trim(skb, fr_len)) { + kfree_skb(skb); + return; + } + + phys_port = lport_priv(lport); + interface = phys_port->priv; + ctlr = bnx2fc_to_ctlr(interface); + + fh = fc_frame_header_get(fp); + + if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) { + BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n", + dest_mac); + kfree_skb(skb); + return; + } + + vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id)); + if (vn_port) { + port = lport_priv(vn_port); + if (!ether_addr_equal(port->data_src_addr, dest_mac)) { + BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); + kfree_skb(skb); + return; + } + } + if (ctlr->state) { + if (!ether_addr_equal(mac, ctlr->dest_addr)) { + BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n", + mac, ctlr->dest_addr); + kfree_skb(skb); + return; + } + } + if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && + fh->fh_type == 
FC_TYPE_FCP) { + /* Drop FCP data. We dont this in L2 path */ + kfree_skb(skb); + return; + } + if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && + fh->fh_type == FC_TYPE_ELS) { + switch (fc_frame_payload_op(fp)) { + case ELS_LOGO: + if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { + /* drop non-FIP LOGO */ + kfree_skb(skb); + return; + } + break; + } + } + + if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { + /* Drop incoming ABTS */ + kfree_skb(skb); + return; + } + + /* + * If the destination ID from the frame header does not match what we + * have on record for lport and the search for a NPIV port came up + * empty then this is not addressed to our port so simply drop it. + */ + if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { + BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", + lport->port_id, ntoh24(fh->fh_d_id)); + kfree_skb(skb); + return; + } + + fr_crc = le32_to_cpu(fr_crc(fp)); + + if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { + crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount); + if (crc_err < 5) + printk(KERN_WARNING PFX "dropping frame with " + "CRC error\n"); + kfree_skb(skb); + return; + } + fc_exch_recv(lport, fp); +} + +/** + * bnx2fc_percpu_io_thread - thread per cpu for ios + * + * @arg: ptr to bnx2fc_percpu_info structure + */ +static int bnx2fc_percpu_io_thread(void *arg) +{ + struct bnx2fc_percpu_s *p = arg; + struct bnx2fc_work *work, *tmp; + LIST_HEAD(work_list); + + set_user_nice(current, MIN_NICE); + set_current_state(TASK_INTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + spin_lock_bh(&p->fp_work_lock); + while (!list_empty(&p->work_list)) { + list_splice_init(&p->work_list, &work_list); + spin_unlock_bh(&p->fp_work_lock); + + list_for_each_entry_safe(work, tmp, &work_list, list) { + list_del_init(&work->list); + bnx2fc_process_cq_compl(work->tgt, work->wqe, + work->rq_data, + work->num_rq, + work->task); + kfree(work); + } + + spin_lock_bh(&p->fp_work_lock); + } + __set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_bh(&p->fp_work_lock); + } + __set_current_state(TASK_RUNNING); + + return 0; +} + +static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) +{ + struct fc_host_statistics *bnx2fc_stats; + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct fcoe_statistics_params *fw_stats; + int rc = 0; + + fw_stats = (struct fcoe_statistics_params *)hba->stats_buffer; + if (!fw_stats) + return NULL; + + mutex_lock(&hba->hba_stats_mutex); + + bnx2fc_stats = fc_get_host_stats(shost); + + init_completion(&hba->stat_req_done); + if (bnx2fc_send_stat_req(hba)) + goto unlock_stats_mutex; + rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); + if (!rc) { + BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); + goto unlock_stats_mutex; + } + BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); + bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; + BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt); + bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt; + BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt); + bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4); + BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt); + bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt; + BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt); + bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4); + + bnx2fc_stats->dumped_frames = 0; + 
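+	/*
+	 * The counters below are not collected from the adapter by this
+	 * driver, so they are always reported as zero.
+	 */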
bnx2fc_stats->lip_count = 0; + bnx2fc_stats->nos_count = 0; + bnx2fc_stats->loss_of_sync_count = 0; + bnx2fc_stats->loss_of_signal_count = 0; + bnx2fc_stats->prim_seq_protocol_err_count = 0; + + memcpy(&hba->prev_stats, hba->stats_buffer, + sizeof(struct fcoe_statistics_params)); + +unlock_stats_mutex: + mutex_unlock(&hba->hba_stats_mutex); + return bnx2fc_stats; +} + +static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct Scsi_Host *shost = lport->host; + int rc = 0; + + shost->max_cmd_len = BNX2FC_MAX_CMD_LEN; + shost->max_lun = bnx2fc_max_luns; + shost->max_id = BNX2FC_MAX_FCP_TGT; + shost->max_channel = 0; + if (lport->vport) + shost->transportt = bnx2fc_vport_xport_template; + else + shost->transportt = bnx2fc_transport_template; + + /* Add the new host to SCSI-ml */ + rc = scsi_add_host(lport->host, dev); + if (rc) { + printk(KERN_ERR PFX "Error on scsi_add_host\n"); + return rc; + } + if (!lport->vport) + fc_host_max_npiv_vports(lport->host) = USHRT_MAX; + snprintf(fc_host_symbolic_name(lport->host), 256, + "%s (QLogic %s) v%s over %s", + BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, + interface->netdev->name); + + return 0; +} + +static int bnx2fc_link_ok(struct fc_lport *lport) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct net_device *dev = hba->phys_dev; + int rc = 0; + + if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else { + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + rc = -1; + } + return rc; +} + +/** + * bnx2fc_get_link_state - get network link state + * + * @hba: adapter instance pointer + * + * updates adapter structure flag based on netdev state + */ +void bnx2fc_get_link_state(struct bnx2fc_hba *hba) +{ + if (test_bit(__LINK_STATE_NOCARRIER, &hba->phys_dev->state)) + set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); + else + clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); +} + +static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) +{ + struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct fcoe_port *port; + u64 wwnn, wwpn; + + port = lport_priv(lport); + interface = port->priv; + ctlr = bnx2fc_to_ctlr(interface); + hba = interface->hba; + + /* require support for get_pauseparam ethtool op. 
*/ + if (!hba->phys_dev->ethtool_ops || + !hba->phys_dev->ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (fc_set_mfs(lport, BNX2FC_MFS)) + return -EINVAL; + + skb_queue_head_init(&port->fcoe_pending_queue); + port->fcoe_pending_queue_active = 0; + timer_setup(&port->timer, fcoe_queue_timer, 0); + + fcoe_link_speed_update(lport); + + if (!lport->vport) { + if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) + wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, + 1, 0); + BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); + fc_set_wwnn(lport, wwnn); + + if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) + wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, + 2, 0); + + BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); + fc_set_wwpn(lport, wwpn); + } + + return 0; +} + +static void bnx2fc_destroy_timer(struct timer_list *t) +{ + struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer); + + printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " + "Destroy compl not received!!\n"); + set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); + wake_up_interruptible(&hba->destroy_wait); +} + +/** + * bnx2fc_indicate_netevent - Generic netdev event handler + * + * @context: adapter structure pointer + * @event: event type + * @vlan_id: vlan id - associated vlan id with this event + * + * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and + * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans. + */ +static void bnx2fc_indicate_netevent(void *context, unsigned long event, + u16 vlan_id) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; + struct fcoe_ctlr_device *cdev; + struct fc_lport *lport; + struct fc_lport *vport; + struct bnx2fc_interface *interface, *tmp; + struct fcoe_ctlr *ctlr; + int wait_for_upload = 0; + u32 link_possible = 1; + + if (vlan_id != 0 && event != NETDEV_UNREGISTER) + return; + + switch (event) { + case NETDEV_UP: + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) + printk(KERN_ERR "indicate_netevent: "\ + "hba is not UP!!\n"); + break; + + case NETDEV_DOWN: + clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_GOING_DOWN: + set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state); + link_possible = 0; + break; + + case NETDEV_CHANGE: + break; + + case NETDEV_UNREGISTER: + if (!vlan_id) + return; + mutex_lock(&bnx2fc_dev_lock); + list_for_each_entry_safe(interface, tmp, &if_list, list) { + if (interface->hba == hba && + interface->vlan_id == (vlan_id & VLAN_VID_MASK)) + __bnx2fc_destroy(interface); + } + mutex_unlock(&bnx2fc_dev_lock); + return; + + default: + return; + } + + mutex_lock(&bnx2fc_dev_lock); + list_for_each_entry(interface, &if_list, list) { + + if (interface->hba != hba) + continue; + + ctlr = bnx2fc_to_ctlr(interface); + lport = ctlr->lp; + BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", + interface->netdev->name, event); + + fcoe_link_speed_update(lport); + + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + + if (link_possible && !bnx2fc_link_ok(lport)) { + switch (cdev->enabled) { + case FCOE_CTLR_DISABLED: + pr_info("Link up while interface is disabled.\n"); + break; + case FCOE_CTLR_ENABLED: + case FCOE_CTLR_UNUSED: + /* Reset max recv frame size to default */ + fc_set_mfs(lport, BNX2FC_MFS); + /* + * ctlr link up will only be handled during + * enable to avoid sending discovery + * solicitation on a stale vlan + */ + if (interface->enabled) + fcoe_ctlr_link_up(ctlr); + } + } else if (fcoe_ctlr_link_down(ctlr)) { + switch 
(cdev->enabled) { + case FCOE_CTLR_DISABLED: + pr_info("Link down while interface is disabled.\n"); + break; + case FCOE_CTLR_ENABLED: + case FCOE_CTLR_UNUSED: + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = + FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = + FC_PORTTYPE_UNKNOWN; + this_cpu_inc(lport->stats->LinkFailureCount); + fcoe_clean_pending_queue(lport); + wait_for_upload = 1; + } + } + } + mutex_unlock(&bnx2fc_dev_lock); + + if (wait_for_upload) { + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + init_waitqueue_head(&hba->shutdown_wait); + BNX2FC_MISC_DBG("indicate_netevent " + "num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 1; + wait_event_interruptible(hba->shutdown_wait, + (hba->num_ofld_sess == 0)); + BNX2FC_MISC_DBG("wakeup - num_ofld_sess = %d\n", + hba->num_ofld_sess); + hba->wait_for_link_down = 0; + + if (signal_pending(current)) + flush_signals(current); + } +} + +static int bnx2fc_libfc_config(struct fc_lport *lport) +{ + + /* Set the function pointers set by bnx2fc driver */ + memcpy(&lport->tt, &bnx2fc_libfc_fcn_templ, + sizeof(struct libfc_function_template)); + fc_elsct_init(lport); + fc_exch_init(lport); + fc_disc_init(lport); + fc_disc_config(lport, lport); + return 0; +} + +static int bnx2fc_em_config(struct fc_lport *lport, struct bnx2fc_hba *hba) +{ + int fcoe_min_xid, fcoe_max_xid; + + fcoe_min_xid = hba->max_xid + 1; + if (nr_cpu_ids <= 2) + fcoe_max_xid = hba->max_xid + FCOE_XIDS_PER_CPU_OFFSET; + else + fcoe_max_xid = hba->max_xid + FCOE_MAX_XID_OFFSET; + if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, fcoe_min_xid, + fcoe_max_xid, NULL)) { + printk(KERN_ERR PFX "em_config:fc_exch_mgr_alloc failed\n"); + return -ENOMEM; + } + + return 0; +} + +static int bnx2fc_lport_config(struct fc_lport *lport) +{ + lport->link_up = 0; + lport->qfull = 0; + lport->max_retry_count = BNX2FC_MAX_RETRY_CNT; + lport->max_rport_retry_count = BNX2FC_MAX_RPORT_RETRY_CNT; + lport->e_d_tov = 2 * 1000; + lport->r_a_tov = 10 * 1000; + + lport->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | + FCP_SPPF_RETRY | FCP_SPPF_CONF_COMPL); + lport->does_npiv = 1; + + memset(&lport->rnid_gen, 0, sizeof(struct fc_els_rnid_gen)); + lport->rnid_gen.rnid_atype = BNX2FC_RNID_HBA; + + /* alloc stats structure */ + if (fc_lport_init_stats(lport)) + return -ENOMEM; + + /* Finish fc_lport configuration */ + fc_lport_config(lport); + + return 0; +} + +/** + * bnx2fc_fip_recv - handle a received FIP frame. + * + * @skb: the received skb + * @dev: associated &net_device + * @ptype: the &packet_type structure which was used to register this handler. + * @orig_dev: original receive &net_device, in case @ dev is a bond. + * + * Returns: 0 for success + */ +static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, + struct net_device *orig_dev) +{ + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + interface = container_of(ptype, struct bnx2fc_interface, + fip_packet_type); + ctlr = bnx2fc_to_ctlr(interface); + fcoe_ctlr_recv(ctlr, skb); + return 0; +} + +/** + * bnx2fc_update_src_mac - Update Ethernet MAC filters. + * + * @lport: The local port + * @addr: Location of data to copy + * + * Remove any previously-set unicast MAC filter. + * Add secondary FCoE MAC address filter for our OUI. 
+ */ +static void bnx2fc_update_src_mac(struct fc_lport *lport, u8 *addr) +{ + struct fcoe_port *port = lport_priv(lport); + + memcpy(port->data_src_addr, addr, ETH_ALEN); +} + +/** + * bnx2fc_get_src_mac - return the ethernet source address for an lport + * + * @lport: libfc port + */ +static u8 *bnx2fc_get_src_mac(struct fc_lport *lport) +{ + struct fcoe_port *port; + + port = (struct fcoe_port *)lport_priv(lport); + return port->data_src_addr; +} + +/** + * bnx2fc_fip_send - send an Ethernet-encapsulated FIP frame. + * + * @fip: FCoE controller. + * @skb: FIP Packet. + */ +static void bnx2fc_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb) +{ + struct fip_header *fiph; + struct ethhdr *eth_hdr; + u16 op; + u8 sub; + + fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2); + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op == FIP_OP_CTRL && sub == FIP_SC_SOL && bnx2fc_log_fka) + BNX2FC_MISC_DBG("Sending FKA from %pM to %pM.\n", + eth_hdr->h_source, eth_hdr->h_dest); + + skb->dev = bnx2fc_from_ctlr(fip)->netdev; + dev_queue_xmit(skb); +} + +static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fcoe_port *port = lport_priv(n_port); + struct bnx2fc_interface *interface = port->priv; + struct net_device *netdev = interface->netdev; + struct fc_lport *vn_port; + int rc; + char buf[32]; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + printk(KERN_ERR PFX "Failed to create vport, " + "WWPN (0x%s) already exists\n", + buf); + return rc; + } + + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { + printk(KERN_ERR PFX "vn ports cannot be created on" + "this interface\n"); + return -EIO; + } + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + vn_port = bnx2fc_if_create(interface, &vport->dev, 1); + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + + if (!vn_port) { + printk(KERN_ERR PFX "bnx2fc_vport_create (%s) failed\n", + netdev->name); + return -EIO; + } + + if (bnx2fc_devloss_tmo) + fc_host_dev_loss_tmo(vn_port->host) = bnx2fc_devloss_tmo; + + if (disabled) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + } else { + vn_port->boot_time = jiffies; + fc_lport_init(vn_port); + fc_fabric_login(vn_port); + fc_vport_setlink(vn_port); + } + return 0; +} + +static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport) +{ + struct bnx2fc_lport *blport, *tmp; + + spin_lock_bh(&hba->hba_lock); + list_for_each_entry_safe(blport, tmp, &hba->vports, list) { + if (blport->lport == lport) { + list_del(&blport->list); + kfree(blport); + } + } + spin_unlock_bh(&hba->hba_lock); +} + +static int bnx2fc_vport_destroy(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; + struct fcoe_port *port = lport_priv(vn_port); + struct bnx2fc_interface *interface = port->priv; + struct fc_lport *v_port; + bool found = false; + + mutex_lock(&n_port->lp_mutex); + list_for_each_entry(v_port, &n_port->vports, list) + if (v_port->vport == vport) { + found = true; + break; + } + + if (!found) { + mutex_unlock(&n_port->lp_mutex); + return -ENOENT; + } + list_del(&vn_port->list); + mutex_unlock(&n_port->lp_mutex); + bnx2fc_free_vport(interface->hba, port->lport); + bnx2fc_port_shutdown(port->lport); + 
bnx2fc_port_destroy(port); + bnx2fc_interface_put(interface); + return 0; +} + +static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) +{ + struct fc_lport *lport = vport->dd_data; + + if (disable) { + fc_vport_set_state(vport, FC_VPORT_DISABLED); + fc_fabric_logoff(lport); + } else { + lport->boot_time = jiffies; + fc_fabric_login(lport); + fc_vport_setlink(lport); + } + return 0; +} + + +static int bnx2fc_interface_setup(struct bnx2fc_interface *interface) +{ + struct net_device *netdev = interface->netdev; + struct net_device *physdev = interface->hba->phys_dev; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct netdev_hw_addr *ha; + int sel_san_mac = 0; + + /* setup Source MAC Address */ + rcu_read_lock(); + for_each_dev_addr(physdev, ha) { + BNX2FC_MISC_DBG("net_config: ha->type = %d, fip_mac = ", + ha->type); + printk(KERN_INFO "%2x:%2x:%2x:%2x:%2x:%2x\n", ha->addr[0], + ha->addr[1], ha->addr[2], ha->addr[3], + ha->addr[4], ha->addr[5]); + + if ((ha->type == NETDEV_HW_ADDR_T_SAN) && + (is_valid_ether_addr(ha->addr))) { + memcpy(ctlr->ctl_src_addr, ha->addr, + ETH_ALEN); + sel_san_mac = 1; + BNX2FC_MISC_DBG("Found SAN MAC\n"); + } + } + rcu_read_unlock(); + + if (!sel_san_mac) + return -ENODEV; + + interface->fip_packet_type.func = bnx2fc_fip_recv; + interface->fip_packet_type.type = htons(ETH_P_FIP); + interface->fip_packet_type.dev = netdev; + dev_add_pack(&interface->fip_packet_type); + + interface->fcoe_packet_type.func = bnx2fc_rcv; + interface->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE); + interface->fcoe_packet_type.dev = netdev; + dev_add_pack(&interface->fcoe_packet_type); + + return 0; +} + +static int bnx2fc_attach_transport(void) +{ + bnx2fc_transport_template = + fc_attach_transport(&bnx2fc_transport_function); + + if (bnx2fc_transport_template == NULL) { + printk(KERN_ERR PFX "Failed to attach FC transport\n"); + return -ENODEV; + } + + bnx2fc_vport_xport_template = + fc_attach_transport(&bnx2fc_vport_xport_function); + if (bnx2fc_vport_xport_template == NULL) { + printk(KERN_ERR PFX + "Failed to attach FC transport for vport\n"); + fc_release_transport(bnx2fc_transport_template); + bnx2fc_transport_template = NULL; + return -ENODEV; + } + return 0; +} +static void bnx2fc_release_transport(void) +{ + fc_release_transport(bnx2fc_transport_template); + fc_release_transport(bnx2fc_vport_xport_template); + bnx2fc_transport_template = NULL; + bnx2fc_vport_xport_template = NULL; +} + +static void bnx2fc_interface_release(struct kref *kref) +{ + struct fcoe_ctlr_device *ctlr_dev; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct net_device *netdev; + + interface = container_of(kref, struct bnx2fc_interface, kref); + BNX2FC_MISC_DBG("Interface is being released\n"); + + ctlr = bnx2fc_to_ctlr(interface); + ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr); + netdev = interface->netdev; + + /* tear-down FIP controller */ + if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags)) + fcoe_ctlr_destroy(ctlr); + + fcoe_ctlr_device_delete(ctlr_dev); + + dev_put(netdev); + module_put(THIS_MODULE); +} + +static inline void bnx2fc_interface_get(struct bnx2fc_interface *interface) +{ + kref_get(&interface->kref); +} + +static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface) +{ + kref_put(&interface->kref, bnx2fc_interface_release); +} +static void bnx2fc_hba_destroy(struct bnx2fc_hba *hba) +{ + /* Free the command manager */ + if (hba->cmd_mgr) { + bnx2fc_cmd_mgr_free(hba->cmd_mgr); + hba->cmd_mgr = NULL; + } + 
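+	/* Free the target offload list, drop the PCI device reference and free the hba itself */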
kfree(hba->tgt_ofld_list); + bnx2fc_unbind_pcidev(hba); + kfree(hba); +} + +/** + * bnx2fc_hba_create - create a new bnx2fc hba + * + * @cnic: pointer to cnic device + * + * Creates a new FCoE hba on the given device. + * + */ +static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) +{ + struct bnx2fc_hba *hba; + struct fcoe_capabilities *fcoe_cap; + int rc; + + hba = kzalloc(sizeof(*hba), GFP_KERNEL); + if (!hba) { + printk(KERN_ERR PFX "Unable to allocate hba structure\n"); + return NULL; + } + spin_lock_init(&hba->hba_lock); + mutex_init(&hba->hba_mutex); + mutex_init(&hba->hba_stats_mutex); + + hba->cnic = cnic; + + hba->max_tasks = cnic->max_fcoe_exchanges; + hba->elstm_xids = (hba->max_tasks / 2); + hba->max_outstanding_cmds = hba->elstm_xids; + hba->max_xid = (hba->max_tasks - 1); + + rc = bnx2fc_bind_pcidev(hba); + if (rc) { + printk(KERN_ERR PFX "create_adapter: bind error\n"); + goto bind_err; + } + hba->phys_dev = cnic->netdev; + hba->next_conn_id = 0; + + hba->tgt_ofld_list = + kcalloc(BNX2FC_NUM_MAX_SESS, sizeof(struct bnx2fc_rport *), + GFP_KERNEL); + if (!hba->tgt_ofld_list) { + printk(KERN_ERR PFX "Unable to allocate tgt offload list\n"); + goto tgtofld_err; + } + + hba->num_ofld_sess = 0; + + hba->cmd_mgr = bnx2fc_cmd_mgr_alloc(hba); + if (!hba->cmd_mgr) { + printk(KERN_ERR PFX "em_config:bnx2fc_cmd_mgr_alloc failed\n"); + goto cmgr_err; + } + fcoe_cap = &hba->fcoe_cap; + + fcoe_cap->capability1 = BNX2FC_TM_MAX_SQES << + FCOE_IOS_PER_CONNECTION_SHIFT; + fcoe_cap->capability1 |= BNX2FC_NUM_MAX_SESS << + FCOE_LOGINS_PER_PORT_SHIFT; + fcoe_cap->capability2 = hba->max_outstanding_cmds << + FCOE_NUMBER_OF_EXCHANGES_SHIFT; + fcoe_cap->capability2 |= BNX2FC_MAX_NPIV << + FCOE_NPIV_WWN_PER_PORT_SHIFT; + fcoe_cap->capability3 = BNX2FC_NUM_MAX_SESS << + FCOE_TARGETS_SUPPORTED_SHIFT; + fcoe_cap->capability3 |= hba->max_outstanding_cmds << + FCOE_OUTSTANDING_COMMANDS_SHIFT; + fcoe_cap->capability4 = FCOE_CAPABILITY4_STATEFUL; + + init_waitqueue_head(&hba->shutdown_wait); + init_waitqueue_head(&hba->destroy_wait); + INIT_LIST_HEAD(&hba->vports); + + return hba; + +cmgr_err: + kfree(hba->tgt_ofld_list); +tgtofld_err: + bnx2fc_unbind_pcidev(hba); +bind_err: + kfree(hba); + return NULL; +} + +static struct bnx2fc_interface * +bnx2fc_interface_create(struct bnx2fc_hba *hba, + struct net_device *netdev, + enum fip_mode fip_mode) +{ + struct fcoe_ctlr_device *ctlr_dev; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + int size; + int rc = 0; + + size = (sizeof(*interface) + sizeof(struct fcoe_ctlr)); + ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ, + size); + if (!ctlr_dev) { + printk(KERN_ERR PFX "Unable to allocate interface structure\n"); + return NULL; + } + ctlr = fcoe_ctlr_device_priv(ctlr_dev); + ctlr->cdev = ctlr_dev; + interface = fcoe_ctlr_priv(ctlr); + dev_hold(netdev); + kref_init(&interface->kref); + interface->hba = hba; + interface->netdev = netdev; + + /* Initialize FIP */ + fcoe_ctlr_init(ctlr, fip_mode); + ctlr->send = bnx2fc_fip_send; + ctlr->update_mac = bnx2fc_update_src_mac; + ctlr->get_src_addr = bnx2fc_get_src_mac; + set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); + + rc = bnx2fc_interface_setup(interface); + if (!rc) + return interface; + + fcoe_ctlr_destroy(ctlr); + dev_put(netdev); + fcoe_ctlr_device_delete(ctlr_dev); + return NULL; +} + +/** + * bnx2fc_if_create - Create FCoE instance on a given interface + * + * @interface: FCoE interface to create a local port on + * @parent: Device pointer to be the 
parent in sysfs for the SCSI host + * @npiv: Indicates if the port is vport or not + * + * Creates a fc_lport instance and a Scsi_Host instance and configure them. + * + * Returns: Allocated fc_lport or an error pointer + */ +static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, + struct device *parent, int npiv) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport, *n_port; + struct fcoe_port *port; + struct Scsi_Host *shost; + struct fc_vport *vport = dev_to_vport(parent); + struct bnx2fc_lport *blport; + struct bnx2fc_hba *hba = interface->hba; + int rc = 0; + + blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); + if (!blport) { + BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n"); + return NULL; + } + + /* Allocate Scsi_Host structure */ + bnx2fc_shost_template.can_queue = hba->max_outstanding_cmds; + if (!npiv) + lport = libfc_host_alloc(&bnx2fc_shost_template, sizeof(*port)); + else + lport = libfc_vport_create(vport, sizeof(*port)); + + if (!lport) { + printk(KERN_ERR PFX "could not allocate scsi host structure\n"); + goto free_blport; + } + shost = lport->host; + port = lport_priv(lport); + port->lport = lport; + port->priv = interface; + port->get_netdev = bnx2fc_netdev; + + /* Configure fcoe_port */ + rc = bnx2fc_lport_config(lport); + if (rc) + goto lp_config_err; + + if (npiv) { + printk(KERN_ERR PFX "Setting vport names, 0x%llX 0x%llX\n", + vport->node_name, vport->port_name); + fc_set_wwnn(lport, vport->node_name); + fc_set_wwpn(lport, vport->port_name); + } + /* Configure netdev and networking properties of the lport */ + rc = bnx2fc_net_config(lport, interface->netdev); + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); + goto lp_config_err; + } + + rc = bnx2fc_shost_config(lport, parent); + if (rc) { + printk(KERN_ERR PFX "Couldn't configure shost for %s\n", + interface->netdev->name); + goto lp_config_err; + } + + /* Initialize the libfc library */ + rc = bnx2fc_libfc_config(lport); + if (rc) { + printk(KERN_ERR PFX "Couldn't configure libfc\n"); + goto shost_err; + } + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + + if (bnx2fc_devloss_tmo) + fc_host_dev_loss_tmo(shost) = bnx2fc_devloss_tmo; + + /* Allocate exchange manager */ + if (!npiv) + rc = bnx2fc_em_config(lport, hba); + else { + shost = vport_to_shost(vport); + n_port = shost_priv(shost); + rc = fc_exch_mgr_list_clone(n_port, lport); + } + + if (rc) { + printk(KERN_ERR PFX "Error on bnx2fc_em_config\n"); + goto shost_err; + } + + bnx2fc_interface_get(interface); + + spin_lock_bh(&hba->hba_lock); + blport->lport = lport; + list_add_tail(&blport->list, &hba->vports); + spin_unlock_bh(&hba->hba_lock); + + return lport; + +shost_err: + scsi_remove_host(shost); +lp_config_err: + scsi_host_put(lport->host); +free_blport: + kfree(blport); + return NULL; +} + +static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface) +{ + /* Dont listen for Ethernet packets anymore */ + __dev_remove_pack(&interface->fcoe_packet_type); + __dev_remove_pack(&interface->fip_packet_type); + synchronize_net(); +} + +static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport = ctlr->lp; + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_hba *hba = interface->hba; + + /* Stop the transmit retry timer */ + del_timer_sync(&port->timer); + + /* Free existing transmit skbs */ + fcoe_clean_pending_queue(lport); + + bnx2fc_net_cleanup(interface); + + 
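+	/* Remove this lport's entry from the hba's vport list */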
bnx2fc_free_vport(hba, lport); +} + +static void bnx2fc_if_destroy(struct fc_lport *lport) +{ + + /* Free queued packets for the receive thread */ + bnx2fc_clean_rx_queue(lport); + + /* Detach from scsi-ml */ + fc_remove_host(lport->host); + scsi_remove_host(lport->host); + + /* + * Note that only the physical lport will have the exchange manager. + * for vports, this function is NOP + */ + fc_exch_mgr_free(lport); + + /* Free memory used by statistical counters */ + fc_lport_free_stats(lport); + + /* Release Scsi_Host */ + scsi_host_put(lport->host); +} + +static void __bnx2fc_destroy(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport = ctlr->lp; + struct fcoe_port *port = lport_priv(lport); + + bnx2fc_interface_cleanup(interface); + bnx2fc_stop(interface); + list_del(&interface->list); + bnx2fc_port_destroy(port); + bnx2fc_interface_put(interface); +} + +/** + * bnx2fc_destroy - Destroy a bnx2fc FCoE interface + * + * @netdev: The net device that the FCoE interface is on + * + * Called from sysfs. + * + * Returns: 0 for success + */ +static int bnx2fc_destroy(struct net_device *netdev) +{ + struct bnx2fc_interface *interface = NULL; + struct workqueue_struct *timer_work_queue; + struct fcoe_ctlr *ctlr; + int rc = 0; + + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + + interface = bnx2fc_interface_lookup(netdev); + ctlr = bnx2fc_to_ctlr(interface); + if (!interface || !ctlr->lp) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); + goto netdev_err; + } + + timer_work_queue = interface->timer_work_queue; + __bnx2fc_destroy(interface); + destroy_workqueue(timer_work_queue); + +netdev_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +static void bnx2fc_port_destroy(struct fcoe_port *port) +{ + struct fc_lport *lport; + + lport = port->lport; + BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport); + + bnx2fc_if_destroy(lport); +} + +static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) +{ + bnx2fc_free_fw_resc(hba); + bnx2fc_free_task_ctx(hba); +} + +/** + * bnx2fc_bind_adapter_devices - binds bnx2fc adapter with the associated + * pci structure + * + * @hba: Adapter instance + */ +static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba) +{ + if (bnx2fc_setup_task_ctx(hba)) + goto mem_err; + + if (bnx2fc_setup_fw_resc(hba)) + goto mem_err; + + return 0; +mem_err: + bnx2fc_unbind_adapter_devices(hba); + return -ENOMEM; +} + +static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) +{ + struct cnic_dev *cnic; + struct pci_dev *pdev; + + if (!hba->cnic) { + printk(KERN_ERR PFX "cnic is NULL\n"); + return -ENODEV; + } + cnic = hba->cnic; + pdev = hba->pcidev = cnic->pcidev; + if (!hba->pcidev) + return -ENODEV; + + switch (pdev->device) { + case PCI_DEVICE_ID_NX2_57710: + strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57711: + strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57712: + case PCI_DEVICE_ID_NX2_57712_MF: + case PCI_DEVICE_ID_NX2_57712_VF: + strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57800: + case PCI_DEVICE_ID_NX2_57800_MF: + case PCI_DEVICE_ID_NX2_57800_VF: + strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57810: + case PCI_DEVICE_ID_NX2_57810_MF: + case PCI_DEVICE_ID_NX2_57810_VF: + strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); + break; + case 
PCI_DEVICE_ID_NX2_57840: + case PCI_DEVICE_ID_NX2_57840_MF: + case PCI_DEVICE_ID_NX2_57840_VF: + case PCI_DEVICE_ID_NX2_57840_2_20: + case PCI_DEVICE_ID_NX2_57840_4_10: + strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); + break; + default: + pr_err(PFX "Unknown device id 0x%x\n", pdev->device); + break; + } + pci_dev_get(hba->pcidev); + return 0; +} + +static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) +{ + if (hba->pcidev) { + hba->chip_num[0] = '\0'; + pci_dev_put(hba->pcidev); + } + hba->pcidev = NULL; +} + +/** + * bnx2fc_ulp_get_stats - cnic callback to populate FCoE stats + * + * @handle: transport handle pointing to adapter structure + */ +static int bnx2fc_ulp_get_stats(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct cnic_dev *cnic; + struct fcoe_stats_info *stats_addr; + + if (!hba) + return -EINVAL; + + cnic = hba->cnic; + stats_addr = &cnic->stats_addr->fcoe_stat; + if (!stats_addr) + return -EINVAL; + + strncpy(stats_addr->version, BNX2FC_VERSION, + sizeof(stats_addr->version)); + stats_addr->txq_size = BNX2FC_SQ_WQES_MAX; + stats_addr->rxq_size = BNX2FC_CQ_WQES_MAX; + + return 0; +} + + +/** + * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance + * + * @handle: transport handle pointing to adapter structure + * + * This function maps adapter structure to pcidev structure and initiates + * firmware handshake to enable/initialize on-chip FCoE components. + * This bnx2fc - cnic interface api callback is used after following + * conditions are met - + * a) underlying network interface is up (marked by event NETDEV_UP + * from netdev + * b) bnx2fc adatper structure is registered. + */ +static void bnx2fc_ulp_start(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + struct fc_lport *lport; + + mutex_lock(&bnx2fc_dev_lock); + + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) + bnx2fc_fw_init(hba); + + BNX2FC_MISC_DBG("bnx2fc started.\n"); + + list_for_each_entry(interface, &if_list, list) { + if (interface->hba == hba) { + ctlr = bnx2fc_to_ctlr(interface); + lport = ctlr->lp; + /* Kick off Fabric discovery*/ + printk(KERN_ERR PFX "ulp_init: start discovery\n"); + lport->tt.frame_send = bnx2fc_xmit; + bnx2fc_start_disc(interface); + } + } + + mutex_unlock(&bnx2fc_dev_lock); +} + +static void bnx2fc_port_shutdown(struct fc_lport *lport) +{ + BNX2FC_MISC_DBG("Entered %s\n", __func__); + fc_fabric_logoff(lport); + fc_lport_destroy(lport); +} + +static void bnx2fc_stop(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport; + struct fc_lport *vport; + + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) + return; + + lport = ctlr->lp; + bnx2fc_port_shutdown(lport); + + mutex_lock(&lport->lp_mutex); + list_for_each_entry(vport, &lport->vports, list) + fc_host_port_type(vport->host) = + FC_PORTTYPE_UNKNOWN; + mutex_unlock(&lport->lp_mutex); + fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(lport); +} + +static int bnx2fc_fw_init(struct bnx2fc_hba *hba) +{ +#define BNX2FC_INIT_POLL_TIME (1000 / HZ) + int rc = -1; + int i = HZ; + + rc = bnx2fc_bind_adapter_devices(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_bind_adapter_devices failed - rc = %d\n", rc); + goto err_out; + } + + rc = bnx2fc_send_fw_fcoe_init_msg(hba); + if (rc) { + printk(KERN_ALERT PFX + "bnx2fc_send_fw_fcoe_init_msg failed - rc = %d\n", rc); + goto err_unbind; + } + + 
/* + * Wait until the adapter init message is complete, and adapter + * state is UP. + */ + while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--) + msleep(BNX2FC_INIT_POLL_TIME); + + if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state)) { + printk(KERN_ERR PFX "bnx2fc_start: %s failed to initialize. " + "Ignoring...\n", + hba->cnic->netdev->name); + rc = -1; + goto err_unbind; + } + + + set_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags); + return 0; + +err_unbind: + bnx2fc_unbind_adapter_devices(hba); +err_out: + return rc; +} + +static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) +{ + if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) { + if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { + timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer, + 0); + hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + + jiffies; + add_timer(&hba->destroy_timer); + wait_event_interruptible(hba->destroy_wait, + test_bit(BNX2FC_FLAG_DESTROY_CMPL, + &hba->flags)); + clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); + /* This should never happen */ + if (signal_pending(current)) + flush_signals(current); + + del_timer_sync(&hba->destroy_timer); + } + bnx2fc_unbind_adapter_devices(hba); + } +} + +/** + * bnx2fc_ulp_stop - cnic callback to shutdown adapter instance + * + * @handle: transport handle pointing to adapter structure + * + * Driver checks if adapter is already in shutdown mode, if not start + * the shutdown process. + */ +static void bnx2fc_ulp_stop(void *handle) +{ + struct bnx2fc_hba *hba = handle; + struct bnx2fc_interface *interface; + + printk(KERN_ERR "ULP_STOP\n"); + + mutex_lock(&bnx2fc_dev_lock); + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) + goto exit; + list_for_each_entry(interface, &if_list, list) { + if (interface->hba == hba) + bnx2fc_stop(interface); + } + BUG_ON(hba->num_ofld_sess != 0); + + mutex_lock(&hba->hba_mutex); + clear_bit(ADAPTER_STATE_UP, &hba->adapter_state); + clear_bit(ADAPTER_STATE_GOING_DOWN, + &hba->adapter_state); + + clear_bit(ADAPTER_STATE_READY, &hba->adapter_state); + mutex_unlock(&hba->hba_mutex); + + bnx2fc_fw_destroy(hba); +exit: + mutex_unlock(&bnx2fc_dev_lock); +} + +static void bnx2fc_start_disc(struct bnx2fc_interface *interface) +{ + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct fc_lport *lport; + int wait_cnt = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* Kick off FIP/FLOGI */ + if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { + printk(KERN_ERR PFX "Init not done yet\n"); + return; + } + + lport = ctlr->lp; + BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); + + if (!bnx2fc_link_ok(lport) && interface->enabled) { + BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); + fcoe_ctlr_link_up(ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); + } + + /* wait for the FCF to be selected before issuing FLOGI */ + while (!ctlr->sel_fcf) { + msleep(250); + /* give up after 3 secs */ + if (++wait_cnt > 12) + break; + } + + /* Reset max receive frame size to default */ + if (fc_set_mfs(lport, BNX2FC_MFS)) + return; + + fc_lport_init(lport); + fc_fabric_login(lport); +} + + +/** + * bnx2fc_ulp_init - Initialize an adapter instance + * + * @dev : cnic device handle + * Called from cnic_register_driver() context to initialize all + * enumerated cnic devices. This routine allocates adapter structure + * and other device specific resources. 
+ */ +static void bnx2fc_ulp_init(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + int rc = 0; + + BNX2FC_MISC_DBG("Entered %s\n", __func__); + /* bnx2fc works only when bnx2x is loaded */ + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) || + (dev->max_fcoe_conn == 0)) { + printk(KERN_ERR PFX "bnx2fc FCoE not supported on %s," + " flags: %lx fcoe_conn: %d\n", + dev->netdev->name, dev->flags, dev->max_fcoe_conn); + return; + } + + hba = bnx2fc_hba_create(dev); + if (!hba) { + printk(KERN_ERR PFX "hba initialization failed\n"); + return; + } + + pr_info(PFX "FCoE initialized for %s.\n", dev->netdev->name); + + /* Add HBA to the adapter list */ + mutex_lock(&bnx2fc_dev_lock); + list_add_tail(&hba->list, &adapter_list); + adapter_count++; + mutex_unlock(&bnx2fc_dev_lock); + + dev->fcoe_cap = &hba->fcoe_cap; + clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); + rc = dev->register_device(dev, CNIC_ULP_FCOE, + (void *) hba); + if (rc) + printk(KERN_ERR PFX "register_device failed, rc = %d\n", rc); + else + set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic); +} + +/* Assumes rtnl_lock and the bnx2fc_dev_lock are already taken */ +static int __bnx2fc_disable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + + if (interface->enabled) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_disable: lport not found\n"); + return -ENODEV; + } else { + interface->enabled = false; + fcoe_ctlr_link_down(ctlr); + fcoe_clean_pending_queue(ctlr->lp); + } + } + return 0; +} + +/* + * Deprecated: Use bnx2fc_enabled() + */ +static int bnx2fc_disable(struct net_device *netdev) +{ + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + int rc = 0; + + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + + interface = bnx2fc_interface_lookup(netdev); + ctlr = bnx2fc_to_ctlr(interface); + + if (!interface) { + rc = -ENODEV; + pr_err(PFX "bnx2fc_disable: interface not found\n"); + } else { + rc = __bnx2fc_disable(ctlr); + } + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +static uint bnx2fc_npiv_create_vports(struct fc_lport *lport, + struct cnic_fc_npiv_tbl *npiv_tbl) +{ + struct fc_vport_identifiers vpid; + uint i, created = 0; + u64 wwnn = 0; + char wwpn_str[32]; + char wwnn_str[32]; + + if (npiv_tbl->count > MAX_NPIV_ENTRIES) { + BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n"); + goto done; + } + + /* Sanity check the first entry to make sure it's not 0 */ + if (wwn_to_u64(npiv_tbl->wwnn[0]) == 0 && + wwn_to_u64(npiv_tbl->wwpn[0]) == 0) { + BNX2FC_HBA_DBG(lport, "First NPIV table entries invalid.\n"); + goto done; + } + + vpid.roles = FC_PORT_ROLE_FCP_INITIATOR; + vpid.vport_type = FC_PORTTYPE_NPIV; + vpid.disable = false; + + for (i = 0; i < npiv_tbl->count; i++) { + wwnn = wwn_to_u64(npiv_tbl->wwnn[i]); + if (wwnn == 0) { + /* + * If we get a 0 element for the WWNN then assume + * the WWNN should be the same as the physical port.
+ */ + wwnn = lport->wwnn; + } + vpid.node_name = wwnn; + vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]); + scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name), + "NPIV[%u]:%016llx-%016llx", + created, vpid.port_name, vpid.node_name); + fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str)); + fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str)); + BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str, + wwpn_str); + if (fc_vport_create(lport->host, 0, &vpid)) + created++; + else + BNX2FC_HBA_DBG(lport, "Failed to create vport\n"); + } +done: + return created; +} + +static int __bnx2fc_enable(struct fcoe_ctlr *ctlr) +{ + struct bnx2fc_interface *interface = fcoe_ctlr_priv(ctlr); + struct bnx2fc_hba *hba; + struct cnic_fc_npiv_tbl *npiv_tbl; + struct fc_lport *lport; + + if (!interface->enabled) { + if (!ctlr->lp) { + pr_err(PFX "__bnx2fc_enable: lport not found\n"); + return -ENODEV; + } else if (!bnx2fc_link_ok(ctlr->lp)) { + fcoe_ctlr_link_up(ctlr); + interface->enabled = true; + } + } + + /* Create static NPIV ports if any are contained in NVRAM */ + hba = interface->hba; + lport = ctlr->lp; + + if (!hba) + goto done; + + if (!hba->cnic) + goto done; + + if (!lport) + goto done; + + if (!lport->host) + goto done; + + if (!hba->cnic->get_fc_npiv_tbl) + goto done; + + npiv_tbl = kzalloc(sizeof(struct cnic_fc_npiv_tbl), GFP_KERNEL); + if (!npiv_tbl) + goto done; + + if (hba->cnic->get_fc_npiv_tbl(hba->cnic, npiv_tbl)) + goto done_free; + + bnx2fc_npiv_create_vports(lport, npiv_tbl); +done_free: + kfree(npiv_tbl); +done: + return 0; +} + +/* + * Deprecated: Use bnx2fc_enabled() + */ +static int bnx2fc_enable(struct net_device *netdev) +{ + struct bnx2fc_interface *interface; + struct fcoe_ctlr *ctlr; + int rc = 0; + + rtnl_lock(); + mutex_lock(&bnx2fc_dev_lock); + + interface = bnx2fc_interface_lookup(netdev); + ctlr = bnx2fc_to_ctlr(interface); + if (!interface) { + rc = -ENODEV; + pr_err(PFX "bnx2fc_enable: interface not found\n"); + } else { + rc = __bnx2fc_enable(ctlr); + } + + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller + * @cdev: The FCoE Controller that is being enabled or disabled + * + * fcoe_sysfs will ensure that the state of 'enabled' has + * changed, so no checking is necessary here. This routine simply + * calls fcoe_enable or fcoe_disable, both of which are deprecated. + * When those routines are removed the functionality can be merged + * here. + */ +static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev) +{ + struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev); + + switch (cdev->enabled) { + case FCOE_CTLR_ENABLED: + return __bnx2fc_enable(ctlr); + case FCOE_CTLR_DISABLED: + return __bnx2fc_disable(ctlr); + case FCOE_CTLR_UNUSED: + default: + return -ENOTSUPP; + } +} + +enum bnx2fc_create_link_state { + BNX2FC_CREATE_LINK_DOWN, + BNX2FC_CREATE_LINK_UP, +}; + +/** + * _bnx2fc_create() - Create bnx2fc FCoE interface + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation + * @link_state: The ctlr link state on creation + * + * Called from either the libfcoe 'create' module parameter + * via fcoe_create or from fcoe_syfs's ctlr_create file. + * + * libfcoe's 'create' module parameter is deprecated so some + * consolidation of code can be done when that interface is + * removed. 
+ * + * Returns: 0 for success + */ +static int _bnx2fc_create(struct net_device *netdev, + enum fip_mode fip_mode, + enum bnx2fc_create_link_state link_state) +{ + struct fcoe_ctlr_device *cdev; + struct fcoe_ctlr *ctlr; + struct bnx2fc_interface *interface; + struct bnx2fc_hba *hba; + struct net_device *phys_dev = netdev; + struct fc_lport *lport; + struct ethtool_drvinfo drvinfo; + int rc = 0; + int vlan_id = 0; + + BNX2FC_MISC_DBG("Entered bnx2fc_create\n"); + if (fip_mode != FIP_MODE_FABRIC) { + printk(KERN_ERR "fip mode not FABRIC\n"); + return -EIO; + } + + rtnl_lock(); + + mutex_lock(&bnx2fc_dev_lock); + + if (!try_module_get(THIS_MODULE)) { + rc = -EINVAL; + goto mod_err; + } + + /* obtain physical netdev */ + if (is_vlan_dev(netdev)) + phys_dev = vlan_dev_real_dev(netdev); + + /* verify if the physical device is a netxtreme2 device */ + if (phys_dev->ethtool_ops && phys_dev->ethtool_ops->get_drvinfo) { + memset(&drvinfo, 0, sizeof(drvinfo)); + phys_dev->ethtool_ops->get_drvinfo(phys_dev, &drvinfo); + if (strncmp(drvinfo.driver, "bnx2x", strlen("bnx2x"))) { + printk(KERN_ERR PFX "Not a netxtreme2 device\n"); + rc = -EINVAL; + goto netdev_err; + } + } else { + printk(KERN_ERR PFX "unable to obtain drv_info\n"); + rc = -EINVAL; + goto netdev_err; + } + + /* obtain interface and initialize rest of the structure */ + hba = bnx2fc_hba_lookup(phys_dev); + if (!hba) { + rc = -ENODEV; + printk(KERN_ERR PFX "bnx2fc_create: hba not found\n"); + goto netdev_err; + } + + if (bnx2fc_interface_lookup(netdev)) { + rc = -EEXIST; + goto netdev_err; + } + + interface = bnx2fc_interface_create(hba, netdev, fip_mode); + if (!interface) { + printk(KERN_ERR PFX "bnx2fc_interface_create failed\n"); + rc = -ENOMEM; + goto netdev_err; + } + + if (is_vlan_dev(netdev)) { + vlan_id = vlan_dev_vlan_id(netdev); + interface->vlan_enabled = 1; + } + + ctlr = bnx2fc_to_ctlr(interface); + cdev = fcoe_ctlr_to_ctlr_dev(ctlr); + interface->vlan_id = vlan_id; + interface->tm_timeout = BNX2FC_TM_TIMEOUT; + + interface->timer_work_queue = + create_singlethread_workqueue("bnx2fc_timer_wq"); + if (!interface->timer_work_queue) { + printk(KERN_ERR PFX "ulp_init could not create timer_wq\n"); + rc = -EINVAL; + goto ifput_err; + } + + lport = bnx2fc_if_create(interface, &cdev->dev, 0); + if (!lport) { + printk(KERN_ERR PFX "Failed to create interface (%s)\n", + netdev->name); + rc = -EINVAL; + goto if_create_err; + } + + /* Add interface to if_list */ + list_add_tail(&interface->list, &if_list); + + lport->boot_time = jiffies; + + /* Make this master N_port */ + ctlr->lp = lport; + + if (link_state == BNX2FC_CREATE_LINK_UP) + cdev->enabled = FCOE_CTLR_ENABLED; + else + cdev->enabled = FCOE_CTLR_DISABLED; + + if (link_state == BNX2FC_CREATE_LINK_UP && + !bnx2fc_link_ok(lport)) { + fcoe_ctlr_link_up(ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); + } + + BNX2FC_HBA_DBG(lport, "create: START DISC\n"); + bnx2fc_start_disc(interface); + + if (link_state == BNX2FC_CREATE_LINK_UP) + interface->enabled = true; + + /* + * Release from kref_init in bnx2fc_interface_setup, on success + * lport should be holding a reference taken in bnx2fc_if_create + */ + bnx2fc_interface_put(interface); + /* put netdev that was held while calling dev_get_by_name */ + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return 0; + +if_create_err: + destroy_workqueue(interface->timer_work_queue); +ifput_err: + bnx2fc_net_cleanup(interface); + bnx2fc_interface_put(interface); + 
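+	/* bnx2fc_interface_put() drops the module reference in bnx2fc_interface_release(), so bypass the module_put() at netdev_err */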
goto mod_err; +netdev_err: + module_put(THIS_MODULE); +mod_err: + mutex_unlock(&bnx2fc_dev_lock); + rtnl_unlock(); + return rc; +} + +/** + * bnx2fc_create() - Create a bnx2fc interface + * @netdev : The net_device object the Ethernet interface to create on + * @fip_mode: The FIP mode for this creation + * + * Called from fcoe transport + * + * Returns: 0 for success + */ +static int bnx2fc_create(struct net_device *netdev, enum fip_mode fip_mode) +{ + return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP); +} + +/** + * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs + * @netdev: The net_device to be used by the allocated FCoE Controller + * + * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr + * in a link_down state. The allows the user an opportunity to configure + * the FCoE Controller from sysfs before enabling the FCoE Controller. + * + * Creating in with this routine starts the FCoE Controller in Fabric + * mode. The user can change to VN2VN or another mode before enabling. + */ +static int bnx2fc_ctlr_alloc(struct net_device *netdev) +{ + return _bnx2fc_create(netdev, FIP_MODE_FABRIC, + BNX2FC_CREATE_LINK_DOWN); +} + +/** + * bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance + * + * @cnic: Pointer to cnic device instance + * + **/ +static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic) +{ + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_entry(hba, &adapter_list, list) { + if (hba->cnic == cnic) + return hba; + } + return NULL; +} + +static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device + *netdev) +{ + struct bnx2fc_interface *interface; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_entry(interface, &if_list, list) { + if (interface->netdev == netdev) + return interface; + } + return NULL; +} + +static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device + *phys_dev) +{ + struct bnx2fc_hba *hba; + + /* Called with bnx2fc_dev_lock held */ + list_for_each_entry(hba, &adapter_list, list) { + if (hba->phys_dev == phys_dev) + return hba; + } + printk(KERN_ERR PFX "adapter_lookup: hba NULL\n"); + return NULL; +} + +/** + * bnx2fc_ulp_exit - shuts down adapter instance and frees all resources + * + * @dev: cnic device handle + */ +static void bnx2fc_ulp_exit(struct cnic_dev *dev) +{ + struct bnx2fc_hba *hba; + struct bnx2fc_interface *interface, *tmp; + + BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); + + if (!test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { + printk(KERN_ERR PFX "bnx2fc port check: %s, flags: %lx\n", + dev->netdev->name, dev->flags); + return; + } + + mutex_lock(&bnx2fc_dev_lock); + hba = bnx2fc_find_hba_for_cnic(dev); + if (!hba) { + printk(KERN_ERR PFX "bnx2fc_ulp_exit: hba not found, dev 0%p\n", + dev); + mutex_unlock(&bnx2fc_dev_lock); + return; + } + + list_del_init(&hba->list); + adapter_count--; + + list_for_each_entry_safe(interface, tmp, &if_list, list) + /* destroy not called yet, move to quiesced list */ + if (interface->hba == hba) + __bnx2fc_destroy(interface); + mutex_unlock(&bnx2fc_dev_lock); + + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, CNIC_ULP_FCOE); + bnx2fc_hba_destroy(hba); +} + +static void bnx2fc_rport_terminate_io(struct fc_rport *rport) +{ + /* This is a no-op */ +} + +/** + * bnx2fc_fcoe_reset - Resets the fcoe + * + * @shost: shost the reset is from + * + * Returns: always 0 + 
*/ +static int bnx2fc_fcoe_reset(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + fc_lport_reset(lport); + return 0; +} + + +static bool bnx2fc_match(struct net_device *netdev) +{ + struct net_device *phys_dev = netdev; + + mutex_lock(&bnx2fc_dev_lock); + if (is_vlan_dev(netdev)) + phys_dev = vlan_dev_real_dev(netdev); + + if (bnx2fc_hba_lookup(phys_dev)) { + mutex_unlock(&bnx2fc_dev_lock); + return true; + } + + mutex_unlock(&bnx2fc_dev_lock); + return false; +} + + +static struct fcoe_transport bnx2fc_transport = { + .name = {"bnx2fc"}, + .attached = false, + .list = LIST_HEAD_INIT(bnx2fc_transport.list), + .alloc = bnx2fc_ctlr_alloc, + .match = bnx2fc_match, + .create = bnx2fc_create, + .destroy = bnx2fc_destroy, + .enable = bnx2fc_enable, + .disable = bnx2fc_disable, +}; + +/** + * bnx2fc_cpu_online - Create a receive thread for an online CPU + * + * @cpu: cpu index for the online cpu + */ +static int bnx2fc_cpu_online(unsigned int cpu) +{ + struct bnx2fc_percpu_s *p; + struct task_struct *thread; + + p = &per_cpu(bnx2fc_percpu, cpu); + + thread = kthread_create_on_node(bnx2fc_percpu_io_thread, + (void *)p, cpu_to_node(cpu), + "bnx2fc_thread/%d", cpu); + if (IS_ERR(thread)) + return PTR_ERR(thread); + + /* bind thread to the cpu */ + kthread_bind(thread, cpu); + p->iothread = thread; + wake_up_process(thread); + return 0; +} + +static int bnx2fc_cpu_offline(unsigned int cpu) +{ + struct bnx2fc_percpu_s *p; + struct task_struct *thread; + struct bnx2fc_work *work, *tmp; + + BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu); + + /* Prevent any new work from being queued for this CPU */ + p = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&p->fp_work_lock); + thread = p->iothread; + p->iothread = NULL; + + /* Free all work in the list */ + list_for_each_entry_safe(work, tmp, &p->work_list, list) { + list_del_init(&work->list); + bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data, + work->num_rq, work->task); + kfree(work); + } + + spin_unlock_bh(&p->fp_work_lock); + + if (thread) + kthread_stop(thread); + return 0; +} + +static int bnx2fc_slave_configure(struct scsi_device *sdev) +{ + if (!bnx2fc_queue_depth) + return 0; + + scsi_change_queue_depth(sdev, bnx2fc_queue_depth); + return 0; +} + +static enum cpuhp_state bnx2fc_online_state; + +/** + * bnx2fc_mod_init - module init entry point + * + * Initialize driver wide global data structures, and register + * with cnic module + **/ +static int __init bnx2fc_mod_init(void) +{ + struct fcoe_percpu_s *bg; + struct task_struct *l2_thread; + int rc = 0; + unsigned int cpu = 0; + struct bnx2fc_percpu_s *p; + + printk(KERN_INFO PFX "%s", version); + + /* register as a fcoe transport */ + rc = fcoe_transport_attach(&bnx2fc_transport); + if (rc) { + printk(KERN_ERR "failed to register an fcoe transport, check " + "if libfcoe is loaded\n"); + goto out; + } + + INIT_LIST_HEAD(&adapter_list); + INIT_LIST_HEAD(&if_list); + mutex_init(&bnx2fc_dev_lock); + adapter_count = 0; + + /* Attach FC transport template */ + rc = bnx2fc_attach_transport(); + if (rc) + goto detach_ft; + + bnx2fc_wq = alloc_workqueue("bnx2fc", 0, 0); + if (!bnx2fc_wq) { + rc = -ENOMEM; + goto release_bt; + } + + bg = &bnx2fc_global; + skb_queue_head_init(&bg->fcoe_rx_list); + l2_thread = kthread_run(bnx2fc_l2_rcv_thread, + (void *)bg, + "bnx2fc_l2_thread"); + if (IS_ERR(l2_thread)) { + rc = PTR_ERR(l2_thread); + goto free_wq; + } + spin_lock_bh(&bg->fcoe_rx_list.lock); + bg->kthread = l2_thread; + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + 
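+	/* Initialize the per-CPU work lists and locks before registering the CPU hotplug callbacks below */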
for_each_possible_cpu(cpu) { + p = &per_cpu(bnx2fc_percpu, cpu); + INIT_LIST_HEAD(&p->work_list); + spin_lock_init(&p->fp_work_lock); + } + + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", + bnx2fc_cpu_online, bnx2fc_cpu_offline); + if (rc < 0) + goto stop_thread; + bnx2fc_online_state = rc; + + cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); + return 0; + +stop_thread: + kthread_stop(l2_thread); +free_wq: + destroy_workqueue(bnx2fc_wq); +release_bt: + bnx2fc_release_transport(); +detach_ft: + fcoe_transport_detach(&bnx2fc_transport); +out: + return rc; +} + +static void __exit bnx2fc_mod_exit(void) +{ + LIST_HEAD(to_be_deleted); + struct bnx2fc_hba *hba, *next; + struct fcoe_percpu_s *bg; + struct task_struct *l2_thread; + struct sk_buff *skb; + + /* + * NOTE: Since cnic calls register_driver routine rtnl_lock, + * it will have higher precedence than bnx2fc_dev_lock. + * unregister_device() cannot be called with bnx2fc_dev_lock + * held. + */ + mutex_lock(&bnx2fc_dev_lock); + list_splice_init(&adapter_list, &to_be_deleted); + adapter_count = 0; + mutex_unlock(&bnx2fc_dev_lock); + + /* Unregister with cnic */ + list_for_each_entry_safe(hba, next, &to_be_deleted, list) { + list_del_init(&hba->list); + printk(KERN_ERR PFX "MOD_EXIT:destroy hba = 0x%p\n", + hba); + bnx2fc_ulp_stop(hba); + /* unregister cnic device */ + if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, + &hba->reg_with_cnic)) + hba->cnic->unregister_device(hba->cnic, + CNIC_ULP_FCOE); + bnx2fc_hba_destroy(hba); + } + cnic_unregister_driver(CNIC_ULP_FCOE); + + /* Destroy global thread */ + bg = &bnx2fc_global; + spin_lock_bh(&bg->fcoe_rx_list.lock); + l2_thread = bg->kthread; + bg->kthread = NULL; + while ((skb = __skb_dequeue(&bg->fcoe_rx_list)) != NULL) + kfree_skb(skb); + + spin_unlock_bh(&bg->fcoe_rx_list.lock); + + if (l2_thread) + kthread_stop(l2_thread); + + cpuhp_remove_state(bnx2fc_online_state); + + destroy_workqueue(bnx2fc_wq); + /* + * detach from scsi transport + * must happen after all destroys are done + */ + bnx2fc_release_transport(); + + /* detach from fcoe transport */ + fcoe_transport_detach(&bnx2fc_transport); +} + +module_init(bnx2fc_mod_init); +module_exit(bnx2fc_mod_exit); + +static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = { + .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled, + .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb, + .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb, + + .get_fcoe_fcf_selected = fcoe_fcf_get_selected, + .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id, +}; + +static struct fc_function_template bnx2fc_transport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + 
.set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = bnx2fc_get_host_stats, + + .issue_fc_host_lip = bnx2fc_fcoe_reset, + + .terminate_rport_io = bnx2fc_rport_terminate_io, + + .vport_create = bnx2fc_vport_create, + .vport_delete = bnx2fc_vport_destroy, + .vport_disable = bnx2fc_vport_disable, + .bsg_request = fc_lport_bsg_request, +}; + +static struct fc_function_template bnx2fc_vport_xport_function = { + .show_host_node_name = 1, + .show_host_port_name = 1, + .show_host_supported_classes = 1, + .show_host_supported_fc4s = 1, + .show_host_active_fc4s = 1, + .show_host_maxframe_size = 1, + + .show_host_port_id = 1, + .show_host_supported_speeds = 1, + .get_host_speed = fc_get_host_speed, + .show_host_speed = 1, + .show_host_port_type = 1, + .get_host_port_state = fc_get_host_port_state, + .show_host_port_state = 1, + .show_host_symbolic_name = 1, + + .dd_fcrport_size = (sizeof(struct fc_rport_libfc_priv) + + sizeof(struct bnx2fc_rport)), + .show_rport_maxframe_size = 1, + .show_rport_supported_classes = 1, + + .show_host_fabric_name = 1, + .show_starget_node_name = 1, + .show_starget_port_name = 1, + .show_starget_port_id = 1, + .set_rport_dev_loss_tmo = fc_set_rport_loss_tmo, + .show_rport_dev_loss_tmo = 1, + .get_fc_host_stats = fc_get_host_stats, + .issue_fc_host_lip = bnx2fc_fcoe_reset, + .terminate_rport_io = fc_rport_terminate_io, + .bsg_request = fc_lport_bsg_request, +}; + +/* + * Additional scsi_host attributes. + */ +static ssize_t +bnx2fc_tm_timeout_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + + sprintf(buf, "%u\n", interface->tm_timeout); + return strlen(buf); +} + +static ssize_t +bnx2fc_tm_timeout_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct fc_lport *lport = shost_priv(shost); + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + int rval, val; + + rval = kstrtouint(buf, 10, &val); + if (rval) + return rval; + if (val > 255) + return -ERANGE; + + interface->tm_timeout = (u8)val; + return strlen(buf); +} + +static DEVICE_ATTR(tm_timeout, S_IRUGO|S_IWUSR, bnx2fc_tm_timeout_show, + bnx2fc_tm_timeout_store); + +static struct attribute *bnx2fc_host_attrs[] = { + &dev_attr_tm_timeout.attr, + NULL, +}; + +ATTRIBUTE_GROUPS(bnx2fc_host); + +/* + * scsi_host_template structure used while registering with SCSI-ml + */ +static struct scsi_host_template bnx2fc_shost_template = { + .module = THIS_MODULE, + .name = "QLogic Offload FCoE Initiator", + .queuecommand = bnx2fc_queuecommand, + .eh_timed_out = fc_eh_timed_out, + .eh_abort_handler = bnx2fc_eh_abort, /* abts */ + .eh_device_reset_handler = bnx2fc_eh_device_reset, /* lun reset */ + .eh_target_reset_handler = bnx2fc_eh_target_reset, /* tgt reset */ + .eh_host_reset_handler = fc_eh_host_reset, + .slave_alloc = fc_slave_alloc, + .change_queue_depth = scsi_change_queue_depth, + .this_id = -1, + .cmd_per_lun = 3, + .sg_tablesize = BNX2FC_MAX_BDS_PER_CMD, + .dma_boundary = 0x7fff, + .max_sectors = 0x3fbf, + .track_queue_depth = 1, + .slave_configure = bnx2fc_slave_configure, + .shost_groups = bnx2fc_host_groups, + .cmd_size = sizeof(struct bnx2fc_priv), +}; + +static struct libfc_function_template bnx2fc_libfc_fcn_templ = { 
+ .frame_send = bnx2fc_xmit, + .elsct_send = bnx2fc_elsct_send, + .fcp_abort_io = bnx2fc_abort_io, + .fcp_cleanup = bnx2fc_cleanup, + .get_lesb = fcoe_get_lesb, + .rport_event_callback = bnx2fc_rport_event_handler, +}; + +/* + * bnx2fc_cnic_cb - global template of bnx2fc - cnic driver interface + * structure carrying callback function pointers + */ +static struct cnic_ulp_ops bnx2fc_cnic_cb = { + .owner = THIS_MODULE, + .cnic_init = bnx2fc_ulp_init, + .cnic_exit = bnx2fc_ulp_exit, + .cnic_start = bnx2fc_ulp_start, + .cnic_stop = bnx2fc_ulp_stop, + .indicate_kcqes = bnx2fc_indicate_kcqe, + .indicate_netevent = bnx2fc_indicate_netevent, + .cnic_get_stats = bnx2fc_ulp_get_stats, +}; diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c new file mode 100644 index 000000000..776544385 --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -0,0 +1,2199 @@ +/* bnx2fc_hwi.c: QLogic Linux FCoE offload driver. + * This file contains the low level functions that interact + * with the 57712 FCoE firmware. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); + +static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, + struct fcoe_kcqe *new_cqe_kcqe); +static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe); +static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe); +static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code); +static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *destroy_kcqe); + +int bnx2fc_send_stat_req(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_stat stat_req; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = 0; + + memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat)); + stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT; + stat_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma; + stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32); + + kwqe_arr[0] = (struct kwqe *) &stat_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w + * + * @hba: adapter structure pointer + * + * Send down the FCoE firmware init KWQEs, which initiate the initial handshake + * with the f/w.
+ * + */ +int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_init1 fcoe_init1; + struct fcoe_kwqe_init2 fcoe_init2; + struct fcoe_kwqe_init3 fcoe_init3; + struct kwqe *kwqe_arr[3]; + int num_kwqes = 3; + int rc = 0; + + if (!hba->cnic) { + printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n"); + return -ENODEV; + } + + /* fill init1 KWQE */ + memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1)); + fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1; + fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init1.num_tasks = hba->max_tasks; + fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX; + fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX; + fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ; + fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX; + fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma; + fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32); + fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma; + fcoe_init1.task_list_pbl_addr_hi = + (u32) ((u64) hba->task_ctx_bd_dma >> 32); + fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU; + + fcoe_init1.flags = (PAGE_SHIFT << + FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT); + + fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG; + + /* fill init2 KWQE */ + memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2)); + fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2; + fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION; + fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION; + + + fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma; + fcoe_init2.hash_tbl_pbl_addr_hi = (u32) + ((u64) hba->hash_tbl_pbl_dma >> 32); + + fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma; + fcoe_init2.t2_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_dma >> 32); + + fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma; + fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32) + ((u64) hba->t2_hash_tbl_ptr_dma >> 32); + + fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS; + + /* fill init3 KWQE */ + memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3)); + fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3; + fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + fcoe_init3.error_bit_map_lo = 0xffffffff; + fcoe_init3.error_bit_map_hi = 0xffffffff; + + /* + * enable both cached connection and cached tasks + * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both + */ + fcoe_init3.perf_config = 3; + + kwqe_arr[0] = (struct kwqe *) &fcoe_init1; + kwqe_arr[1] = (struct kwqe *) &fcoe_init2; + kwqe_arr[2] = (struct kwqe *) &fcoe_init3; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} +int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba) +{ + struct fcoe_kwqe_destroy fcoe_destroy; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = -1; + + /* fill destroy KWQE */ + memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy)); + fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY; + fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE << + FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + kwqe_arr[0] = (struct kwqe *) &fcoe_destroy; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process + * + * @port: port 
structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_ofld_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct fc_lport *lport = port->lport; + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct bnx2fc_hba *hba = interface->hba; + struct kwqe *kwqe_arr[4]; + struct fcoe_kwqe_conn_offload1 ofld_req1; + struct fcoe_kwqe_conn_offload2 ofld_req2; + struct fcoe_kwqe_conn_offload3 ofld_req3; + struct fcoe_kwqe_conn_offload4 ofld_req4; + struct fc_rport_priv *rdata = tgt->rdata; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 4; + u32 port_id; + int rc = 0; + u16 conn_id; + + /* Initialize offload request 1 structure */ + memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1)); + + ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1; + ofld_req1.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + + conn_id = (u16)tgt->fcoe_conn_id; + ofld_req1.fcoe_conn_id = conn_id; + + + ofld_req1.sq_addr_lo = (u32) tgt->sq_dma; + ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32); + + ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma; + ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32); + + ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma; + ofld_req1.rq_first_pbe_addr_hi = + (u32)((u64) tgt->rq_dma >> 32); + + ofld_req1.rq_prod = 0x8000; + + /* Initialize offload request 2 structure */ + memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2)); + + ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2; + ofld_req2.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size; + + ofld_req2.cq_addr_lo = (u32) tgt->cq_dma; + ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32); + + ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma; + ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32); + + ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma; + ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32); + + /* Initialize offload request 3 structure */ + memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3)); + + ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3; + ofld_req3.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req3.vlan_tag = interface->vlan_id << + FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT; + ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT; + + port_id = fc_host_port_id(lport->host); + if (port_id == 0) { + BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n"); + return -EINVAL; + } + + /* + * Store s_id of the initiator for further reference. 
This will + * be used during disable/destroy during linkdown processing as + * when the lport is reset, the port_id also is reset to 0 + */ + tgt->sid = port_id; + ofld_req3.s_id[0] = (port_id & 0x000000FF); + ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + ofld_req3.d_id[0] = (port_id & 0x000000FF); + ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8; + ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16; + + ofld_req3.tx_total_conc_seqs = rdata->max_seq; + + ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq; + ofld_req3.rx_max_fc_pay_len = lport->mfs; + + ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS; + ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS; + ofld_req3.rx_open_seqs_exch_c3 = 1; + + ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma; + ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32); + + /* set mul_n_port_ids supported flag to 0, until it is supported */ + ofld_req3.flags = 0; + /* + ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) << + FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT); + */ + /* Info from PLOGI response */ + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT); + + ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT); + + /* + * Info from PRLI response, this info is used for sequence level error + * recovery support + */ + if (tgt->dev_type == TYPE_TAPE) { + ofld_req3.flags |= 1 << + FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT; + ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED) + ? 1 : 0) << + FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT); + } + + /* vlan flag */ + ofld_req3.flags |= (interface->vlan_enabled << + FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT); + + /* C2_VALID and ACK flags are not set as they are not supported */ + + + /* Initialize offload request 4 structure */ + memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4)); + ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4; + ofld_req4.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20; + + + ofld_req4.src_mac_addr_lo[0] = port->data_src_addr[5]; + /* local mac */ + ofld_req4.src_mac_addr_lo[1] = port->data_src_addr[4]; + ofld_req4.src_mac_addr_mid[0] = port->data_src_addr[3]; + ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; + ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; + ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; + ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; + /* fcf mac */ + ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; + ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; + ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; + ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; + ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; + + ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; + ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); + + ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma; + ofld_req4.confq_pbl_base_addr_hi = + (u32)((u64) tgt->confq_pbl_dma >> 32); + + kwqe_arr[0] = (struct kwqe *) &ofld_req1; + kwqe_arr[1] = (struct kwqe *) &ofld_req2; + kwqe_arr[2] = (struct kwqe *) &ofld_req3; + kwqe_arr[3] = (struct kwqe *) &ofld_req4; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * 
bnx2fc_send_session_enable_req - initiates FCoE Session enablement + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_enable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct kwqe *kwqe_arr[2]; + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct bnx2fc_hba *hba = interface->hba; + struct fcoe_kwqe_conn_enable_disable enbl_req; + struct fc_lport *lport = port->lport; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&enbl_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN; + enbl_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + enbl_req.src_mac_addr_lo[0] = port->data_src_addr[5]; + /* local mac */ + enbl_req.src_mac_addr_lo[1] = port->data_src_addr[4]; + enbl_req.src_mac_addr_mid[0] = port->data_src_addr[3]; + enbl_req.src_mac_addr_mid[1] = port->data_src_addr[2]; + enbl_req.src_mac_addr_hi[0] = port->data_src_addr[1]; + enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; + memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); + + enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; + enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; + enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; + enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; + enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; + enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; + + port_id = fc_host_port_id(lport->host); + if (port_id != tgt->sid) { + printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x," + "sid = 0x%x\n", port_id, tgt->sid); + port_id = tgt->sid; + } + enbl_req.s_id[0] = (port_id & 0x000000FF); + enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + port_id = rport->port_id; + enbl_req.d_id[0] = (port_id & 0x000000FF); + enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + enbl_req.vlan_tag = interface->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + enbl_req.vlan_flag = interface->vlan_enabled; + enbl_req.context_id = tgt->context_id; + enbl_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &enbl_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + return rc; +} + +/** + * bnx2fc_send_session_disable_req - initiates FCoE Session disable + * + * @port: port structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_disable_req(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct bnx2fc_interface *interface = port->priv; + struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface); + struct bnx2fc_hba *hba = interface->hba; + struct fcoe_kwqe_conn_enable_disable disable_req; + struct kwqe *kwqe_arr[2]; + struct fc_rport *rport = tgt->rport; + int num_kwqes = 1; + int rc = 0; + u32 port_id; + + memset(&disable_req, 0x00, + sizeof(struct fcoe_kwqe_conn_enable_disable)); + disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN; + disable_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + disable_req.src_mac_addr_lo[0] = tgt->src_addr[5]; + disable_req.src_mac_addr_lo[1] = tgt->src_addr[4]; + disable_req.src_mac_addr_mid[0] = tgt->src_addr[3]; + disable_req.src_mac_addr_mid[1] = tgt->src_addr[2]; + 
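	/* local mac - data source address saved at session enable */ +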
disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; + disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; + + disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5]; + disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4]; + disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3]; + disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2]; + disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1]; + disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0]; + + port_id = tgt->sid; + disable_req.s_id[0] = (port_id & 0x000000FF); + disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16; + + + port_id = rport->port_id; + disable_req.d_id[0] = (port_id & 0x000000FF); + disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8; + disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16; + disable_req.context_id = tgt->context_id; + disable_req.conn_id = tgt->fcoe_conn_id; + disable_req.vlan_tag = interface->vlan_id << + FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT; + disable_req.vlan_tag |= + 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT; + disable_req.vlan_flag = interface->vlan_enabled; + + kwqe_arr[0] = (struct kwqe *) &disable_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +/** + * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy + * + * @hba: adapter structure pointer + * @tgt: bnx2fc_rport structure pointer + */ +int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + struct fcoe_kwqe_conn_destroy destroy_req; + struct kwqe *kwqe_arr[2]; + int num_kwqes = 1; + int rc = 0; + + memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy)); + destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN; + destroy_req.hdr.flags = + (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT); + + destroy_req.context_id = tgt->context_id; + destroy_req.conn_id = tgt->fcoe_conn_id; + + kwqe_arr[0] = (struct kwqe *) &destroy_req; + + if (hba->cnic && hba->cnic->submit_kwqes) + rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes); + + return rc; +} + +static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport) +{ + struct bnx2fc_lport *blport; + + spin_lock_bh(&hba->hba_lock); + list_for_each_entry(blport, &hba->vports, list) { + if (blport->lport == lport) { + spin_unlock_bh(&hba->hba_lock); + return true; + } + } + spin_unlock_bh(&hba->hba_lock); + return false; + +} + + +static void bnx2fc_unsol_els_work(struct work_struct *work) +{ + struct bnx2fc_unsol_els *unsol_els; + struct fc_lport *lport; + struct bnx2fc_hba *hba; + struct fc_frame *fp; + + unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work); + lport = unsol_els->lport; + fp = unsol_els->fp; + hba = unsol_els->hba; + if (is_valid_lport(hba, lport)) + fc_exch_recv(lport, fp); + kfree(unsol_els); +} + +void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt, + unsigned char *buf, + u32 frame_len, u16 l2_oxid) +{ + struct fcoe_port *port = tgt->port; + struct fc_lport *lport = port->lport; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_unsol_els *unsol_els; + struct fc_frame_header *fh; + struct fc_frame *fp; + struct sk_buff *skb; + u32 payload_len; + u32 crc; + u8 op; + + + unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); + if (!unsol_els) { + BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", + l2_oxid, frame_len); + + 
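	/* RQ buffer holds the FC header followed by the frame payload */ +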
payload_len = frame_len - sizeof(struct fc_frame_header); + + fp = fc_frame_alloc(lport, payload_len); + if (!fp) { + printk(KERN_ERR PFX "fc_frame_alloc failure\n"); + kfree(unsol_els); + return; + } + + fh = (struct fc_frame_header *) fc_frame_header_get(fp); + /* Copy FC Frame header and payload into the frame */ + memcpy(fh, buf, frame_len); + + if (l2_oxid != FC_XID_UNKNOWN) + fh->fh_ox_id = htons(l2_oxid); + + skb = fp_skb(fp); + + if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || + (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { + + if (fh->fh_type == FC_TYPE_ELS) { + op = fc_frame_payload_op(fp); + if ((op == ELS_TEST) || (op == ELS_ESTC) || + (op == ELS_FAN) || (op == ELS_CSU)) { + /* + * No need to reply for these + * ELS requests + */ + printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); + kfree_skb(skb); + kfree(unsol_els); + return; + } + } + crc = fcoe_fc_crc(fp); + fc_frame_init(fp); + fr_dev(fp) = lport; + fr_sof(fp) = FC_SOF_I3; + fr_eof(fp) = FC_EOF_T; + fr_crc(fp) = cpu_to_le32(~crc); + unsol_els->lport = lport; + unsol_els->hba = interface->hba; + unsol_els->fp = fp; + INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); + queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); + } else { + BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); + kfree_skb(skb); + kfree(unsol_els); + } +} + +static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) +{ + u8 num_rq; + struct fcoe_err_report_entry *err_entry; + unsigned char *rq_data; + unsigned char *buf = NULL, *buf1; + int i; + u16 xid; + u32 frame_len, len; + struct bnx2fc_cmd *io_req = NULL; + struct bnx2fc_interface *interface = tgt->port->priv; + struct bnx2fc_hba *hba = interface->hba; + int rc = 0; + u64 err_warn_bit_map; + u8 err_warn = 0xff; + + + BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe); + switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) { + case FCOE_UNSOLICITED_FRAME_CQE_TYPE: + frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >> + FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT; + + num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ; + + spin_lock_bh(&tgt->tgt_lock); + rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq); + spin_unlock_bh(&tgt->tgt_lock); + + if (rq_data) { + buf = rq_data; + } else { + buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ), + GFP_ATOMIC); + + if (!buf1) { + BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n"); + break; + } + + for (i = 0; i < num_rq; i++) { + spin_lock_bh(&tgt->tgt_lock); + rq_data = (unsigned char *) + bnx2fc_get_next_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); + len = BNX2FC_RQ_BUF_SZ; + memcpy(buf1, rq_data, len); + buf1 += len; + } + } + bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, + FC_XID_UNKNOWN); + + if (buf != rq_data) + kfree(buf); + spin_lock_bh(&tgt->tgt_lock); + bnx2fc_return_rqe(tgt, num_rq); + spin_unlock_bh(&tgt->tgt_lock); + break; + + case FCOE_ERROR_DETECTION_CQE_TYPE: + /* + * In case of error reporting CQE a single RQ entry + * is consumed. 
+ */ + spin_lock_bh(&tgt->tgt_lock); + num_rq = 1; + err_entry = (struct fcoe_err_report_entry *) + bnx2fc_get_next_rqe(tgt, 1); + xid = err_entry->fc_hdr.ox_id; + BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid); + BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n", + err_entry->data.err_warn_bitmap_hi, + err_entry->data.err_warn_bitmap_lo); + BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n", + err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); + + if (xid > hba->max_xid) { + BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", + xid); + goto ret_err_rqe; + } + + + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + if (!io_req) + goto ret_err_rqe; + + if (io_req->cmd_type != BNX2FC_SCSI_CMD) { + printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); + goto ret_err_rqe; + } + + if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in " + "progress.. ignore unsol err\n"); + goto ret_err_rqe; + } + + err_warn_bit_map = (u64) + ((u64)err_entry->data.err_warn_bitmap_hi << 32) | + (u64)err_entry->data.err_warn_bitmap_lo; + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { + if (err_warn_bit_map & (u64)((u64)1 << i)) { + err_warn = i; + break; + } + } + + /* + * If ABTS is already in progress, and FW error is + * received after that, do not cancel the timeout_work + * and let the error recovery continue by explicitly + * logging out the target, when the ABTS eventually + * times out. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + printk(KERN_ERR PFX "err_warn: io_req (0x%x) already " + "in ABTS processing\n", xid); + goto ret_err_rqe; + } + BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn); + if (tgt->dev_type != TYPE_TAPE) + goto skip_rec; + switch (err_warn) { + case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION: + case FCOE_ERROR_CODE_DATA_OOO_RO: + case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT: + case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET: + case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ: + case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET: + BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n", + xid); + memcpy(&io_req->err_entry, err_entry, + sizeof(struct fcoe_err_report_entry)); + if (!test_bit(BNX2FC_FLAG_SRR_SENT, + &io_req->req_flags)) { + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_rec(io_req); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) + goto skip_rec; + } else + printk(KERN_ERR PFX "SRR in progress\n"); + goto ret_err_rqe; + default: + break; + } + +skip_rec: + set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags); + /* + * Cancel the timeout_work, as we received IO + * completion with FW error. + */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, bnx2fc_cmd_release); + + rc = bnx2fc_initiate_abts(io_req); + if (rc != SUCCESS) { + printk(KERN_ERR PFX "err_warn: initiate_abts " + "failed xid = 0x%x. issue cleanup\n", + io_req->xid); + bnx2fc_initiate_cleanup(io_req); + } +ret_err_rqe: + bnx2fc_return_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); + break; + + case FCOE_WARNING_DETECTION_CQE_TYPE: + /* + *In case of warning reporting CQE a single RQ entry + * is consumes. 
+ */ + spin_lock_bh(&tgt->tgt_lock); + num_rq = 1; + err_entry = (struct fcoe_err_report_entry *) + bnx2fc_get_next_rqe(tgt, 1); + xid = cpu_to_be16(err_entry->fc_hdr.ox_id); + BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid); + BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x", + err_entry->data.err_warn_bitmap_hi, + err_entry->data.err_warn_bitmap_lo); + BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x", + err_entry->data.tx_buf_off, err_entry->data.rx_buf_off); + + if (xid > hba->max_xid) { + BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid); + goto ret_warn_rqe; + } + + err_warn_bit_map = (u64) + ((u64)err_entry->data.err_warn_bitmap_hi << 32) | + (u64)err_entry->data.err_warn_bitmap_lo; + for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) { + if (err_warn_bit_map & ((u64)1 << i)) { + err_warn = i; + break; + } + } + BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn); + + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + if (!io_req) + goto ret_warn_rqe; + + if (io_req->cmd_type != BNX2FC_SCSI_CMD) { + printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n"); + goto ret_warn_rqe; + } + + memcpy(&io_req->err_entry, err_entry, + sizeof(struct fcoe_err_report_entry)); + + if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION) + /* REC_TOV is not a warning code */ + BUG_ON(1); + else + BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n"); +ret_warn_rqe: + bnx2fc_return_rqe(tgt, 1); + spin_unlock_bh(&tgt->tgt_lock); + break; + + default: + printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n"); + break; + } +} + +void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe, + unsigned char *rq_data, u8 num_rq, + struct fcoe_task_ctx_entry *task) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct bnx2fc_cmd *io_req; + + u16 xid; + u8 cmd_type; + u8 rx_state = 0; + + spin_lock_bh(&tgt->tgt_lock); + + xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; + io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid]; + + if (io_req == NULL) { + printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n"); + spin_unlock_bh(&tgt->tgt_lock); + return; + } + + /* Timestamp IO completion time */ + cmd_type = io_req->cmd_type; + + rx_state = ((task->rxwr_txrd.var_ctx.rx_flags & + FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >> + FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT); + + /* Process other IO completion types */ + switch (cmd_type) { + case BNX2FC_SCSI_CMD: + if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) { + bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq, + rq_data); + spin_unlock_bh(&tgt->tgt_lock); + return; + } + + if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) + bnx2fc_process_abts_compl(io_req, task, num_rq); + else if (rx_state == + FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + else + printk(KERN_ERR PFX "Invalid rx state - %d\n", + rx_state); + break; + + case BNX2FC_TASK_MGMT_CMD: + BNX2FC_IO_DBG(io_req, "Processing TM complete\n"); + bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data); + break; + + case BNX2FC_ABTS: + /* + * ABTS request received by firmware. 
ABTS response + * will be delivered to the task belonging to the IO + * that was aborted + */ + BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + case BNX2FC_ELS: + if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) + bnx2fc_process_els_compl(io_req, task, num_rq); + else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED) + bnx2fc_process_abts_compl(io_req, task, num_rq); + else if (rx_state == + FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED) + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + else + printk(KERN_ERR PFX "Invalid rx state = %d\n", + rx_state); + break; + + case BNX2FC_CLEANUP: + BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + case BNX2FC_SEQ_CLEANUP: + BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n", + io_req->xid); + bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + break; + + default: + printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type); + break; + } + spin_unlock_bh(&tgt->tgt_lock); +} + +void bnx2fc_arm_cq(struct bnx2fc_rport *tgt) +{ + struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; + u32 msg; + + wmb(); + rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT); + msg = *((u32 *)rx_db); + writel(cpu_to_le32(msg), tgt->ctx_base); + +} + +static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe, + unsigned char *rq_data, u8 num_rq, + struct fcoe_task_ctx_entry *task) +{ + struct bnx2fc_work *work; + work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC); + if (!work) + return NULL; + + INIT_LIST_HEAD(&work->list); + work->tgt = tgt; + work->wqe = wqe; + work->num_rq = num_rq; + work->task = task; + if (rq_data) + memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ); + + return work; +} + +/* Pending work request completion */ +static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) +{ + unsigned int cpu = wqe % num_possible_cpus(); + struct bnx2fc_percpu_s *fps; + struct bnx2fc_work *work; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + unsigned char *rq_data = NULL; + unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ]; + int task_idx, index; + u16 xid; + u8 num_rq; + int i; + + xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID; + if (xid >= hba->max_tasks) { + pr_err(PFX "ERROR:xid out of range\n"); + return false; + } + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx]; + task = &task_page[index]; + + num_rq = ((task->rxwr_txrd.var_ctx.rx_flags & + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >> + FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT); + + memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ); + + if (!num_rq) + goto num_rq_zero; + + rq_data = bnx2fc_get_next_rqe(tgt, 1); + + if (num_rq > 1) { + /* We do not need extra sense data */ + for (i = 1; i < num_rq; i++) + bnx2fc_get_next_rqe(tgt, 1); + } + + if (rq_data) + memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ); + + /* return RQ entries */ + for (i = 0; i < num_rq; i++) + bnx2fc_return_rqe(tgt, 1); + +num_rq_zero: + + fps = &per_cpu(bnx2fc_percpu, cpu); + spin_lock_bh(&fps->fp_work_lock); + if (fps->iothread) { + work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff, + num_rq, task); + if (work) 
{ + list_add_tail(&work->list, &fps->work_list); + wake_up_process(fps->iothread); + spin_unlock_bh(&fps->fp_work_lock); + return true; + } + } + spin_unlock_bh(&fps->fp_work_lock); + bnx2fc_process_cq_compl(tgt, wqe, + rq_data_buff, num_rq, task); + + return true; +} + +int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) +{ + struct fcoe_cqe *cq; + u32 cq_cons; + struct fcoe_cqe *cqe; + u32 num_free_sqes = 0; + u32 num_cqes = 0; + u16 wqe; + + /* + * cq_lock is a low contention lock used to protect + * the CQ data structure from being freed up during + * the upload operation + */ + spin_lock_bh(&tgt->cq_lock); + + if (!tgt->cq) { + printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n"); + spin_unlock_bh(&tgt->cq_lock); + return 0; + } + cq = tgt->cq; + cq_cons = tgt->cq_cons_idx; + cqe = &cq[cq_cons]; + + while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) == + (tgt->cq_curr_toggle_bit << + FCOE_CQE_TOGGLE_BIT_SHIFT)) { + + /* new entry on the cq */ + if (wqe & FCOE_CQE_CQE_TYPE) { + /* Unsolicited event notification */ + bnx2fc_process_unsol_compl(tgt, wqe); + } else { + if (bnx2fc_pending_work(tgt, wqe)) + num_free_sqes++; + } + cqe++; + tgt->cq_cons_idx++; + num_cqes++; + + if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { + tgt->cq_cons_idx = 0; + cqe = cq; + tgt->cq_curr_toggle_bit = + 1 - tgt->cq_curr_toggle_bit; + } + } + if (num_cqes) { + /* Arm CQ only if doorbell is mapped */ + if (tgt->ctx_base) + bnx2fc_arm_cq(tgt); + atomic_add(num_free_sqes, &tgt->free_sqes); + } + spin_unlock_bh(&tgt->cq_lock); + return 0; +} + +/** + * bnx2fc_fastpath_notification - process global event queue (KCQ) + * + * @hba: adapter structure pointer + * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry + * + * Fast path event notification handler + */ +static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba, + struct fcoe_kcqe *new_cqe_kcqe) +{ + u32 conn_id = new_cqe_kcqe->fcoe_conn_id; + struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id]; + + if (!tgt) { + printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id); + return; + } + + bnx2fc_process_new_cqes(tgt); +} + +/** + * bnx2fc_process_ofld_cmpl - process FCoE session offload completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session offload completion, enable the session if offload is + * successful. + */ +static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + struct bnx2fc_interface *interface; + u32 conn_id; + u32 context_id; + + conn_id = ofld_kcqe->fcoe_conn_id; + context_id = ofld_kcqe->fcoe_conn_context_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n"); + return; + } + BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + interface = tgt->port->priv; + if (hba != interface->hba) { + printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mismatch\n"); + goto ofld_cmpl_err; + } + /* + * cnic has allocated a context_id for this session; use this + * while enabling the session. 
+ */ + tgt->context_id = context_id; + if (ofld_kcqe->completion_status) { + if (ofld_kcqe->completion_status == + FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) { + printk(KERN_ERR PFX "unable to allocate FCoE context " + "resources\n"); + set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags); + } + } else { + /* FW offload request successfully completed */ + set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + } +ofld_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +/** + * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion + * + * @hba: adapter structure pointer + * @ofld_kcqe: connection offload kcqe pointer + * + * handle session enable completion, mark the rport as ready + */ + +static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *ofld_kcqe) +{ + struct bnx2fc_rport *tgt; + struct bnx2fc_interface *interface; + u32 conn_id; + u32 context_id; + + context_id = ofld_kcqe->fcoe_conn_context_id; + conn_id = ofld_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n", + ofld_kcqe->fcoe_conn_context_id); + + /* + * context_id should be the same for this target during offload + * and enable + */ + if (tgt->context_id != context_id) { + printk(KERN_ERR PFX "context id mismatch\n"); + return; + } + interface = tgt->port->priv; + if (hba != interface->hba) { + printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mismatch\n"); + goto enbl_cmpl_err; + } + if (!ofld_kcqe->completion_status) + /* enable successful - rport ready for issuing IOs */ + set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + +enbl_cmpl_err: + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *disable_kcqe) +{ + + struct bnx2fc_rport *tgt; + u32 conn_id; + + conn_id = disable_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id); + + if (disable_kcqe->completion_status) { + printk(KERN_ERR PFX "Disable failed with cmpl status %d\n", + disable_kcqe->completion_status); + set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } else { + /* disable successful */ + BNX2FC_TGT_DBG(tgt, "disable successful\n"); + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } +} + +static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba, + struct fcoe_kcqe *destroy_kcqe) +{ + struct bnx2fc_rport *tgt; + u32 conn_id; + + conn_id = destroy_kcqe->fcoe_conn_id; + tgt = hba->tgt_ofld_list[conn_id]; + if (!tgt) { + printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n"); + return; + } + + BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id); + + if (destroy_kcqe->completion_status) { + printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n", + destroy_kcqe->completion_status); + return; + } else { + /* destroy successful */ + BNX2FC_TGT_DBG(tgt, "upload successful\n"); + clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags); + 
set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); + } +} + +static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code) +{ + switch (err_code) { + case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE: + printk(KERN_ERR PFX "init_failure due to invalid opcode\n"); + break; + + case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE: + printk(KERN_ERR PFX "init failed due to ctx alloc failure\n"); + break; + + case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR: + printk(KERN_ERR PFX "init_failure due to NIC error\n"); + break; + case FCOE_KCQE_COMPLETION_STATUS_ERROR: + printk(KERN_ERR PFX "init failure due to compl status err\n"); + break; + case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION: + printk(KERN_ERR PFX "init failure due to HSI mismatch\n"); + break; + default: + printk(KERN_ERR PFX "Unknown Error code %d\n", err_code); + } +} + +/** + * bnx2fc_indicate_kcqe() - process KCQE + * + * @context: adapter structure pointer + * @kcq: kcqe pointer + * @num_cqe: Number of completion queue elements + * + * Generic KCQ event handler + */ +void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[], + u32 num_cqe) +{ + struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; + int i = 0; + struct fcoe_kcqe *kcqe = NULL; + + while (i < num_cqe) { + kcqe = (struct fcoe_kcqe *) kcq[i++]; + + switch (kcqe->op_code) { + case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION: + bnx2fc_fastpath_notification(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_OFFLOAD_CONN: + bnx2fc_process_ofld_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_ENABLE_CONN: + bnx2fc_process_enable_conn_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_INIT_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { + bnx2fc_init_failure(hba, + kcqe->completion_status); + } else { + set_bit(ADAPTER_STATE_UP, &hba->adapter_state); + bnx2fc_get_link_state(hba); + printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n", + (u8)hba->pcidev->bus->number); + } + break; + + case FCOE_KCQE_OPCODE_DESTROY_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) { + + printk(KERN_ERR PFX "DESTROY failed\n"); + } else { + printk(KERN_ERR PFX "DESTROY success\n"); + } + set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); + wake_up_interruptible(&hba->destroy_wait); + break; + + case FCOE_KCQE_OPCODE_DISABLE_CONN: + bnx2fc_process_conn_disable_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_DESTROY_CONN: + bnx2fc_process_conn_destroy_cmpl(hba, kcqe); + break; + + case FCOE_KCQE_OPCODE_STAT_FUNC: + if (kcqe->completion_status != + FCOE_KCQE_COMPLETION_STATUS_SUCCESS) + printk(KERN_ERR PFX "STAT failed\n"); + complete(&hba->stat_req_done); + break; + + case FCOE_KCQE_OPCODE_FCOE_ERROR: + default: + printk(KERN_ERR PFX "unknown opcode 0x%x\n", + kcqe->op_code); + } + } +} + +void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid) +{ + struct fcoe_sqe *sqe; + + sqe = &tgt->sq[tgt->sq_prod_idx]; + + /* Fill SQ WQE */ + sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT; + sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT; + + /* Advance SQ Prod Idx */ + if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) { + tgt->sq_prod_idx = 0; + tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit; + } +} + +void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt) +{ + struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; + u32 msg; + + wmb(); + sq_db->prod = tgt->sq_prod_idx | + (tgt->sq_curr_toggle_bit << 15); + msg = *((u32 *)sq_db); + 
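	/* post the SQ producer index to the per-connection doorbell register */ +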
writel(cpu_to_le32(msg), tgt->ctx_base); + +} + +int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt) +{ + u32 context_id = tgt->context_id; + struct fcoe_port *port = tgt->port; + u32 reg_off; + resource_size_t reg_base; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + + reg_base = pci_resource_start(hba->pcidev, + BNX2X_DOORBELL_PCI_BAR); + reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF); + tgt->ctx_base = ioremap(reg_base + reg_off, 4); + if (!tgt->ctx_base) + return -ENOMEM; + return 0; +} + +char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ); + + if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX) + return NULL; + + tgt->rq_cons_idx += num_items; + + if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX) + tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX; + + return buf; +} + +void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items) +{ + /* return the rq buffer */ + u32 next_prod_idx = tgt->rq_prod_idx + num_items; + if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) { + /* Wrap around RQ */ + next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX; + } + tgt->rq_prod_idx = next_prod_idx; + tgt->conn_db->rq_prod = tgt->rq_prod_idx; +} + +void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req, + struct fcoe_task_ctx_entry *task, + struct bnx2fc_cmd *orig_io_req, + u32 offset) +{ + struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd; + struct bnx2fc_rport *tgt = seq_clnp_req->tgt; + struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl; + struct fcoe_ext_mul_sges_ctx *sgl; + u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP; + u8 orig_task_type; + u16 orig_xid = orig_io_req->xid; + u32 context_id = tgt->context_id; + u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma; + u32 orig_offset = offset; + int bd_count; + int i; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + orig_task_type = FCOE_TASK_TYPE_WRITE; + else + orig_task_type = FCOE_TASK_TYPE_READ; + + /* Tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = + FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; + + task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; + task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; + + bd_count = orig_io_req->bd_tbl->bd_valid; + + /* obtain the appropriate bd entry from relative offset */ + for (i = 0; i < bd_count; i++) { + if (offset < bd[i].buf_len) + break; + offset -= bd[i].buf_len; + } + phys_addr += (i * sizeof(struct fcoe_bd_ctx)); + + if (orig_task_type == FCOE_TASK_TYPE_WRITE) { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)phys_addr; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)phys_addr >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = + bd_count; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = + offset; /* adjusted offset */ + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; + } else { + + /* Multiple SGEs 
were used for this IO */ + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr; + sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); + sgl->mul_sgl.sgl_size = bd_count; + sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */ + sgl->mul_sgl.cur_sge_idx = i; + + memset(&task->rxwr_only.rx_seq_ctx, 0, + sizeof(struct fcoe_rx_seq_ctx)); + task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; + task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; + } +} +void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u16 orig_xid) +{ + u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; + struct bnx2fc_rport *tgt = io_req->tgt; + u32 context_id = tgt->context_id; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Tx Write Rx Read */ + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + if (tgt->dev_type == TYPE_TAPE) + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; + + /* Tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = + FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + + /* Rx Read Tx Write */ + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; +} + +void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_rport *tgt = io_req->tgt; + struct fc_frame_header *fc_hdr; + struct fcoe_ext_mul_sges_ctx *sgl; + u8 task_type = 0; + u64 *hdr; + u64 temp_hdr[3]; + u32 context_id; + + + /* Obtain task_type */ + if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) || + (io_req->cmd_type == BNX2FC_ELS)) { + task_type = FCOE_TASK_TYPE_MIDPATH; + } else if (io_req->cmd_type == BNX2FC_ABTS) { + task_type = FCOE_TASK_TYPE_ABTS; + } + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + + BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n", + io_req->cmd_type, task_type); + + /* Tx only */ + if ((task_type == FCOE_TASK_TYPE_MIDPATH) || + (task_type == FCOE_TASK_TYPE_UNSOLICITED)) { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)mp_req->mp_req_bd_dma; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_req_bd_dma >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1; + } + + /* Tx Write Rx Read */ + /* init flags */ + task->txwr_rxrd.const_ctx.init_flags = task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + if (tgt->dev_type == TYPE_TAPE) + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + + /* tx flags */ + task->txwr_rxrd.const_ctx.tx_flags = 
FCOE_TASK_TX_STATE_INIT << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + + /* Rx Write Tx Read */ + task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; + + /* rx flags */ + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; + + context_id = tgt->context_id; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + fc_hdr = &(mp_req->req_fc_hdr); + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid); + fc_hdr->fh_rx_id = htons(0xffff); + task->rxwr_txrd.var_ctx.rx_id = 0xffff; + } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) { + fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid); + } + + /* Fill FC Header into middle path buffer */ + hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr; + memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr)); + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + /* Rx Only */ + if (task_type == FCOE_TASK_TYPE_MIDPATH) { + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + + sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma; + sgl->mul_sgl.cur_sge_addr.hi = + (u32)((u64)mp_req->mp_resp_bd_dma >> 32); + sgl->mul_sgl.sgl_size = 1; + } +} + +void bnx2fc_init_task(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task) +{ + u8 task_type; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct io_bdt *bd_tbl = io_req->bd_tbl; + struct bnx2fc_rport *tgt = io_req->tgt; + struct fcoe_cached_sge_ctx *cached_sge; + struct fcoe_ext_mul_sges_ctx *sgl; + int dev_type = tgt->dev_type; + u64 *fcp_cmnd; + u64 tmp_fcp_cmnd[4]; + u32 context_id; + int cnt, i; + int bd_count; + + memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); + + /* Setup the task from io_req for easy reference */ + io_req->task = task; + + if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) + task_type = FCOE_TASK_TYPE_WRITE; + else + task_type = FCOE_TASK_TYPE_READ; + + /* Tx only */ + bd_count = bd_tbl->bd_valid; + cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; + if (task_type == FCOE_TASK_TYPE_WRITE) { + if ((dev_type == TYPE_DISK) && (bd_count == 1)) { + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = + cached_sge->cur_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = + cached_sge->cur_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = + cached_sge->cur_buf_rem = + fcoe_bd_tbl->buf_len; + + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else { + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = + (u32)bd_tbl->bd_tbl_dma; + task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = + bd_tbl->bd_valid; + } + } + + /*Tx Write Rx Read */ + /* Init state to NORMAL */ + task->txwr_rxrd.const_ctx.init_flags |= task_type << + FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; + if (dev_type == TYPE_TAPE) { + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_TAPE << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + io_req->rec_retry = 0; + io_req->rec_retry = 0; + } else + task->txwr_rxrd.const_ctx.init_flags |= + FCOE_TASK_DEV_TYPE_DISK << + FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; + task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << + FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; + /* tx 
flags */ + task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << + FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; + + /* Set initial seq counter */ + task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1; + + /* Fill FCP_CMND IU */ + fcp_cmnd = (u64 *) + task->txwr_rxrd.union_ctx.fcp_cmd.opaque; + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd); + + /* swap fcp_cmnd */ + cnt = sizeof(struct fcp_cmnd) / sizeof(u64); + + for (i = 0; i < cnt; i++) { + *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]); + fcp_cmnd++; + } + + /* Rx Write Tx Read */ + task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len; + + context_id = tgt->context_id; + task->rxwr_txrd.const_ctx.init_flags = context_id << + FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; + + /* rx flags */ + /* Set state to "waiting for the first packet" */ + task->rxwr_txrd.var_ctx.rx_flags |= 1 << + FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT; + + task->rxwr_txrd.var_ctx.rx_id = 0xffff; + + /* Rx Only */ + if (task_type != FCOE_TASK_TYPE_READ) + return; + + sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; + bd_count = bd_tbl->bd_valid; + + if (dev_type == TYPE_DISK) { + if (bd_count == 1) { + + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; + cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; + cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else if (bd_count == 2) { + struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; + + cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; + cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; + cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; + + fcoe_bd_tbl++; + cached_sge->second_buf_addr.lo = + fcoe_bd_tbl->buf_addr_lo; + cached_sge->second_buf_addr.hi = + fcoe_bd_tbl->buf_addr_hi; + cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len; + task->txwr_rxrd.const_ctx.init_flags |= 1 << + FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; + } else { + + sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; + sgl->mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + sgl->mul_sgl.sgl_size = bd_count; + } + } else { + sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; + sgl->mul_sgl.cur_sge_addr.hi = + (u32)((u64)bd_tbl->bd_tbl_dma >> 32); + sgl->mul_sgl.sgl_size = bd_count; + } +} + +/** + * bnx2fc_setup_task_ctx - allocate and map task context + * + * @hba: pointer to adapter structure + * + * allocate memory for task context, and associated BD table to be used + * by firmware + * + */ +int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba) +{ + int rc = 0; + struct regpair *task_ctx_bdt; + dma_addr_t addr; + int task_ctx_arr_sz; + int i; + + /* + * Allocate task context bd table. A page size of bd table + * can map 256 buffers. Each buffer contains 32 task context + * entries. Hence the limit with one page is 8192 task context + * entries. 
+ */ + hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_bd_dma, + GFP_KERNEL); + if (!hba->task_ctx_bd_tbl) { + printk(KERN_ERR PFX "unable to allocate task context BDT\n"); + rc = -1; + goto out; + } + + /* + * Allocate task_ctx which is an array of pointers pointing to + * a page containing 32 task contexts + */ + task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); + hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)), + GFP_KERNEL); + if (!hba->task_ctx) { + printk(KERN_ERR PFX "unable to allocate task context array\n"); + rc = -1; + goto out1; + } + + /* + * Allocate task_ctx_dma which is an array of dma addresses + */ + hba->task_ctx_dma = kmalloc((task_ctx_arr_sz * + sizeof(dma_addr_t)), GFP_KERNEL); + if (!hba->task_ctx_dma) { + printk(KERN_ERR PFX "unable to alloc context mapping array\n"); + rc = -1; + goto out2; + } + + task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl; + for (i = 0; i < task_ctx_arr_sz; i++) { + + hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, + &hba->task_ctx_dma[i], + GFP_KERNEL); + if (!hba->task_ctx[i]) { + printk(KERN_ERR PFX "unable to alloc task context\n"); + rc = -1; + goto out3; + } + addr = (u64)hba->task_ctx_dma[i]; + task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32); + task_ctx_bdt->lo = cpu_to_le32((u32)addr); + task_ctx_bdt++; + } + return 0; + +out3: + for (i = 0; i < task_ctx_arr_sz; i++) { + if (hba->task_ctx[i]) { + + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +out2: + kfree(hba->task_ctx); + hba->task_ctx = NULL; +out1: + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; +out: + return rc; +} + +void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) +{ + int task_ctx_arr_sz; + int i; + + if (hba->task_ctx_bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx_bd_tbl, + hba->task_ctx_bd_dma); + hba->task_ctx_bd_tbl = NULL; + } + + task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); + if (hba->task_ctx) { + for (i = 0; i < task_ctx_arr_sz; i++) { + if (hba->task_ctx[i]) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->task_ctx[i], + hba->task_ctx_dma[i]); + hba->task_ctx[i] = NULL; + } + } + kfree(hba->task_ctx); + hba->task_ctx = NULL; + } + + kfree(hba->task_ctx_dma); + hba->task_ctx_dma = NULL; +} + +static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int segment_count; + u32 *pbl; + + if (hba->hash_tbl_segments) { + + pbl = hba->hash_tbl_pbl; + if (pbl) { + segment_count = hba->hash_tbl_segment_count; + for (i = 0; i < segment_count; ++i) { + dma_addr_t dma_address; + + dma_address = le32_to_cpu(*pbl); + ++pbl; + dma_address += ((u64)le32_to_cpu(*pbl)) << 32; + ++pbl; + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_address); + } + } + + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; + } + + if (hba->hash_tbl_pbl) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->hash_tbl_pbl, + hba->hash_tbl_pbl_dma); + hba->hash_tbl_pbl = NULL; + } +} + +static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) +{ + int i; + int hash_table_size; + int segment_count; + int segment_array_size; + int dma_segment_array_size; + dma_addr_t *dma_segment_array; + u32 *pbl; + + hash_table_size = BNX2FC_NUM_MAX_SESS * 
BNX2FC_MAX_ROWS_IN_HASH_TBL * + sizeof(struct fcoe_hash_table_entry); + + segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; + segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; + hba->hash_tbl_segment_count = segment_count; + + segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); + hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); + if (!hba->hash_tbl_segments) { + printk(KERN_ERR PFX "hash table pointers alloc failed\n"); + return -ENOMEM; + } + dma_segment_array_size = segment_count * sizeof(*dma_segment_array); + dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); + if (!dma_segment_array) { + printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); + goto cleanup_ht; + } + + for (i = 0; i < segment_count; ++i) { + hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + &dma_segment_array[i], + GFP_KERNEL); + if (!hba->hash_tbl_segments[i]) { + printk(KERN_ERR PFX "hash segment alloc failed\n"); + goto cleanup_dma; + } + } + + hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->hash_tbl_pbl_dma, + GFP_KERNEL); + if (!hba->hash_tbl_pbl) { + printk(KERN_ERR PFX "hash table pbl alloc failed\n"); + goto cleanup_dma; + } + + pbl = hba->hash_tbl_pbl; + for (i = 0; i < segment_count; ++i) { + u64 paddr = dma_segment_array[i]; + *pbl = cpu_to_le32((u32) paddr); + ++pbl; + *pbl = cpu_to_le32((u32) (paddr >> 32)); + ++pbl; + } + pbl = hba->hash_tbl_pbl; + i = 0; + while (*pbl && *(pbl + 1)) { + ++pbl; + ++pbl; + ++i; + } + kfree(dma_segment_array); + return 0; + +cleanup_dma: + for (i = 0; i < segment_count; ++i) { + if (hba->hash_tbl_segments[i]) + dma_free_coherent(&hba->pcidev->dev, + BNX2FC_HASH_TBL_CHUNK_SIZE, + hba->hash_tbl_segments[i], + dma_segment_array[i]); + } + + kfree(dma_segment_array); + +cleanup_ht: + kfree(hba->hash_tbl_segments); + hba->hash_tbl_segments = NULL; + return -ENOMEM; +} + +/** + * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer + * + * @hba: Pointer to adapter structure + * + */ +int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) +{ + u64 addr; + u32 mem_size; + int i; + + if (bnx2fc_allocate_hash_table(hba)) + return -ENOMEM; + + mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); + hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_ptr_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl_ptr) { + printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + mem_size = BNX2FC_NUM_MAX_SESS * + sizeof(struct fcoe_t2_hash_table_entry); + hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, + &hba->t2_hash_tbl_dma, + GFP_KERNEL); + if (!hba->t2_hash_tbl) { + printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + addr = (unsigned long) hba->t2_hash_tbl_dma + + ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); + hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; + hba->t2_hash_tbl[i].next.hi = addr >> 32; + } + + hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, + PAGE_SIZE, &hba->dummy_buf_dma, + GFP_KERNEL); + if (!hba->dummy_buffer) { + printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, + &hba->stats_buf_dma, + GFP_KERNEL); + if (!hba->stats_buffer) { + printk(KERN_ERR PFX "unable to 
alloc Stats Buffer\n"); + bnx2fc_free_fw_resc(hba); + return -ENOMEM; + } + + return 0; +} + +void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) +{ + u32 mem_size; + + if (hba->stats_buffer) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->stats_buffer, hba->stats_buf_dma); + hba->stats_buffer = NULL; + } + + if (hba->dummy_buffer) { + dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, + hba->dummy_buffer, hba->dummy_buf_dma); + hba->dummy_buffer = NULL; + } + + if (hba->t2_hash_tbl_ptr) { + mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); + dma_free_coherent(&hba->pcidev->dev, mem_size, + hba->t2_hash_tbl_ptr, + hba->t2_hash_tbl_ptr_dma); + hba->t2_hash_tbl_ptr = NULL; + } + + if (hba->t2_hash_tbl) { + mem_size = BNX2FC_NUM_MAX_SESS * + sizeof(struct fcoe_t2_hash_table_entry); + dma_free_coherent(&hba->pcidev->dev, mem_size, + hba->t2_hash_tbl, hba->t2_hash_tbl_dma); + hba->t2_hash_tbl = NULL; + } + bnx2fc_free_hash_table(hba); +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c new file mode 100644 index 000000000..b42a9accb --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -0,0 +1,2102 @@ +/* bnx2fc_io.c: QLogic Linux FCoE offload driver. + * IO manager and SCSI IO processing. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" + +#define RESERVE_FREE_LIST_INDEX num_possible_cpus() + +static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, + int bd_index); +static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); +static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); +static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); +static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); +static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, + struct fcoe_fcp_rsp_payload *fcp_rsp, + u8 num_rq, unsigned char *rq_data); + +void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req, + unsigned int timer_msec) +{ + struct bnx2fc_interface *interface = io_req->port->priv; + + if (queue_delayed_work(interface->timer_work_queue, + &io_req->timeout_work, + msecs_to_jiffies(timer_msec))) + kref_get(&io_req->refcount); +} + +static void bnx2fc_cmd_timeout(struct work_struct *work) +{ + struct bnx2fc_cmd *io_req = container_of(work, struct bnx2fc_cmd, + timeout_work.work); + u8 cmd_type = io_req->cmd_type; + struct bnx2fc_rport *tgt = io_req->tgt; + int rc; + + BNX2FC_IO_DBG(io_req, "cmd_timeout, cmd_type = %d," + "req_flags = %lx\n", cmd_type, io_req->req_flags); + + spin_lock_bh(&tgt->tgt_lock); + if (test_and_clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags)) { + clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); + /* + * ideally we should hold the io_req until RRQ complets, + * and release io_req from timeout hold. 
+ */ + spin_unlock_bh(&tgt->tgt_lock); + bnx2fc_send_rrq(io_req); + return; + } + if (test_and_clear_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO ready for reuse now\n"); + goto done; + } + + switch (cmd_type) { + case BNX2FC_SCSI_CMD: + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort timed out\n"); + complete(&io_req->abts_done); + } else if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, + &io_req->req_flags)) { + /* Handle internally generated ABTS timeout */ + BNX2FC_IO_DBG(io_req, "ABTS timed out refcnt = %d\n", + kref_read(&io_req->refcount)); + if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { + /* + * Cleanup and return original command to + * mid-layer. + */ + bnx2fc_initiate_cleanup(io_req); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + return; + } + } else { + /* Hanlde IO timeout */ + BNX2FC_IO_DBG(io_req, "IO timed out. issue ABTS\n"); + if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed before " + " timer expiry\n"); + goto done; + } + + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &io_req->req_flags)) { + rc = bnx2fc_initiate_abts(io_req); + if (rc == SUCCESS) + goto done; + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + return; + } else { + BNX2FC_IO_DBG(io_req, "IO already in " + "ABTS processing\n"); + } + } + break; + case BNX2FC_ELS: + + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "ABTS for ELS timed out\n"); + + if (!test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags)) { + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + + return; + } + } else { + /* + * Handle ELS timeout. + * tgt_lock is used to sync compl path and timeout + * path. If els compl path is processing this IO, we + * have nothing to do here, just release the timer hold + */ + BNX2FC_IO_DBG(io_req, "ELS timed out\n"); + if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE, + &io_req->req_flags)) + goto done; + + /* Indicate the cb_func that this ELS is timed out */ + set_bit(BNX2FC_FLAG_ELS_TIMEOUT, &io_req->req_flags); + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + } + break; + default: + printk(KERN_ERR PFX "cmd_timeout: invalid cmd_type %d\n", + cmd_type); + break; + } + +done: + /* release the cmd that was held when timer was set */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); +} + +static void bnx2fc_scsi_done(struct bnx2fc_cmd *io_req, int err_code) +{ + /* Called with host lock held */ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + /* + * active_cmd_queue may have other command types as well, + * and during flush operation, we want to error back only + * scsi commands. + */ + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + return; + + BNX2FC_IO_DBG(io_req, "scsi_done. err_code = 0x%x\n", err_code); + if (test_bit(BNX2FC_FLAG_CMD_LOST, &io_req->req_flags)) { + /* Do not call scsi done for this IO */ + return; + } + + bnx2fc_unmap_sg_list(io_req); + io_req->sc_cmd = NULL; + + /* Sanity checks before returning command to mid-layer */ + if (!sc_cmd) { + printk(KERN_ERR PFX "scsi_done - sc_cmd NULL. 
" + "IO(0x%x) already cleaned up\n", + io_req->xid); + return; + } + if (!sc_cmd->device) { + pr_err(PFX "0x%x: sc_cmd->device is NULL.\n", io_req->xid); + return; + } + if (!sc_cmd->device->host) { + pr_err(PFX "0x%x: sc_cmd->device->host is NULL.\n", + io_req->xid); + return; + } + + sc_cmd->result = err_code << 16; + + BNX2FC_IO_DBG(io_req, "sc=%p, result=0x%x, retries=%d, allowed=%d\n", + sc_cmd, host_byte(sc_cmd->result), sc_cmd->retries, + sc_cmd->allowed); + scsi_set_resid(sc_cmd, scsi_bufflen(sc_cmd)); + bnx2fc_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); +} + +struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) +{ + struct bnx2fc_cmd_mgr *cmgr; + struct io_bdt *bdt_info; + struct bnx2fc_cmd *io_req; + size_t len; + u32 mem_size; + u16 xid; + int i; + int num_ios, num_pri_ios; + size_t bd_tbl_sz; + int arr_sz = num_possible_cpus() + 1; + u16 min_xid = BNX2FC_MIN_XID; + u16 max_xid = hba->max_xid; + + if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN) { + printk(KERN_ERR PFX "cmd_mgr_alloc: Invalid min_xid 0x%x \ + and max_xid 0x%x\n", min_xid, max_xid); + return NULL; + } + BNX2FC_MISC_DBG("min xid 0x%x, max xid 0x%x\n", min_xid, max_xid); + + num_ios = max_xid - min_xid + 1; + len = (num_ios * (sizeof(struct bnx2fc_cmd *))); + len += sizeof(struct bnx2fc_cmd_mgr); + + cmgr = kzalloc(len, GFP_KERNEL); + if (!cmgr) { + printk(KERN_ERR PFX "failed to alloc cmgr\n"); + return NULL; + } + + cmgr->hba = hba; + cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), + GFP_KERNEL); + if (!cmgr->free_list) { + printk(KERN_ERR PFX "failed to alloc free_list\n"); + goto mem_err; + } + + cmgr->free_list_lock = kcalloc(arr_sz, sizeof(*cmgr->free_list_lock), + GFP_KERNEL); + if (!cmgr->free_list_lock) { + printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); + kfree(cmgr->free_list); + cmgr->free_list = NULL; + goto mem_err; + } + + cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); + + for (i = 0; i < arr_sz; i++) { + INIT_LIST_HEAD(&cmgr->free_list[i]); + spin_lock_init(&cmgr->free_list_lock[i]); + } + + /* + * Pre-allocated pool of bnx2fc_cmds. + * Last entry in the free list array is the free list + * of slow path requests. 
+ */ + xid = BNX2FC_MIN_XID; + num_pri_ios = num_ios - hba->elstm_xids; + for (i = 0; i < num_ios; i++) { + io_req = kzalloc(sizeof(*io_req), GFP_KERNEL); + + if (!io_req) { + printk(KERN_ERR PFX "failed to alloc io_req\n"); + goto mem_err; + } + + INIT_LIST_HEAD(&io_req->link); + INIT_DELAYED_WORK(&io_req->timeout_work, bnx2fc_cmd_timeout); + + io_req->xid = xid++; + if (i < num_pri_ios) + list_add_tail(&io_req->link, + &cmgr->free_list[io_req->xid % + num_possible_cpus()]); + else + list_add_tail(&io_req->link, + &cmgr->free_list[num_possible_cpus()]); + io_req++; + } + + /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ + mem_size = num_ios * sizeof(struct io_bdt *); + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool) { + printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); + goto mem_err; + } + + mem_size = sizeof(struct io_bdt); + for (i = 0; i < num_ios; i++) { + cmgr->io_bdt_pool[i] = kmalloc(mem_size, GFP_KERNEL); + if (!cmgr->io_bdt_pool[i]) { + printk(KERN_ERR PFX "failed to alloc " + "io_bdt_pool[%d]\n", i); + goto mem_err; + } + } + + /* Allocate an map fcoe_bdt_ctx structures */ + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + bdt_info->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, + bd_tbl_sz, + &bdt_info->bd_tbl_dma, + GFP_KERNEL); + if (!bdt_info->bd_tbl) { + printk(KERN_ERR PFX "failed to alloc " + "bdt_tbl[%d]\n", i); + goto mem_err; + } + } + + return cmgr; + +mem_err: + bnx2fc_cmd_mgr_free(cmgr); + return NULL; +} + +void bnx2fc_cmd_mgr_free(struct bnx2fc_cmd_mgr *cmgr) +{ + struct io_bdt *bdt_info; + struct bnx2fc_hba *hba = cmgr->hba; + size_t bd_tbl_sz; + u16 min_xid = BNX2FC_MIN_XID; + u16 max_xid = hba->max_xid; + int num_ios; + int i; + + num_ios = max_xid - min_xid + 1; + + /* Free fcoe_bdt_ctx structures */ + if (!cmgr->io_bdt_pool) + goto free_cmd_pool; + + bd_tbl_sz = BNX2FC_MAX_BDS_PER_CMD * sizeof(struct fcoe_bd_ctx); + for (i = 0; i < num_ios; i++) { + bdt_info = cmgr->io_bdt_pool[i]; + if (bdt_info->bd_tbl) { + dma_free_coherent(&hba->pcidev->dev, bd_tbl_sz, + bdt_info->bd_tbl, + bdt_info->bd_tbl_dma); + bdt_info->bd_tbl = NULL; + } + } + + /* Destroy io_bdt pool */ + for (i = 0; i < num_ios; i++) { + kfree(cmgr->io_bdt_pool[i]); + cmgr->io_bdt_pool[i] = NULL; + } + + kfree(cmgr->io_bdt_pool); + cmgr->io_bdt_pool = NULL; + +free_cmd_pool: + kfree(cmgr->free_list_lock); + + /* Destroy cmd pool */ + if (!cmgr->free_list) + goto free_cmgr; + + for (i = 0; i < num_possible_cpus() + 1; i++) { + struct bnx2fc_cmd *tmp, *io_req; + + list_for_each_entry_safe(io_req, tmp, + &cmgr->free_list[i], link) { + list_del(&io_req->link); + kfree(io_req); + } + } + kfree(cmgr->free_list); +free_cmgr: + /* Free command manager itself */ + kfree(cmgr); +} + +struct bnx2fc_cmd *bnx2fc_elstm_alloc(struct bnx2fc_rport *tgt, int type) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + int index = RESERVE_FREE_LIST_INDEX; + u32 free_sqes; + u32 max_sqes; + u16 xid; + + max_sqes = tgt->max_sqes; + switch (type) { + case BNX2FC_TASK_MGMT_CMD: + max_sqes = BNX2FC_TM_MAX_SQES; + break; + case BNX2FC_ELS: + max_sqes = BNX2FC_ELS_MAX_SQES; + break; + default: + break; + } + + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + 
spin_lock_bh(&cmd_mgr->free_list_lock[index]); + free_sqes = atomic_read(&tgt->free_sqes); + if ((list_empty(&(cmd_mgr->free_list[index]))) || + (tgt->num_active_ios.counter >= max_sqes) || + (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { + BNX2FC_TGT_DBG(tgt, "No free els_tm cmds available " + "ios(%d):sqes(%d)\n", + tgt->num_active_ios.counter, tgt->max_sqes); + if (list_empty(&(cmd_mgr->free_list[index]))) + printk(KERN_ERR PFX "elstm_alloc: list_empty\n"); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + return NULL; + } + + listp = (struct list_head *) + cmd_mgr->free_list[index].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + atomic_dec(&tgt->free_sqes); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + io_req->cmd_type = type; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} + +struct bnx2fc_cmd *bnx2fc_cmd_alloc(struct bnx2fc_rport *tgt) +{ + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_cmd_mgr *cmd_mgr = interface->hba->cmd_mgr; + struct bnx2fc_cmd *io_req; + struct list_head *listp; + struct io_bdt *bd_tbl; + u32 free_sqes; + u32 max_sqes; + u16 xid; + int index = raw_smp_processor_id(); + + max_sqes = BNX2FC_SCSI_MAX_SQES; + /* + * NOTE: Free list insertions and deletions are protected with + * cmgr lock + */ + spin_lock_bh(&cmd_mgr->free_list_lock[index]); + free_sqes = atomic_read(&tgt->free_sqes); + if ((list_empty(&cmd_mgr->free_list[index])) || + (tgt->num_active_ios.counter >= max_sqes) || + (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + return NULL; + } + + listp = (struct list_head *) + cmd_mgr->free_list[index].next; + list_del_init(listp); + io_req = (struct bnx2fc_cmd *) listp; + xid = io_req->xid; + cmd_mgr->cmds[xid] = io_req; + atomic_inc(&tgt->num_active_ios); + atomic_dec(&tgt->free_sqes); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + + INIT_LIST_HEAD(&io_req->link); + + io_req->port = port; + io_req->cmd_mgr = cmd_mgr; + io_req->req_flags = 0; + + /* Bind io_bdt for this io_req */ + /* Have a static link between io_req and io_bdt_pool */ + bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid]; + bd_tbl->io_req = io_req; + + /* Hold the io_req against deletion */ + kref_init(&io_req->refcount); + return io_req; +} + +void bnx2fc_cmd_release(struct kref *ref) +{ + struct bnx2fc_cmd *io_req = container_of(ref, + struct bnx2fc_cmd, refcount); + struct bnx2fc_cmd_mgr *cmd_mgr = io_req->cmd_mgr; + int index; + + if (io_req->cmd_type == BNX2FC_SCSI_CMD) + index = io_req->xid % num_possible_cpus(); + else + index = RESERVE_FREE_LIST_INDEX; + + + spin_lock_bh(&cmd_mgr->free_list_lock[index]); + if (io_req->cmd_type != BNX2FC_SCSI_CMD) + bnx2fc_free_mp_resc(io_req); + cmd_mgr->cmds[io_req->xid] = NULL; + /* Delete IO from retire queue */ + list_del_init(&io_req->link); + /* Add it to the free list */ + list_add(&io_req->link, + &cmd_mgr->free_list[index]); + atomic_dec(&io_req->tgt->num_active_ios); + spin_unlock_bh(&cmd_mgr->free_list_lock[index]); + +} + +static void bnx2fc_free_mp_resc(struct bnx2fc_cmd 
*io_req) +{ + struct bnx2fc_mp_req *mp_req = &(io_req->mp_req); + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + size_t sz = sizeof(struct fcoe_bd_ctx); + + /* clear tm flags */ + mp_req->tm_flags = 0; + if (mp_req->mp_req_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_req_bd, + mp_req->mp_req_bd_dma); + mp_req->mp_req_bd = NULL; + } + if (mp_req->mp_resp_bd) { + dma_free_coherent(&hba->pcidev->dev, sz, + mp_req->mp_resp_bd, + mp_req->mp_resp_bd_dma); + mp_req->mp_resp_bd = NULL; + } + if (mp_req->req_buf) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + mp_req->req_buf, + mp_req->req_buf_dma); + mp_req->req_buf = NULL; + } + if (mp_req->resp_buf) { + dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + mp_req->resp_buf, + mp_req->resp_buf_dma); + mp_req->resp_buf = NULL; + } +} + +int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_mp_req *mp_req; + struct fcoe_bd_ctx *mp_req_bd; + struct fcoe_bd_ctx *mp_resp_bd; + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + dma_addr_t addr; + size_t sz; + + mp_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); + memset(mp_req, 0, sizeof(struct bnx2fc_mp_req)); + + if (io_req->cmd_type != BNX2FC_ELS) { + mp_req->req_len = sizeof(struct fcp_cmnd); + io_req->data_xfer_len = mp_req->req_len; + } else + mp_req->req_len = io_req->data_xfer_len; + + mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + &mp_req->req_buf_dma, + GFP_ATOMIC); + if (!mp_req->req_buf) { + printk(KERN_ERR PFX "unable to alloc MP req buffer\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + + mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE, + &mp_req->resp_buf_dma, + GFP_ATOMIC); + if (!mp_req->resp_buf) { + printk(KERN_ERR PFX "unable to alloc TM resp buffer\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE); + memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE); + + /* Allocate and map mp_req_bd and mp_resp_bd */ + sz = sizeof(struct fcoe_bd_ctx); + mp_req->mp_req_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, + &mp_req->mp_req_bd_dma, + GFP_ATOMIC); + if (!mp_req->mp_req_bd) { + printk(KERN_ERR PFX "unable to alloc MP req bd\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz, + &mp_req->mp_resp_bd_dma, + GFP_ATOMIC); + if (!mp_req->mp_resp_bd) { + printk(KERN_ERR PFX "unable to alloc MP resp bd\n"); + bnx2fc_free_mp_resc(io_req); + return FAILED; + } + /* Fill bd table */ + addr = mp_req->req_buf_dma; + mp_req_bd = mp_req->mp_req_bd; + mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff; + mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32); + mp_req_bd->buf_len = CNIC_PAGE_SIZE; + mp_req_bd->flags = 0; + + /* + * MP buffer is either a task mgmt command or an ELS. 
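+ * (the request and response buffers above are each a single
+ * CNIC_PAGE_SIZE DMA allocation).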
+ * So the assumption is that it consumes a single bd + * entry in the bd table + */ + mp_resp_bd = mp_req->mp_resp_bd; + addr = mp_req->resp_buf_dma; + mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff; + mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32); + mp_resp_bd->buf_len = CNIC_PAGE_SIZE; + mp_resp_bd->flags = 0; + + return SUCCESS; +} + +static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) +{ + struct fc_lport *lport; + struct fc_rport *rport; + struct fc_rport_libfc_priv *rp; + struct fcoe_port *port; + struct bnx2fc_interface *interface; + struct bnx2fc_rport *tgt; + struct bnx2fc_cmd *io_req; + struct bnx2fc_mp_req *tm_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct Scsi_Host *host = sc_cmd->device->host; + struct fc_frame_header *fc_hdr; + struct fcp_cmnd *fcp_cmnd; + int task_idx, index; + int rc = SUCCESS; + u16 xid; + u32 sid, did; + unsigned long start = jiffies; + + lport = shost_priv(host); + rport = starget_to_rport(scsi_target(sc_cmd->device)); + port = lport_priv(lport); + interface = port->priv; + + if (rport == NULL) { + printk(KERN_ERR PFX "device_reset: rport is NULL\n"); + rc = FAILED; + goto tmf_err; + } + rp = rport->dd_data; + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + return rc; + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "device_reset: link is not ready\n"); + rc = FAILED; + goto tmf_err; + } + /* rport and tgt are allocated together, so tgt should be non-NULL */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) { + printk(KERN_ERR PFX "device_reset: tgt not offloaded\n"); + rc = FAILED; + goto tmf_err; + } +retry_tmf: + io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_TASK_MGMT_CMD); + if (!io_req) { + if (time_after(jiffies, start + HZ)) { + printk(KERN_ERR PFX "tmf: Failed TMF"); + rc = FAILED; + goto tmf_err; + } + msleep(20); + goto retry_tmf; + } + /* Initialize rest of io_req fields */ + io_req->sc_cmd = sc_cmd; + io_req->port = port; + io_req->tgt = tgt; + + tm_req = (struct bnx2fc_mp_req *)&(io_req->mp_req); + + rc = bnx2fc_init_mp_req(io_req); + if (rc == FAILED) { + printk(KERN_ERR PFX "Task mgmt MP request init failed\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + goto tmf_err; + } + + /* Set TM flags */ + io_req->io_req_flags = 0; + tm_req->tm_flags = tm_flags; + + /* Fill FCP_CMND */ + bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tm_req->req_buf); + fcp_cmnd = (struct fcp_cmnd *)tm_req->req_buf; + memset(fcp_cmnd->fc_cdb, 0, sc_cmd->cmd_len); + fcp_cmnd->fc_dl = 0; + + /* Fill FC header */ + fc_hdr = &(tm_req->req_fc_hdr); + sid = tgt->sid; + did = rport->port_id; + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_DD_UNSOL_CMD, did, sid, + FC_TYPE_FCP, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + /* Obtain exchange id */ + xid = io_req->xid; + + BNX2FC_TGT_DBG(tgt, "Initiate TMF - xid = 0x%x\n", xid); + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_mp_task(io_req, task); + + bnx2fc_priv(sc_cmd)->io_req = io_req; + + /* Obtain free SQ entry */ + spin_lock_bh(&tgt->tgt_lock); + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_tm_queue */ + io_req->on_tmf_queue = 1; + list_add_tail(&io_req->link, 
&tgt->active_tm_queue); + + init_completion(&io_req->abts_done); + io_req->wait_for_abts_comp = 1; + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + spin_unlock_bh(&tgt->tgt_lock); + + rc = wait_for_completion_timeout(&io_req->abts_done, + interface->tm_timeout * HZ); + spin_lock_bh(&tgt->tgt_lock); + + io_req->wait_for_abts_comp = 0; + if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) { + set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); + if (io_req->on_tmf_queue) { + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + } + io_req->wait_for_cleanup_comp = 1; + init_completion(&io_req->cleanup_done); + bnx2fc_initiate_cleanup(io_req); + spin_unlock_bh(&tgt->tgt_lock); + rc = wait_for_completion_timeout(&io_req->cleanup_done, + BNX2FC_FW_TIMEOUT); + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_cleanup_comp = 0; + if (!rc) + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } + + spin_unlock_bh(&tgt->tgt_lock); + + if (!rc) { + BNX2FC_TGT_DBG(tgt, "task mgmt command failed...\n"); + rc = FAILED; + } else { + BNX2FC_TGT_DBG(tgt, "task mgmt command success...\n"); + rc = SUCCESS; + } +tmf_err: + return rc; +} + +int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req) +{ + struct fc_lport *lport; + struct bnx2fc_rport *tgt = io_req->tgt; + struct fc_rport *rport = tgt->rport; + struct fc_rport_priv *rdata = tgt->rdata; + struct bnx2fc_interface *interface; + struct fcoe_port *port; + struct bnx2fc_cmd *abts_io_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct fc_frame_header *fc_hdr; + struct bnx2fc_mp_req *abts_req; + int task_idx, index; + u32 sid, did; + u16 xid; + int rc = SUCCESS; + u32 r_a_tov = rdata->r_a_tov; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_abts\n"); + + port = io_req->port; + interface = port->priv; + lport = port->lport; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "initiate_abts: tgt not offloaded\n"); + rc = FAILED; + goto abts_err; + } + + if (rport == NULL) { + printk(KERN_ERR PFX "initiate_abts: rport is NULL\n"); + rc = FAILED; + goto abts_err; + } + + if (lport->state != LPORT_ST_READY || !(lport->link_up)) { + printk(KERN_ERR PFX "initiate_abts: link is not ready\n"); + rc = FAILED; + goto abts_err; + } + + abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS); + if (!abts_io_req) { + printk(KERN_ERR PFX "abts: couldn't allocate cmd\n"); + rc = FAILED; + goto abts_err; + } + + /* Initialize rest of io_req fields */ + abts_io_req->sc_cmd = NULL; + abts_io_req->port = port; + abts_io_req->tgt = tgt; + abts_io_req->data_xfer_len = 0; /* No data transfer for ABTS */ + + abts_req = (struct bnx2fc_mp_req *)&(abts_io_req->mp_req); + memset(abts_req, 0, sizeof(struct bnx2fc_mp_req)); + + /* Fill FC header */ + fc_hdr = &(abts_req->req_fc_hdr); + + /* Obtain oxid and rxid for the original exchange to be aborted */ + fc_hdr->fh_ox_id = htons(io_req->xid); + fc_hdr->fh_rx_id = htons(io_req->task->rxwr_txrd.var_ctx.rx_id); + + sid = tgt->sid; + did = rport->port_id; + + __fc_fill_fc_hdr(fc_hdr, FC_RCTL_BA_ABTS, did, sid, + FC_TYPE_BLS, FC_FC_FIRST_SEQ | FC_FC_END_SEQ | + FC_FC_SEQ_INIT, 0); + + xid = abts_io_req->xid; + BNX2FC_IO_DBG(abts_io_req, "ABTS io_req\n"); + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + 
bnx2fc_init_mp_task(abts_io_req, task); + + /* + * ABTS task is a temporary task that will be cleaned up + * irrespective of ABTS response. We need to start the timer + * for the original exchange, as the CQE is posted for the original + * IO request. + * + * Timer for ABTS is started only when it is originated by a + * TM request. For the ABTS issued as part of ULP timeout, + * scsi-ml maintains the timers. + */ + + /* if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags))*/ + bnx2fc_cmd_timer_set(io_req, 2 * r_a_tov); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + +abts_err: + return rc; +} + +int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset, + enum fc_rctl r_ctl) +{ + struct bnx2fc_rport *tgt = orig_io_req->tgt; + struct bnx2fc_interface *interface; + struct fcoe_port *port; + struct bnx2fc_cmd *seq_clnp_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct bnx2fc_els_cb_arg *cb_arg = NULL; + int task_idx, index; + u16 xid; + int rc = 0; + + BNX2FC_IO_DBG(orig_io_req, "bnx2fc_initiate_seq_cleanup xid = 0x%x\n", + orig_io_req->xid); + kref_get(&orig_io_req->refcount); + + port = orig_io_req->port; + interface = port->priv; + + cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC); + if (!cb_arg) { + printk(KERN_ERR PFX "Unable to alloc cb_arg for seq clnup\n"); + rc = -ENOMEM; + goto cleanup_err; + } + + seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP); + if (!seq_clnp_req) { + printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n"); + rc = -ENOMEM; + kfree(cb_arg); + goto cleanup_err; + } + /* Initialize rest of io_req fields */ + seq_clnp_req->sc_cmd = NULL; + seq_clnp_req->port = port; + seq_clnp_req->tgt = tgt; + seq_clnp_req->data_xfer_len = 0; /* No data transfer for cleanup */ + + xid = seq_clnp_req->xid; + + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + cb_arg->aborted_io_req = orig_io_req; + cb_arg->io_req = seq_clnp_req; + cb_arg->r_ctl = r_ctl; + cb_arg->offset = offset; + seq_clnp_req->cb_arg = cb_arg; + + printk(KERN_ERR PFX "call init_seq_cleanup_task\n"); + bnx2fc_init_seq_cleanup_task(seq_clnp_req, task, orig_io_req, offset); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); +cleanup_err: + return rc; +} + +int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_rport *tgt = io_req->tgt; + struct bnx2fc_interface *interface; + struct fcoe_port *port; + struct bnx2fc_cmd *cleanup_io_req; + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + int task_idx, index; + u16 xid, orig_xid; + int rc = 0; + + /* ASSUMPTION: called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_initiate_cleanup\n"); + + port = io_req->port; + interface = port->priv; + + cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP); + if (!cleanup_io_req) { + printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n"); + rc = -1; + goto cleanup_err; + } + + /* Initialize rest of io_req fields */ + cleanup_io_req->sc_cmd = NULL; + cleanup_io_req->port = port; + cleanup_io_req->tgt = tgt; + cleanup_io_req->data_xfer_len = 0; /* No data transfer for cleanup */ + + xid = cleanup_io_req->xid; + + task_idx = xid/BNX2FC_TASKS_PER_PAGE; + index = xid % 
BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) + interface->hba->task_ctx[task_idx]; + task = &(task_page[index]); + orig_xid = io_req->xid; + + BNX2FC_IO_DBG(io_req, "CLEANUP io_req xid = 0x%x\n", xid); + + bnx2fc_init_cleanup_task(cleanup_io_req, task, orig_xid); + + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Set flag that cleanup request is pending with the firmware */ + set_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + +cleanup_err: + return rc; +} + +/** + * bnx2fc_eh_target_reset: Reset a target + * + * @sc_cmd: SCSI command + * + * Set from SCSI host template to send task mgmt command to the target + * and wait for the response + */ +int bnx2fc_eh_target_reset(struct scsi_cmnd *sc_cmd) +{ + return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_TGT_RESET); +} + +/** + * bnx2fc_eh_device_reset - Reset a single LUN + * + * @sc_cmd: SCSI command + * + * Set from SCSI host template to send task mgmt command to the target + * and wait for the response + */ +int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd) +{ + return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); +} + +static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req) + __must_hold(&tgt->tgt_lock) +{ + struct bnx2fc_rport *tgt = io_req->tgt; + unsigned int time_left; + + init_completion(&io_req->cleanup_done); + io_req->wait_for_cleanup_comp = 1; + bnx2fc_initiate_cleanup(io_req); + + spin_unlock_bh(&tgt->tgt_lock); + + /* + * Can't wait forever on cleanup response lest we let the SCSI error + * handler wait forever + */ + time_left = wait_for_completion_timeout(&io_req->cleanup_done, + BNX2FC_FW_TIMEOUT); + if (!time_left) { + BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n", + __func__); + + /* + * Put the extra reference to the SCSI command since it would + * not have been returned in this case. + */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } + + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_cleanup_comp = 0; + return SUCCESS; +} + +/** + * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding + * SCSI command + * + * @sc_cmd: SCSI_ML command pointer + * + * SCSI abort request handler + */ +int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) +{ + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct bnx2fc_cmd *io_req; + struct fc_lport *lport; + struct bnx2fc_rport *tgt; + int rc; + unsigned int time_left; + + rc = fc_block_scsi_eh(sc_cmd); + if (rc) + return rc; + + lport = shost_priv(sc_cmd->device->host); + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + printk(KERN_ERR PFX "eh_abort: link not ready\n"); + return FAILED; + } + + tgt = (struct bnx2fc_rport *)&rp[1]; + + BNX2FC_TGT_DBG(tgt, "Entered bnx2fc_eh_abort\n"); + + spin_lock_bh(&tgt->tgt_lock); + io_req = bnx2fc_priv(sc_cmd)->io_req; + if (!io_req) { + /* Command might have just completed */ + printk(KERN_ERR PFX "eh_abort: io_req is NULL\n"); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + BNX2FC_IO_DBG(io_req, "eh_abort - refcnt = %d\n", + kref_read(&io_req->refcount)); + + /* Hold IO request across abort processing */ + kref_get(&io_req->refcount); + + BUG_ON(tgt != io_req->tgt); + + /* Remove the io_req from the active_q. */ + /* + * Task Mgmt functions (LUN RESET & TGT RESET) will not + * issue an ABTS on this particular IO req, as the + * io_req is no longer in the active_q. 
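+ * (eh_abort removes it from the active queue and parks it on
+ * tgt->io_retire_queue just below).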
+ */ + if (tgt->flush_in_prog) { + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " + "flush in progress\n", io_req->xid); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return SUCCESS; + } + + if (io_req->on_active_queue == 0) { + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " + "not on active_q\n", io_req->xid); + /* + * The IO is still with the FW. + * Return failure and let SCSI-ml retry eh_abort. + */ + spin_unlock_bh(&tgt->tgt_lock); + return FAILED; + } + + /* + * Only eh_abort processing will remove the IO from + * active_cmd_q before processing the request. this is + * done to avoid race conditions between IOs aborted + * as part of task management completion and eh_abort + * processing + */ + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + + init_completion(&io_req->abts_done); + init_completion(&io_req->cleanup_done); + + if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { + printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " + "already in abts processing\n", io_req->xid); + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + /* + * We don't want to hold off the upper layer timer so simply + * cleanup the command and return that I/O was successfully + * aborted. + */ + bnx2fc_abts_cleanup(io_req); + /* This only occurs when an task abort was requested while ABTS + is in progress. Setting the IO_CLEANUP flag will skip the + RRQ process in the case when the fw generated SCSI_CMD cmpl + was a result from the ABTS request rather than the CLEANUP + request */ + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); + rc = FAILED; + goto done; + } + + /* Cancel the current timer running on this io_req */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags); + io_req->wait_for_abts_comp = 1; + rc = bnx2fc_initiate_abts(io_req); + if (rc == FAILED) { + io_req->wait_for_cleanup_comp = 1; + bnx2fc_initiate_cleanup(io_req); + spin_unlock_bh(&tgt->tgt_lock); + wait_for_completion(&io_req->cleanup_done); + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_cleanup_comp = 0; + goto done; + } + spin_unlock_bh(&tgt->tgt_lock); + + /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */ + time_left = wait_for_completion_timeout(&io_req->abts_done, + msecs_to_jiffies(2 * rp->r_a_tov + 1)); + if (time_left) + BNX2FC_IO_DBG(io_req, + "Timed out in eh_abort waiting for abts_done"); + + spin_lock_bh(&tgt->tgt_lock); + io_req->wait_for_abts_comp = 0; + if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); + rc = SUCCESS; + } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { + /* Let the scsi-ml try to recover this command */ + printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", + io_req->xid); + /* + * Cleanup firmware residuals before returning control back + * to SCSI ML. + */ + rc = bnx2fc_abts_cleanup(io_req); + goto done; + } else { + /* + * We come here even when there was a race condition + * between timeout and abts completion, and abts + * completion happens just in time. 
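+ * In that case BNX2FC_FLAG_ABTS_DONE was already set, so the command
+ * is completed back to SCSI-ml below with DID_ABORT.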
+ */ + BNX2FC_IO_DBG(io_req, "abort succeeded\n"); + rc = SUCCESS; + bnx2fc_scsi_done(io_req, DID_ABORT); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } +done: + /* release the reference taken in eh_abort */ + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return rc; +} + +void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req, + struct fcoe_task_ctx_entry *task, + u8 rx_state) +{ + struct bnx2fc_els_cb_arg *cb_arg = seq_clnp_req->cb_arg; + struct bnx2fc_cmd *orig_io_req = cb_arg->aborted_io_req; + u32 offset = cb_arg->offset; + enum fc_rctl r_ctl = cb_arg->r_ctl; + int rc = 0; + struct bnx2fc_rport *tgt = orig_io_req->tgt; + + BNX2FC_IO_DBG(orig_io_req, "Entered process_cleanup_compl xid = 0x%x" + "cmd_type = %d\n", + seq_clnp_req->xid, seq_clnp_req->cmd_type); + + if (rx_state == FCOE_TASK_RX_STATE_IGNORED_SEQUENCE_CLEANUP) { + printk(KERN_ERR PFX "seq cleanup ignored - xid = 0x%x\n", + seq_clnp_req->xid); + goto free_cb_arg; + } + + spin_unlock_bh(&tgt->tgt_lock); + rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); + spin_lock_bh(&tgt->tgt_lock); + + if (rc) + printk(KERN_ERR PFX "clnup_compl: Unable to send SRR" + " IO will abort\n"); + seq_clnp_req->cb_arg = NULL; + kref_put(&orig_io_req->refcount, bnx2fc_cmd_release); +free_cb_arg: + kfree(cb_arg); + return; +} + +void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq) +{ + BNX2FC_IO_DBG(io_req, "Entered process_cleanup_compl " + "refcnt = %d, cmd_type = %d\n", + kref_read(&io_req->refcount), io_req->cmd_type); + /* + * Test whether there is a cleanup request pending. If not just + * exit. + */ + if (!test_and_clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, + &io_req->req_flags)) + return; + /* + * If we receive a cleanup completion for this request then the + * firmware will not give us an abort completion for this request + * so clear any ABTS pending flags. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags) && + !test_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags)) { + set_bit(BNX2FC_FLAG_ABTS_DONE, &io_req->req_flags); + if (io_req->wait_for_abts_comp) + complete(&io_req->abts_done); + } + + bnx2fc_scsi_done(io_req, DID_ERROR); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + if (io_req->wait_for_cleanup_comp) + complete(&io_req->cleanup_done); +} + +void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq) +{ + u32 r_ctl; + u32 r_a_tov = FC_DEF_R_A_TOV; + u8 issue_rrq = 0; + struct bnx2fc_rport *tgt = io_req->tgt; + + BNX2FC_IO_DBG(io_req, "Entered process_abts_compl xid = 0x%x" + "refcnt = %d, cmd_type = %d\n", + io_req->xid, + kref_read(&io_req->refcount), io_req->cmd_type); + + if (test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "Timer context finished processing" + " this io\n"); + return; + } + + /* + * If we receive an ABTS completion here then we will not receive + * a cleanup completion so clear any cleanup pending flags. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags)) { + clear_bit(BNX2FC_FLAG_ISSUE_CLEANUP_REQ, &io_req->req_flags); + if (io_req->wait_for_cleanup_comp) + complete(&io_req->cleanup_done); + } + + /* Do not issue RRQ as this IO is already cleanedup */ + if (test_and_set_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) + goto io_compl; + + /* + * For ABTS issued due to SCSI eh_abort_handler, timeout + * values are maintained by scsi-ml itself. 
Cancel timeout + * in case ABTS issued as part of task management function + * or due to FW error. + */ + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + r_ctl = (u8)task->rxwr_only.union_ctx.comp_info.abts_rsp.r_ctl; + + switch (r_ctl) { + case FC_RCTL_BA_ACC: + /* + * Dont release this cmd yet. It will be relesed + * after we get RRQ response + */ + BNX2FC_IO_DBG(io_req, "ABTS response - ACC Send RRQ\n"); + issue_rrq = 1; + break; + + case FC_RCTL_BA_RJT: + BNX2FC_IO_DBG(io_req, "ABTS response - RJT\n"); + break; + default: + printk(KERN_ERR PFX "Unknown ABTS response\n"); + break; + } + + if (issue_rrq) { + BNX2FC_IO_DBG(io_req, "Issue RRQ after R_A_TOV\n"); + set_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); + } + set_bit(BNX2FC_FLAG_RETIRE_OXID, &io_req->req_flags); + bnx2fc_cmd_timer_set(io_req, r_a_tov); + +io_compl: + if (io_req->wait_for_abts_comp) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) + complete(&io_req->abts_done); + } else { + /* + * We end up here when ABTS is issued as + * in asynchronous context, i.e., as part + * of task management completion, or + * when FW error is received or when the + * ABTS is issued when the IO is timed + * out. + */ + + if (io_req->on_active_queue) { + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + } + bnx2fc_scsi_done(io_req, DID_ERROR); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } +} + +static void bnx2fc_lun_reset_cmpl(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct bnx2fc_rport *tgt = io_req->tgt; + struct bnx2fc_cmd *cmd, *tmp; + u64 tm_lun = sc_cmd->device->lun; + u64 lun; + int rc = 0; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_lun_reset_cmpl\n"); + /* + * Walk thru the active_ios queue and ABORT the IO + * that matches with the LUN that was reset + */ + list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { + BNX2FC_TGT_DBG(tgt, "LUN RST cmpl: scan for pending IOs\n"); + lun = cmd->sc_cmd->device->lun; + if (lun == tm_lun) { + /* Initiate ABTS on this cmd */ + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &cmd->req_flags)) { + /* cancel the IO timeout */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); + /* timer hold */ + rc = bnx2fc_initiate_abts(cmd); + /* abts shouldn't fail in this context */ + WARN_ON(rc != SUCCESS); + } else + printk(KERN_ERR PFX "lun_rst: abts already in" + " progress for this IO 0x%x\n", + cmd->xid); + } + } +} + +static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_rport *tgt = io_req->tgt; + struct bnx2fc_cmd *cmd, *tmp; + int rc = 0; + + /* called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered bnx2fc_tgt_reset_cmpl\n"); + /* + * Walk thru the active_ios queue and ABORT the IO + * that matches with the LUN that was reset + */ + list_for_each_entry_safe(cmd, tmp, &tgt->active_cmd_queue, link) { + BNX2FC_TGT_DBG(tgt, "TGT RST cmpl: scan for pending IOs\n"); + /* Initiate ABTS */ + if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, + &cmd->req_flags)) { + /* cancel the IO timeout */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* timer hold */ + rc = bnx2fc_initiate_abts(cmd); + /* abts shouldn't fail in this 
context */ + WARN_ON(rc != SUCCESS); + + } else + printk(KERN_ERR PFX "tgt_rst: abts already in progress" + " for this IO 0x%x\n", cmd->xid); + } +} + +void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, u8 num_rq, + unsigned char *rq_data) +{ + struct bnx2fc_mp_req *tm_req; + struct fc_frame_header *fc_hdr; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + u64 *hdr; + u64 *temp_hdr; + void *rsp_buf; + + /* Called with tgt_lock held */ + BNX2FC_IO_DBG(io_req, "Entered process_tm_compl\n"); + + if (!(test_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags))) + set_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags); + else { + /* TM has already timed out and we got + * delayed completion. Ignore completion + * processing. + */ + return; + } + + tm_req = &(io_req->mp_req); + fc_hdr = &(tm_req->resp_fc_hdr); + hdr = (u64 *)fc_hdr; + temp_hdr = (u64 *) + &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr; + hdr[0] = cpu_to_be64(temp_hdr[0]); + hdr[1] = cpu_to_be64(temp_hdr[1]); + hdr[2] = cpu_to_be64(temp_hdr[2]); + + tm_req->resp_len = + task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len; + + rsp_buf = tm_req->resp_buf; + + if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) { + bnx2fc_parse_fcp_rsp(io_req, + (struct fcoe_fcp_rsp_payload *) + rsp_buf, num_rq, rq_data); + if (io_req->fcp_rsp_code == 0) { + /* TM successful */ + if (tm_req->tm_flags & FCP_TMF_LUN_RESET) + bnx2fc_lun_reset_cmpl(io_req); + else if (tm_req->tm_flags & FCP_TMF_TGT_RESET) + bnx2fc_tgt_reset_cmpl(io_req); + } + } else { + printk(KERN_ERR PFX "tmf's fc_hdr r_ctl = 0x%x\n", + fc_hdr->fh_r_ctl); + } + if (!bnx2fc_priv(sc_cmd)->io_req) { + printk(KERN_ERR PFX "tm_compl: io_req is NULL\n"); + return; + } + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + + default: + BNX2FC_IO_DBG(io_req, "process_tm_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + + sc_cmd = io_req->sc_cmd; + io_req->sc_cmd = NULL; + + /* check if the io_req exists in tgt's tmf_q */ + if (io_req->on_tmf_queue) { + + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + } else { + + printk(KERN_ERR PFX "Command not on active_cmd_queue!\n"); + return; + } + + bnx2fc_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); + + kref_put(&io_req->refcount, bnx2fc_cmd_release); + if (io_req->wait_for_abts_comp) { + BNX2FC_IO_DBG(io_req, "tm_compl - wake up the waiter\n"); + complete(&io_req->abts_done); + } +} + +static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, + int bd_index) +{ + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + int frag_size, sg_frags; + + sg_frags = 0; + while (sg_len) { + if (sg_len >= BNX2FC_BD_SPLIT_SZ) + frag_size = BNX2FC_BD_SPLIT_SZ; + else + frag_size = sg_len; + bd[bd_index + sg_frags].buf_addr_lo = addr & 0xffffffff; + bd[bd_index + sg_frags].buf_addr_hi = addr >> 32; + bd[bd_index + sg_frags].buf_len = (u16)frag_size; + bd[bd_index + sg_frags].flags = 0; + + addr += (u64) frag_size; + sg_frags++; + sg_len -= frag_size; + } + return sg_frags; + +} + +static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) +{ + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_bd_ctx *bd 
= io_req->bd_tbl->bd_tbl; + struct scatterlist *sg; + int byte_count = 0; + int sg_count = 0; + int bd_count = 0; + int sg_frags; + unsigned int sg_len; + u64 addr; + int i; + + WARN_ON(scsi_sg_count(sc) > BNX2FC_MAX_BDS_PER_CMD); + /* + * Use dma_map_sg directly to ensure we're using the correct + * dev struct off of pcidev. + */ + sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + scsi_for_each_sg(sc, sg, sg_count, i) { + sg_len = sg_dma_len(sg); + addr = sg_dma_address(sg); + if (sg_len > BNX2FC_MAX_BD_LEN) { + sg_frags = bnx2fc_split_bd(io_req, addr, sg_len, + bd_count); + } else { + + sg_frags = 1; + bd[bd_count].buf_addr_lo = addr & 0xffffffff; + bd[bd_count].buf_addr_hi = addr >> 32; + bd[bd_count].buf_len = (u16)sg_len; + bd[bd_count].flags = 0; + } + bd_count += sg_frags; + byte_count += sg_len; + } + if (byte_count != scsi_bufflen(sc)) + printk(KERN_ERR PFX "byte_count = %d != scsi_bufflen = %d, " + "task_id = 0x%x\n", byte_count, scsi_bufflen(sc), + io_req->xid); + return bd_count; +} + +static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; + int bd_count; + + if (scsi_sg_count(sc)) { + bd_count = bnx2fc_map_sg(io_req); + if (bd_count == 0) + return -ENOMEM; + } else { + bd_count = 0; + bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; + bd[0].buf_len = bd[0].flags = 0; + } + io_req->bd_tbl->bd_valid = bd_count; + + /* + * Return the command to ML if BD count exceeds the max number + * that can be handled by FW. + */ + if (bd_count > BNX2FC_FW_MAX_BDS_PER_CMD) { + pr_err("bd_count = %d exceeded FW supported max BD(255), task_id = 0x%x\n", + bd_count, io_req->xid); + return -ENOMEM; + } + + return 0; +} + +static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) +{ + struct scsi_cmnd *sc = io_req->sc_cmd; + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; + + /* + * Use dma_unmap_sg directly to ensure we're using the correct + * dev struct off of pcidev. 
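+ * The unmap is skipped if the scsi_cmnd is already gone, had no
+ * scatter-gather entries, or the BD table was already invalidated
+ * (bd_valid == 0).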
+ */ + if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) { + dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); + io_req->bd_tbl->bd_valid = 0; + } +} + +void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req, + struct fcp_cmnd *fcp_cmnd) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + + memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd)); + + int_to_scsilun(sc_cmd->device->lun, &fcp_cmnd->fc_lun); + + fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len); + memcpy(fcp_cmnd->fc_cdb, sc_cmd->cmnd, sc_cmd->cmd_len); + + fcp_cmnd->fc_cmdref = 0; + fcp_cmnd->fc_pri_ta = 0; + fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags; + fcp_cmnd->fc_flags = io_req->io_req_flags; + fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE; +} + +static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, + struct fcoe_fcp_rsp_payload *fcp_rsp, + u8 num_rq, unsigned char *rq_data) +{ + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + u8 rsp_flags = fcp_rsp->fcp_flags.flags; + u32 rq_buff_len = 0; + int fcp_sns_len = 0; + int fcp_rsp_len = 0; + + io_req->fcp_status = FC_GOOD; + io_req->fcp_resid = 0; + if (rsp_flags & (FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER | + FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)) + io_req->fcp_resid = fcp_rsp->fcp_resid; + + io_req->scsi_comp_flags = rsp_flags; + io_req->cdb_status = fcp_rsp->scsi_status_code; + + /* Fetch fcp_rsp_info and fcp_sns_info if available */ + if (num_rq) { + + /* + * We do not anticipate num_rq >1, as the linux defined + * SCSI_SENSE_BUFFERSIZE is 96 bytes + 8 bytes of FCP_RSP_INFO + * 256 bytes of single rq buffer is good enough to hold this. + */ + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID) { + fcp_rsp_len = rq_buff_len + = fcp_rsp->fcp_rsp_len; + } + + if (rsp_flags & + FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID) { + fcp_sns_len = fcp_rsp->fcp_sns_len; + rq_buff_len += fcp_rsp->fcp_sns_len; + } + + io_req->fcp_rsp_len = fcp_rsp_len; + io_req->fcp_sns_len = fcp_sns_len; + + if (rq_buff_len > num_rq * BNX2FC_RQ_BUF_SZ) { + /* Invalid sense sense length. 
*/ + printk(KERN_ERR PFX "invalid sns length %d\n", + rq_buff_len); + /* reset rq_buff_len */ + rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ; + } + + /* fetch fcp_rsp_code */ + if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) { + /* Only for task management function */ + io_req->fcp_rsp_code = rq_data[3]; + BNX2FC_IO_DBG(io_req, "fcp_rsp_code = %d\n", + io_req->fcp_rsp_code); + } + + /* fetch sense data */ + rq_data += fcp_rsp_len; + + if (fcp_sns_len > SCSI_SENSE_BUFFERSIZE) { + printk(KERN_ERR PFX "Truncating sense buffer\n"); + fcp_sns_len = SCSI_SENSE_BUFFERSIZE; + } + + memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); + if (fcp_sns_len) + memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); + + } +} + +/** + * bnx2fc_queuecommand - Queuecommand function of the scsi template + * + * @host: The Scsi_Host the command was issued to + * @sc_cmd: struct scsi_cmnd to be executed + * + * This is the IO strategy routine, called by SCSI-ML + **/ +int bnx2fc_queuecommand(struct Scsi_Host *host, + struct scsi_cmnd *sc_cmd) +{ + struct fc_lport *lport = shost_priv(host); + struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device)); + struct fc_rport_libfc_priv *rp = rport->dd_data; + struct bnx2fc_rport *tgt; + struct bnx2fc_cmd *io_req; + int rc = 0; + int rval; + + rval = fc_remote_port_chkready(rport); + if (rval) { + sc_cmd->result = rval; + scsi_done(sc_cmd); + return 0; + } + + if ((lport->state != LPORT_ST_READY) || !(lport->link_up)) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd; + } + + /* rport and tgt are allocated together, so tgt should be non-NULL */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + /* + * Session is not offloaded yet. Let SCSI-ml retry + * the command. + */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + if (tgt->retry_delay_timestamp) { + if (time_after(jiffies, tgt->retry_delay_timestamp)) { + tgt->retry_delay_timestamp = 0; + } else { + /* If retry_delay timer is active, flow off the ML */ + rc = SCSI_MLQUEUE_TARGET_BUSY; + goto exit_qcmd; + } + } + + spin_lock_bh(&tgt->tgt_lock); + + io_req = bnx2fc_cmd_alloc(tgt); + if (!io_req) { + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd_tgtlock; + } + io_req->sc_cmd = sc_cmd; + + if (bnx2fc_post_io_req(tgt, io_req)) { + printk(KERN_ERR PFX "Unable to post io_req\n"); + rc = SCSI_MLQUEUE_HOST_BUSY; + goto exit_qcmd_tgtlock; + } + +exit_qcmd_tgtlock: + spin_unlock_bh(&tgt->tgt_lock); +exit_qcmd: + return rc; +} + +void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req, + struct fcoe_task_ctx_entry *task, + u8 num_rq, unsigned char *rq_data) +{ + struct fcoe_fcp_rsp_payload *fcp_rsp; + struct bnx2fc_rport *tgt = io_req->tgt; + struct scsi_cmnd *sc_cmd; + u16 scope = 0, qualifier = 0; + + /* scsi_cmd_cmpl is called with tgt lock held */ + + if (test_and_set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + /* we will not receive ABTS response for this IO */ + BNX2FC_IO_DBG(io_req, "Timer context finished processing " + "this scsi cmd\n"); + if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP, + &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, + "Actual completion after cleanup request cleaning up\n"); + bnx2fc_process_cleanup_compl(io_req, task, num_rq); + } + return; + } + + /* Cancel the timeout_work, as we received IO completion */ + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + sc_cmd = io_req->sc_cmd; + if (sc_cmd == NULL) { + printk(KERN_ERR PFX "scsi_cmd_compl - 
sc_cmd is NULL\n"); + return; + } + + /* Fetch fcp_rsp from task context and perform cmd completion */ + fcp_rsp = (struct fcoe_fcp_rsp_payload *) + &(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload); + + /* parse fcp_rsp and obtain sense data from RQ if available */ + bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data); + + if (!bnx2fc_priv(sc_cmd)->io_req) { + printk(KERN_ERR PFX "io_req is NULL\n"); + return; + } + + if (io_req->on_active_queue) { + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + /* Move IO req to retire queue */ + list_add_tail(&io_req->link, &tgt->io_retire_queue); + } else { + /* This should not happen, but could have been pulled + * by bnx2fc_flush_active_ios(), or during a race + * between command abort and (late) completion. + */ + BNX2FC_IO_DBG(io_req, "xid not on active_cmd_queue\n"); + if (io_req->wait_for_abts_comp) + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) + complete(&io_req->abts_done); + } + + bnx2fc_unmap_sg_list(io_req); + io_req->sc_cmd = NULL; + + switch (io_req->fcp_status) { + case FC_GOOD: + if (io_req->cdb_status == 0) { + /* Good IO completion */ + sc_cmd->result = DID_OK << 16; + } else { + /* Transport status is good, SCSI status not good */ + BNX2FC_IO_DBG(io_req, "scsi_cmpl: cdb_status = %d" + " fcp_resid = 0x%x\n", + io_req->cdb_status, io_req->fcp_resid); + sc_cmd->result = (DID_OK << 16) | io_req->cdb_status; + + if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL || + io_req->cdb_status == SAM_STAT_BUSY) { + /* Newer array firmware with BUSY or + * TASK_SET_FULL may return a status that needs + * the scope bits masked. + * Or a huge delay timestamp up to 27 minutes + * can result. + */ + if (fcp_rsp->retry_delay_timer) { + /* Upper 2 bits */ + scope = fcp_rsp->retry_delay_timer + & 0xC000; + /* Lower 14 bits */ + qualifier = fcp_rsp->retry_delay_timer + & 0x3FFF; + } + if (scope > 0 && qualifier > 0 && + qualifier <= 0x3FEF) { + /* Set the jiffies + + * retry_delay_timer * 100ms + * for the rport/tgt + */ + tgt->retry_delay_timestamp = jiffies + + (qualifier * HZ / 10); + } + } + } + if (io_req->fcp_resid) + scsi_set_resid(sc_cmd, io_req->fcp_resid); + break; + default: + printk(KERN_ERR PFX "scsi_cmd_compl: fcp_status = %d\n", + io_req->fcp_status); + break; + } + bnx2fc_priv(sc_cmd)->io_req = NULL; + scsi_done(sc_cmd); + kref_put(&io_req->refcount, bnx2fc_cmd_release); +} + +int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, + struct bnx2fc_cmd *io_req) +{ + struct fcoe_task_ctx_entry *task; + struct fcoe_task_ctx_entry *task_page; + struct scsi_cmnd *sc_cmd = io_req->sc_cmd; + struct fcoe_port *port = tgt->port; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct fc_lport *lport = port->lport; + int task_idx, index; + u16 xid; + + /* bnx2fc_post_io_req() is called with the tgt_lock held */ + + /* Initialize rest of io_req fields */ + io_req->cmd_type = BNX2FC_SCSI_CMD; + io_req->port = port; + io_req->tgt = tgt; + io_req->data_xfer_len = scsi_bufflen(sc_cmd); + bnx2fc_priv(sc_cmd)->io_req = io_req; + + if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { + io_req->io_req_flags = BNX2FC_READ; + this_cpu_inc(lport->stats->InputRequests); + this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len); + } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { + io_req->io_req_flags = BNX2FC_WRITE; + this_cpu_inc(lport->stats->OutputRequests); + this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len); + } else { + io_req->io_req_flags = 0; 
+ this_cpu_inc(lport->stats->ControlRequests); + } + + xid = io_req->xid; + + /* Build buffer descriptor list for firmware from sg list */ + if (bnx2fc_build_bd_list_from_sg(io_req)) { + printk(KERN_ERR PFX "BD list creation failed\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + return -EAGAIN; + } + + task_idx = xid / BNX2FC_TASKS_PER_PAGE; + index = xid % BNX2FC_TASKS_PER_PAGE; + + /* Initialize task context for this IO request */ + task_page = (struct fcoe_task_ctx_entry *) hba->task_ctx[task_idx]; + task = &(task_page[index]); + bnx2fc_init_task(io_req, task); + + if (tgt->flush_in_prog) { + printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + return -EAGAIN; + } + + if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { + printk(KERN_ERR PFX "Session not ready...post_io\n"); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + return -EAGAIN; + } + + /* Time IO req */ + if (tgt->io_timeout) + bnx2fc_cmd_timer_set(io_req, BNX2FC_IO_TIMEOUT); + /* Obtain free SQ entry */ + bnx2fc_add_2_sq(tgt, xid); + + /* Enqueue the io_req to active_cmd_queue */ + + io_req->on_active_queue = 1; + /* move io_req from pending_queue to active_queue */ + list_add_tail(&io_req->link, &tgt->active_cmd_queue); + + /* Ring doorbell */ + bnx2fc_ring_doorbell(tgt); + return 0; +} diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c new file mode 100644 index 000000000..2c246e80c --- /dev/null +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -0,0 +1,896 @@ +/* bnx2fc_tgt.c: QLogic Linux FCoE offload driver. + * Handles operations such as session offload/upload etc, and manages + * session resources such as connection id and qp resources. + * + * Copyright (c) 2008-2013 Broadcom Corporation + * Copyright (c) 2014-2016 QLogic Corporation + * Copyright (c) 2016-2017 Cavium Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com) + */ + +#include "bnx2fc.h" +static void bnx2fc_upld_timer(struct timer_list *t); +static void bnx2fc_ofld_timer(struct timer_list *t); +static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, + struct fcoe_port *port, + struct fc_rport_priv *rdata); +static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt); +static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id); + +static void bnx2fc_upld_timer(struct timer_list *t) +{ + + struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer); + + BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n"); + /* fake upload completion */ + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + wake_up_interruptible(&tgt->upld_wait); +} + +static void bnx2fc_ofld_timer(struct timer_list *t) +{ + + struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer); + + BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n"); + /* NOTE: This function should never be called, as + * offload should never timeout + */ + /* + * If the timer has expired, this session is dead + * Clear offloaded flag and logout of this device. 
+ * Since OFFLOADED flag is cleared, this case + * will be considered as offload error and the + * port will be logged off, and conn_id, session + * resources are freed up in bnx2fc_offload_session + */ + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags); + set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + wake_up_interruptible(&tgt->ofld_wait); +} + +static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt) +{ + timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0); + mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT); + + wait_event_interruptible(tgt->ofld_wait, + (test_bit( + BNX2FC_FLAG_OFLD_REQ_CMPL, + &tgt->flags))); + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&tgt->ofld_timer); +} + +static void bnx2fc_offload_session(struct fcoe_port *port, + struct bnx2fc_rport *tgt, + struct fc_rport_priv *rdata) +{ + struct fc_rport *rport = rdata->rport; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + int rval; + int i = 0; + + /* Initialize bnx2fc_rport */ + /* NOTE: tgt is already bzero'd */ + rval = bnx2fc_init_tgt(tgt, port, rdata); + if (rval) { + printk(KERN_ERR PFX "Failed to allocate conn id for " + "port_id (%6x)\n", rport->port_id); + goto tgt_init_err; + } + + /* Allocate session resources */ + rval = bnx2fc_alloc_session_resc(hba, tgt); + if (rval) { + printk(KERN_ERR PFX "Failed to allocate resources\n"); + goto ofld_err; + } + + /* + * Initialize FCoE session offload process. + * Upon completion of offload process add + * rport to list of rports + */ +retry_ofld: + clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + rval = bnx2fc_send_session_ofld_req(port, tgt); + if (rval) { + printk(KERN_ERR PFX "ofld_req failed\n"); + goto ofld_err; + } + + /* + * wait for the session is offloaded and enabled. 3 Secs + * should be ample time for this process to complete. + */ + bnx2fc_ofld_wait(tgt); + + if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) { + if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, + &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, " + "retry ofld..%d\n", i++); + msleep_interruptible(1000); + if (i > 3) { + i = 0; + goto ofld_err; + } + goto retry_ofld; + } + goto ofld_err; + } + if (bnx2fc_map_doorbell(tgt)) { + printk(KERN_ERR PFX "map doorbell failed - no mem\n"); + goto ofld_err; + } + clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags); + rval = bnx2fc_send_session_enable_req(port, tgt); + if (rval) { + pr_err(PFX "enable session failed\n"); + goto ofld_err; + } + bnx2fc_ofld_wait(tgt); + if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) + goto ofld_err; + return; + +ofld_err: + /* couldn't offload the session. 
log off from this rport */ + BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); + clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags); + /* Free session resources */ + bnx2fc_free_session_resc(hba, tgt); +tgt_init_err: + if (tgt->fcoe_conn_id != -1) + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); + fc_rport_logoff(rdata); +} + +void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) +{ + struct bnx2fc_cmd *io_req; + struct bnx2fc_cmd *tmp; + int rc; + int i = 0; + BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n", + tgt->num_active_ios.counter); + + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 1; + + list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) { + i++; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort for IO " + "cleaned up\n"); + complete(&io_req->abts_done); + } + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + } + + set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags); + set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); + + /* Do not issue cleanup when disable request failed */ + if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) + bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); + else { + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + } + + list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) { + i++; + list_del_init(&io_req->link); + io_req->on_tmf_queue = 0; + BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n"); + if (io_req->wait_for_abts_comp) + complete(&io_req->abts_done); + } + + list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) { + i++; + list_del_init(&io_req->link); + io_req->on_active_queue = 0; + + BNX2FC_IO_DBG(io_req, "els_queue cleanup\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) + kref_put(&io_req->refcount, + bnx2fc_cmd_release); /* drop timer hold */ + + if ((io_req->cb_func) && (io_req->cb_arg)) { + io_req->cb_func(io_req->cb_arg); + io_req->cb_arg = NULL; + } + + /* Do not issue cleanup when disable request failed */ + if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) + bnx2fc_process_cleanup_compl(io_req, io_req->task, 0); + else { + rc = bnx2fc_initiate_cleanup(io_req); + BUG_ON(rc); + } + } + + list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) { + i++; + list_del_init(&io_req->link); + + BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); + + if (cancel_delayed_work(&io_req->timeout_work)) { + if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT, + &io_req->req_flags)) { + /* Handle eh_abort timeout */ + BNX2FC_IO_DBG(io_req, "eh_abort for IO " + "in retire_q\n"); + if (io_req->wait_for_abts_comp) + complete(&io_req->abts_done); + } + kref_put(&io_req->refcount, bnx2fc_cmd_release); + } + + clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); + } + + BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i); + i = 0; + spin_unlock_bh(&tgt->tgt_lock); + /* wait for active_ios to go to 0 */ + while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT)) + msleep(25); + if (tgt->num_active_ios.counter != 0) + printk(KERN_ERR PFX "CLEANUP on port 0x%x:" + " active_ios = %d\n", + tgt->rdata->ids.port_id, tgt->num_active_ios.counter); + spin_lock_bh(&tgt->tgt_lock); + tgt->flush_in_prog = 0; + spin_unlock_bh(&tgt->tgt_lock); +} + +static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt) +{ + 
timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0); + mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT); + wait_event_interruptible(tgt->upld_wait, + (test_bit( + BNX2FC_FLAG_UPLD_REQ_COMPL, + &tgt->flags))); + if (signal_pending(current)) + flush_signals(current); + del_timer_sync(&tgt->upld_timer); +} + +static void bnx2fc_upload_session(struct fcoe_port *port, + struct bnx2fc_rport *tgt) +{ + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + + BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n", + tgt->num_active_ios.counter); + + /* + * Called with hba->hba_mutex held. + * This is a blocking call + */ + clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + bnx2fc_send_session_disable_req(port, tgt); + + /* + * wait for upload to complete. 3 Secs + * should be sufficient time for this process to complete. + */ + BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n"); + bnx2fc_upld_wait(tgt); + + /* + * traverse thru the active_q and tmf_q and cleanup + * IOs in these lists + */ + BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n", + tgt->flags); + bnx2fc_flush_active_ios(tgt); + + /* Issue destroy KWQE */ + if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "send destroy req\n"); + clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags); + bnx2fc_send_session_destroy_req(hba, tgt); + + /* wait for destroy to complete */ + bnx2fc_upld_wait(tgt); + + if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags))) + printk(KERN_ERR PFX "ERROR!! destroy timed out\n"); + + BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n", + tgt->flags); + + } else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) { + printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy" + " not sent to FW\n"); + } else { + printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy" + " not sent to FW\n"); + } + + /* Free session resources */ + bnx2fc_free_session_resc(hba, tgt); + bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); +} + +static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt, + struct fcoe_port *port, + struct fc_rport_priv *rdata) +{ + + struct fc_rport *rport = rdata->rport; + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db; + struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db; + + tgt->rport = rport; + tgt->rdata = rdata; + tgt->port = port; + + if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) { + BNX2FC_TGT_DBG(tgt, "exceeded max sessions. 
logoff this tgt\n"); + tgt->fcoe_conn_id = -1; + return -1; + } + + tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt); + if (tgt->fcoe_conn_id == -1) + return -1; + + BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id); + + tgt->max_sqes = BNX2FC_SQ_WQES_MAX; + tgt->max_rqes = BNX2FC_RQ_WQES_MAX; + tgt->max_cqes = BNX2FC_CQ_WQES_MAX; + atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX); + + /* Initialize the toggle bit */ + tgt->sq_curr_toggle_bit = 1; + tgt->cq_curr_toggle_bit = 1; + tgt->sq_prod_idx = 0; + tgt->cq_cons_idx = 0; + tgt->rq_prod_idx = 0x8000; + tgt->rq_cons_idx = 0; + atomic_set(&tgt->num_active_ios, 0); + tgt->retry_delay_timestamp = 0; + + if (rdata->flags & FC_RP_FLAGS_RETRY && + rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET && + !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) { + tgt->dev_type = TYPE_TAPE; + tgt->io_timeout = 0; /* use default ULP timeout */ + } else { + tgt->dev_type = TYPE_DISK; + tgt->io_timeout = BNX2FC_IO_TIMEOUT; + } + + /* initialize sq doorbell */ + sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE; + sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT; + /* initialize rx doorbell */ + rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) | + (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) | + (B577XX_FCOE_CONNECTION_TYPE << + B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT)); + rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) | + (0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT); + + spin_lock_init(&tgt->tgt_lock); + spin_lock_init(&tgt->cq_lock); + + /* Initialize active_cmd_queue list */ + INIT_LIST_HEAD(&tgt->active_cmd_queue); + + /* Initialize IO retire queue */ + INIT_LIST_HEAD(&tgt->io_retire_queue); + + INIT_LIST_HEAD(&tgt->els_queue); + + /* Initialize active_tm_queue list */ + INIT_LIST_HEAD(&tgt->active_tm_queue); + + init_waitqueue_head(&tgt->ofld_wait); + init_waitqueue_head(&tgt->upld_wait); + + return 0; +} + +/* + * This event_callback is called after successful completion of libfc + * initiated target login. bnx2fc can proceed with initiating the session + * establishment. + */ +void bnx2fc_rport_event_handler(struct fc_lport *lport, + struct fc_rport_priv *rdata, + enum fc_rport_event event) +{ + struct fcoe_port *port = lport_priv(lport); + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct fc_rport *rport = rdata->rport; + struct fc_rport_libfc_priv *rp; + struct bnx2fc_rport *tgt; + u32 port_id; + + BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n", + event, rdata->ids.port_id); + switch (event) { + case RPORT_EV_READY: + if (!rport) { + printk(KERN_ERR PFX "rport is NULL: ERROR!\n"); + break; + } + + rp = rport->dd_data; + if (rport->port_id == FC_FID_DIR_SERV) { + /* + * bnx2fc_rport structure doesn't exist for + * directory server. + * We should not come here, as lport will + * take care of fabric login + */ + printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n", + rdata->ids.port_id); + break; + } + + if (rdata->spp_type != FC_TYPE_FCP) { + BNX2FC_HBA_DBG(lport, "not FCP type target." + " not offloading\n"); + break; + } + if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { + BNX2FC_HBA_DBG(lport, "not FCP_TARGET" + " not offloading\n"); + break; + } + + /* + * Offload process is protected with hba mutex. 
+ * Use the same mutex_lock for upload process too + */ + mutex_lock(&hba->hba_mutex); + tgt = (struct bnx2fc_rport *)&rp[1]; + + /* This can happen when ADISC finds the same target */ + if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { + BNX2FC_TGT_DBG(tgt, "already offloaded\n"); + mutex_unlock(&hba->hba_mutex); + return; + } + + /* + * Offload the session. This is a blocking call, and will + * wait until the session is offloaded. + */ + bnx2fc_offload_session(port, tgt, rdata); + + BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n", + hba->num_ofld_sess); + + if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) { + /* Session is offloaded and enabled. */ + BNX2FC_TGT_DBG(tgt, "sess offloaded\n"); + /* This counter is protected with hba mutex */ + hba->num_ofld_sess++; + + set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); + } else { + /* + * Offload or enable would have failed. + * In offload/enable completion path, the + * rport would have already been removed + */ + BNX2FC_TGT_DBG(tgt, "Port is being logged off as " + "offloaded flag not set\n"); + } + mutex_unlock(&hba->hba_mutex); + break; + case RPORT_EV_LOGO: + case RPORT_EV_FAILED: + case RPORT_EV_STOP: + port_id = rdata->ids.port_id; + if (port_id == FC_FID_DIR_SERV) + break; + + if (!rport) { + printk(KERN_INFO PFX "%x - rport not created Yet!!\n", + port_id); + break; + } + rp = rport->dd_data; + mutex_lock(&hba->hba_mutex); + /* + * Perform session upload. Note that rdata->peers is already + * removed from disc->rports list before we get this event. + */ + tgt = (struct bnx2fc_rport *)&rp[1]; + + if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) { + mutex_unlock(&hba->hba_mutex); + break; + } + clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags); + + bnx2fc_upload_session(port, tgt); + hba->num_ofld_sess--; + BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n", + hba->num_ofld_sess); + /* + * Try to wake up the linkdown wait thread. If num_ofld_sess + * is 0, the waiting therad wakes up + */ + if ((hba->wait_for_link_down) && + (hba->num_ofld_sess == 0)) { + wake_up_interruptible(&hba->shutdown_wait); + } + mutex_unlock(&hba->hba_mutex); + + break; + + case RPORT_EV_NONE: + break; + } +} + +/** + * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id + * + * @port: fcoe_port struct to lookup the target port on + * @port_id: The remote port ID to look up + */ +struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port, + u32 port_id) +{ + struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; + struct bnx2fc_rport *tgt; + struct fc_rport_priv *rdata; + int i; + + for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { + tgt = hba->tgt_ofld_list[i]; + if ((tgt) && (tgt->port == port)) { + rdata = tgt->rdata; + if (rdata->ids.port_id == port_id) { + if (rdata->rp_state != RPORT_ST_DELETE) { + BNX2FC_TGT_DBG(tgt, "rport " + "obtained\n"); + return tgt; + } else { + BNX2FC_TGT_DBG(tgt, "rport 0x%x " + "is in DELETED state\n", + rdata->ids.port_id); + return NULL; + } + } + } + } + return NULL; +} + + +/** + * bnx2fc_alloc_conn_id - allocates FCOE Connection id + * + * @hba: pointer to adapter structure + * @tgt: pointer to bnx2fc_rport structure + */ +static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + u32 conn_id, next; + + /* called with hba mutex held */ + + /* + * tgt_ofld_list access is synchronized using + * both hba mutex and hba lock. Atleast hba mutex or + * hba lock needs to be held for read access. 
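+ * The write below (claiming a free slot) and the clear in
+ * bnx2fc_free_conn_id() are likewise done under hba_lock, with the
+ * hba mutex already held by the caller.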
+ */ + + spin_lock_bh(&hba->hba_lock); + next = hba->next_conn_id; + conn_id = hba->next_conn_id++; + if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS) + hba->next_conn_id = 0; + + while (hba->tgt_ofld_list[conn_id] != NULL) { + conn_id++; + if (conn_id == BNX2FC_NUM_MAX_SESS) + conn_id = 0; + + if (conn_id == next) { + /* No free conn_ids are available */ + spin_unlock_bh(&hba->hba_lock); + return -1; + } + } + hba->tgt_ofld_list[conn_id] = tgt; + tgt->fcoe_conn_id = conn_id; + spin_unlock_bh(&hba->hba_lock); + return conn_id; +} + +static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) +{ + /* called with hba mutex held */ + spin_lock_bh(&hba->hba_lock); + hba->tgt_ofld_list[conn_id] = NULL; + spin_unlock_bh(&hba->hba_lock); +} + +/* + * bnx2fc_alloc_session_resc - Allocate qp resources for the session + */ +static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + dma_addr_t page; + int num_pages; + u32 *pbl; + + /* Allocate and map SQ */ + tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE; + tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + &tgt->sq_dma, GFP_KERNEL); + if (!tgt->sq) { + printk(KERN_ERR PFX "unable to allocate SQ memory %d\n", + tgt->sq_mem_size); + goto mem_alloc_failure; + } + + /* Allocate and map CQ */ + tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE; + tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + &tgt->cq_dma, GFP_KERNEL); + if (!tgt->cq) { + printk(KERN_ERR PFX "unable to allocate CQ memory %d\n", + tgt->cq_mem_size); + goto mem_alloc_failure; + } + + /* Allocate and map RQ and RQ PBL */ + tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE; + tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, + &tgt->rq_dma, GFP_KERNEL); + if (!tgt->rq) { + printk(KERN_ERR PFX "unable to allocate RQ memory %d\n", + tgt->rq_mem_size); + goto mem_alloc_failure; + } + + tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + &tgt->rq_pbl_dma, GFP_KERNEL); + if (!tgt->rq_pbl) { + printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n", + tgt->rq_pbl_size); + goto mem_alloc_failure; + } + + num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE; + page = tgt->rq_dma; + pbl = (u32 *)tgt->rq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += CNIC_PAGE_SIZE; + } + + /* Allocate and map XFERQ */ + tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE; + tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, + tgt->xferq_mem_size, &tgt->xferq_dma, + GFP_KERNEL); + if (!tgt->xferq) { + printk(KERN_ERR PFX "unable to allocate XFERQ %d\n", + tgt->xferq_mem_size); + goto mem_alloc_failure; + } + + /* Allocate and map CONFQ & CONFQ PBL */ + tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE; + tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, + tgt->confq_mem_size, &tgt->confq_dma, + GFP_KERNEL); + if (!tgt->confq) { + 
printk(KERN_ERR PFX "unable to allocate CONFQ %d\n", + tgt->confq_mem_size); + goto mem_alloc_failure; + } + + tgt->confq_pbl_size = + (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *); + tgt->confq_pbl_size = + (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK; + + tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev, + tgt->confq_pbl_size, + &tgt->confq_pbl_dma, GFP_KERNEL); + if (!tgt->confq_pbl) { + printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n", + tgt->confq_pbl_size); + goto mem_alloc_failure; + } + + num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE; + page = tgt->confq_dma; + pbl = (u32 *)tgt->confq_pbl; + + while (num_pages--) { + *pbl = (u32)page; + pbl++; + *pbl = (u32)((u64)page >> 32); + pbl++; + page += CNIC_PAGE_SIZE; + } + + /* Allocate and map ConnDB */ + tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db); + + tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev, + tgt->conn_db_mem_size, + &tgt->conn_db_dma, GFP_KERNEL); + if (!tgt->conn_db) { + printk(KERN_ERR PFX "unable to allocate conn_db %d\n", + tgt->conn_db_mem_size); + goto mem_alloc_failure; + } + + + /* Allocate and map LCQ */ + tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE; + tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) & + CNIC_PAGE_MASK; + + tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, + &tgt->lcq_dma, GFP_KERNEL); + + if (!tgt->lcq) { + printk(KERN_ERR PFX "unable to allocate lcq %d\n", + tgt->lcq_mem_size); + goto mem_alloc_failure; + } + + tgt->conn_db->rq_prod = 0x8000; + + return 0; + +mem_alloc_failure: + return -ENOMEM; +} + +/** + * bnx2fc_free_session_resc - free qp resources for the session + * + * @hba: adapter structure pointer + * @tgt: bnx2fc_rport structure pointer + * + * Free QP resources - SQ/RQ/CQ/XFERQ memory and PBL + */ +static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, + struct bnx2fc_rport *tgt) +{ + void __iomem *ctx_base_ptr; + + BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); + + spin_lock_bh(&tgt->cq_lock); + ctx_base_ptr = tgt->ctx_base; + tgt->ctx_base = NULL; + + /* Free LCQ */ + if (tgt->lcq) { + dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, + tgt->lcq, tgt->lcq_dma); + tgt->lcq = NULL; + } + /* Free connDB */ + if (tgt->conn_db) { + dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size, + tgt->conn_db, tgt->conn_db_dma); + tgt->conn_db = NULL; + } + /* Free confq and confq pbl */ + if (tgt->confq_pbl) { + dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size, + tgt->confq_pbl, tgt->confq_pbl_dma); + tgt->confq_pbl = NULL; + } + if (tgt->confq) { + dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size, + tgt->confq, tgt->confq_dma); + tgt->confq = NULL; + } + /* Free XFERQ */ + if (tgt->xferq) { + dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size, + tgt->xferq, tgt->xferq_dma); + tgt->xferq = NULL; + } + /* Free RQ PBL and RQ */ + if (tgt->rq_pbl) { + dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size, + tgt->rq_pbl, tgt->rq_pbl_dma); + tgt->rq_pbl = NULL; + } + if (tgt->rq) { + dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size, + tgt->rq, tgt->rq_dma); + tgt->rq = NULL; + } + /* Free CQ */ + if (tgt->cq) { + dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size, + tgt->cq, tgt->cq_dma); + tgt->cq = NULL; + } + /* Free SQ */ + if (tgt->sq) { + dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size, + tgt->sq, tgt->sq_dma); + tgt->sq = NULL; + } + spin_unlock_bh(&tgt->cq_lock); + + if (ctx_base_ptr) + iounmap(ctx_base_ptr); +} |
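The RQ and CONFQ page tables built in bnx2fc_alloc_session_resc() above share one layout: the queue memory is first rounded up to a whole number of CNIC pages, and the page buffer list (PBL) then carries two 32-bit words per page, the low half of that page's DMA address followed by the high half. The stand-alone sketch below only illustrates that layout; PAGE_SZ, fill_pbl() and the 4 KiB page size are assumptions made for this note, not symbols from the driver.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* stand-in for CNIC_PAGE_SIZE, assumed 4 KiB here */

/*
 * Write one {lo, hi} pair of 32-bit words per queue page, low word
 * first, mirroring the RQ/CONFQ PBL loops in bnx2fc_alloc_session_resc().
 */
static void fill_pbl(uint32_t *pbl, uint64_t base_dma, size_t queue_bytes)
{
	size_t num_pages = queue_bytes / PAGE_SZ;
	uint64_t page = base_dma;

	while (num_pages--) {
		*pbl++ = (uint32_t)page;		/* low 32 bits of page address */
		*pbl++ = (uint32_t)(page >> 32);	/* high 32 bits of page address */
		page += PAGE_SZ;
	}
}

int main(void)
{
	uint32_t pbl[8];
	int i;

	/* Describe a 4-page queue starting at an arbitrary 64-bit address. */
	fill_pbl(pbl, 0x0000000123456000ull, 4 * PAGE_SZ);
	for (i = 0; i < 8; i += 2)
		printf("page %d: lo=0x%08x hi=0x%08x\n",
		       i / 2, (unsigned)pbl[i], (unsigned)pbl[i + 1]);
	return 0;
}

queue_bytes is assumed to be page-aligned when fill_pbl() is called, which matches the driver: each queue size is rounded up with CNIC_PAGE_MASK before its PBL is filled.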