author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree       b2d64bc10158fdd5497876388cd68142ca374ed3   /drivers/net/ethernet/brocade
parent     Initial commit.
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/brocade')
31 files changed, 22392 insertions, 0 deletions
diff --git a/drivers/net/ethernet/brocade/Kconfig b/drivers/net/ethernet/brocade/Kconfig new file mode 100644 index 0000000000..fb4c3cdf72 --- /dev/null +++ b/drivers/net/ethernet/brocade/Kconfig @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# QLogic BR-series device configuration +# + +config NET_VENDOR_BROCADE + bool "QLogic BR-series devices" + default y + depends on PCI + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about QLogic BR-series cards. If you say Y, you will be + asked for your specific card in the following questions. + +if NET_VENDOR_BROCADE + +source "drivers/net/ethernet/brocade/bna/Kconfig" + +endif # NET_VENDOR_BROCADE diff --git a/drivers/net/ethernet/brocade/Makefile b/drivers/net/ethernet/brocade/Makefile new file mode 100644 index 0000000000..88b2f40267 --- /dev/null +++ b/drivers/net/ethernet/brocade/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the QLogic BR-series device drivers. +# + +obj-$(CONFIG_BNA) += bna/ diff --git a/drivers/net/ethernet/brocade/bna/Kconfig b/drivers/net/ethernet/brocade/bna/Kconfig new file mode 100644 index 0000000000..0a48ad93ed --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/Kconfig @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# QLogic BR-series network device configuration +# + +config BNA + tristate "QLogic BR-series 1010/1020/1860 10Gb Ethernet Driver support" + depends on PCI + help + This driver supports QLogic BR-series 1010/1020/1860 10Gb CEE capable + Ethernet cards. + To compile this driver as a module, choose M here: the module + will be called bna. + + For general information and support, go to the QLogic support + website at: + + <http://support.qlogic.com> diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile new file mode 100644 index 0000000000..d804b30c33 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (c) 2005-2014 Brocade Communications Systems, Inc. +# Copyright (c) 2014-2015 QLogic Corporation. +# All rights reserved. +# + +obj-$(CONFIG_BNA) += bna.o + +bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o +bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o +bna-objs += cna_fwimg.o diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c new file mode 100644 index 0000000000..eeb05e3171 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "bfa_cee.h" +#include "bfi_cna.h" +#include "bfa_ioc.h" + +static void bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg); +static void bfa_cee_format_cee_cfg(void *buffer); + +static void +bfa_cee_format_cee_cfg(void *buffer) +{ + struct bfa_cee_attr *cee_cfg = buffer; + bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote); +} + +static void +bfa_cee_stats_swap(struct bfa_cee_stats *stats) +{ + u32 *buffer = (u32 *)stats; + int i; + + for (i = 0; i < (sizeof(struct bfa_cee_stats) / sizeof(u32)); + i++) { + buffer[i] = ntohl(buffer[i]); + } +} + +static void +bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg *lldp_cfg) +{ + lldp_cfg->time_to_live = + ntohs(lldp_cfg->time_to_live); + lldp_cfg->enabled_system_cap = + ntohs(lldp_cfg->enabled_system_cap); +} + +/** + * bfa_cee_attr_meminfo - Returns the size of the DMA memory needed by CEE attributes + */ +static u32 +bfa_cee_attr_meminfo(void) +{ + return roundup(sizeof(struct bfa_cee_attr), BFA_DMA_ALIGN_SZ); +} +/** + * bfa_cee_stats_meminfo - Returns the size of the DMA memory needed by CEE stats + */ +static u32 +bfa_cee_stats_meminfo(void) +{ + return roundup(sizeof(struct bfa_cee_stats), BFA_DMA_ALIGN_SZ); +} + +/** + * bfa_cee_get_attr_isr - CEE ISR for get-attributes responses from f/w + * + * @cee: Pointer to the CEE module + * @status: Return status from the f/w + */ +static void +bfa_cee_get_attr_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->get_attr_status = status; + if (status == BFA_STATUS_OK) { + memcpy(cee->attr, cee->attr_dma.kva, + sizeof(struct bfa_cee_attr)); + bfa_cee_format_cee_cfg(cee->attr); + } + cee->get_attr_pending = false; + if (cee->cbfn.get_attr_cbfn) + cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status); +} + +/** + * bfa_cee_get_stats_isr - CEE ISR for get-stats responses from f/w + * + * @cee: Pointer to the CEE module + * @status: Return status from the f/w + */ +static void +bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->get_stats_status = status; + if (status == BFA_STATUS_OK) { + memcpy(cee->stats, cee->stats_dma.kva, + sizeof(struct bfa_cee_stats)); + bfa_cee_stats_swap(cee->stats); + } + cee->get_stats_pending = false; + if (cee->cbfn.get_stats_cbfn) + cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status); +} + +/** + * bfa_cee_reset_stats_isr - CEE ISR for reset-stats responses from f/w + * + * @cee: Input Pointer to the CEE module + * @status: Return status from the f/w + */ +static void +bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status) +{ + cee->reset_stats_status = status; + cee->reset_stats_pending = false; + if (cee->cbfn.reset_stats_cbfn) + cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status); +} +/** + * bfa_nw_cee_meminfo - Returns the size of the DMA memory needed by CEE module + */ +u32 +bfa_nw_cee_meminfo(void) +{ + return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo(); +} + +/** + * bfa_nw_cee_mem_claim - Initialized CEE DMA Memory + * + * @cee: CEE module pointer + * @dma_kva: Kernel Virtual Address of CEE DMA Memory + * @dma_pa: Physical Address of CEE DMA Memory + */ +void +bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa) +{ + cee->attr_dma.kva = dma_kva; + cee->attr_dma.pa = dma_pa; + cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo(); + cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo(); + cee->attr = (struct bfa_cee_attr *) dma_kva; + cee->stats = (struct bfa_cee_stats 
*) + (dma_kva + bfa_cee_attr_meminfo()); +} + +/** + * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes. + * + * @cee: Pointer to the CEE module data structure. + * @attr: attribute requested + * @cbfn: function pointer + * @cbarg: function pointer arguments + * + * Return: status + */ +enum bfa_status +bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg) +{ + struct bfi_cee_get_req *cmd; + + BUG_ON(!((cee != NULL) && (cee->ioc != NULL))); + if (!bfa_nw_ioc_is_operational(cee->ioc)) + return BFA_STATUS_IOC_FAILURE; + + if (cee->get_attr_pending) + return BFA_STATUS_DEVBUSY; + + cee->get_attr_pending = true; + cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg; + cee->attr = attr; + cee->cbfn.get_attr_cbfn = cbfn; + cee->cbfn.get_attr_cbarg = cbarg; + bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ, + bfa_ioc_portid(cee->ioc)); + bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa); + bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL); + + return BFA_STATUS_OK; +} + +/** + * bfa_cee_isr - Handles Mail-box interrupts for CEE module. + * @cbarg: argument passed containing pointer to the CEE module data structure. + * @m: message pointer + */ + +static void +bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m) +{ + union bfi_cee_i2h_msg_u *msg; + struct bfi_cee_get_rsp *get_rsp; + struct bfa_cee *cee = (struct bfa_cee *) cbarg; + msg = (union bfi_cee_i2h_msg_u *) m; + get_rsp = (struct bfi_cee_get_rsp *) m; + switch (msg->mh.msg_id) { + case BFI_CEE_I2H_GET_CFG_RSP: + bfa_cee_get_attr_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_GET_STATS_RSP: + bfa_cee_get_stats_isr(cee, get_rsp->cmd_status); + break; + case BFI_CEE_I2H_RESET_STATS_RSP: + bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status); + break; + default: + BUG_ON(1); + } +} + +/** + * bfa_cee_notify - CEE module heart-beat failure handler. + * + * @arg: argument passed containing pointer to the CEE module data structure. + * @event: IOC event type + */ + +static void +bfa_cee_notify(void *arg, enum bfa_ioc_event event) +{ + struct bfa_cee *cee; + cee = (struct bfa_cee *) arg; + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (cee->get_attr_pending) { + cee->get_attr_status = BFA_STATUS_FAILED; + cee->get_attr_pending = false; + if (cee->cbfn.get_attr_cbfn) { + cee->cbfn.get_attr_cbfn( + cee->cbfn.get_attr_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->get_stats_pending) { + cee->get_stats_status = BFA_STATUS_FAILED; + cee->get_stats_pending = false; + if (cee->cbfn.get_stats_cbfn) { + cee->cbfn.get_stats_cbfn( + cee->cbfn.get_stats_cbarg, + BFA_STATUS_FAILED); + } + } + if (cee->reset_stats_pending) { + cee->reset_stats_status = BFA_STATUS_FAILED; + cee->reset_stats_pending = false; + if (cee->cbfn.reset_stats_cbfn) { + cee->cbfn.reset_stats_cbfn( + cee->cbfn.reset_stats_cbarg, + BFA_STATUS_FAILED); + } + } + break; + + default: + break; + } +} + +/** + * bfa_nw_cee_attach - CEE module-attach API + * + * @cee: Pointer to the CEE module data structure + * @ioc: Pointer to the ioc module data structure + * @dev: Pointer to the device driver module data structure. + * The device driver specific mbox ISR functions have + * this pointer as one of the parameters. 
+ */ +void +bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, + void *dev) +{ + BUG_ON(!(cee != NULL)); + cee->dev = dev; + cee->ioc = ioc; + + bfa_nw_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee); + bfa_ioc_notify_init(&cee->ioc_notify, bfa_cee_notify, cee); + bfa_nw_ioc_notify_register(cee->ioc, &cee->ioc_notify); +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h new file mode 100644 index 0000000000..8e628bb54b --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_CEE_H__ +#define __BFA_CEE_H__ + +#include "bfa_defs_cna.h" +#include "bfa_ioc.h" + +typedef void (*bfa_cee_get_attr_cbfn_t) (void *dev, enum bfa_status status); +typedef void (*bfa_cee_get_stats_cbfn_t) (void *dev, enum bfa_status status); +typedef void (*bfa_cee_reset_stats_cbfn_t) (void *dev, enum bfa_status status); + +struct bfa_cee_cbfn { + bfa_cee_get_attr_cbfn_t get_attr_cbfn; + void *get_attr_cbarg; + bfa_cee_get_stats_cbfn_t get_stats_cbfn; + void *get_stats_cbarg; + bfa_cee_reset_stats_cbfn_t reset_stats_cbfn; + void *reset_stats_cbarg; +}; + +struct bfa_cee { + void *dev; + bool get_attr_pending; + bool get_stats_pending; + bool reset_stats_pending; + enum bfa_status get_attr_status; + enum bfa_status get_stats_status; + enum bfa_status reset_stats_status; + struct bfa_cee_cbfn cbfn; + struct bfa_ioc_notify ioc_notify; + struct bfa_cee_attr *attr; + struct bfa_cee_stats *stats; + struct bfa_dma attr_dma; + struct bfa_dma stats_dma; + struct bfa_ioc *ioc; + struct bfa_mbox_cmd get_cfg_mb; + struct bfa_mbox_cmd get_stats_mb; + struct bfa_mbox_cmd reset_stats_mb; +}; + +u32 bfa_nw_cee_meminfo(void); +void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, + u64 dma_pa); +void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev); +enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee, + struct bfa_cee_attr *attr, + bfa_cee_get_attr_cbfn_t cbfn, void *cbarg); +#endif /* __BFA_CEE_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_cs.h b/drivers/net/ethernet/brocade/bna/bfa_cs.h new file mode 100644 index 0000000000..858c921294 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_cs.h @@ -0,0 +1,119 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* BFA common services */ + +#ifndef __BFA_CS_H__ +#define __BFA_CS_H__ + +#include "cna.h" + +/* BFA state machine interfaces */ + +/* For converting from state machine function to state encoding. 
*/ +#define BFA_SM_TABLE(n, s, e, t) \ +struct s; \ +enum e; \ +typedef void (*t)(struct s *, enum e); \ + \ +struct n ## _sm_table_s { \ + t sm; /* state machine function */ \ + int state; /* state machine encoding */ \ + char *name; /* state name for display */ \ +}; \ + \ +static inline int \ +n ## _sm_to_state(struct n ## _sm_table_s *smt, t sm) \ +{ \ + int i = 0; \ + \ + while (smt[i].sm && smt[i].sm != sm) \ + i++; \ + return smt[i].state; \ +} + +BFA_SM_TABLE(iocpf, bfa_iocpf, iocpf_event, bfa_fsm_iocpf_t) +BFA_SM_TABLE(ioc, bfa_ioc, ioc_event, bfa_fsm_ioc_t) +BFA_SM_TABLE(cmdq, bfa_msgq_cmdq, cmdq_event, bfa_fsm_msgq_cmdq_t) +BFA_SM_TABLE(rspq, bfa_msgq_rspq, rspq_event, bfa_fsm_msgq_rspq_t) + +BFA_SM_TABLE(ioceth, bna_ioceth, bna_ioceth_event, bna_fsm_ioceth_t) +BFA_SM_TABLE(enet, bna_enet, bna_enet_event, bna_fsm_enet_t) +BFA_SM_TABLE(ethport, bna_ethport, bna_ethport_event, bna_fsm_ethport_t) +BFA_SM_TABLE(tx, bna_tx, bna_tx_event, bna_fsm_tx_t) +BFA_SM_TABLE(rxf, bna_rxf, bna_rxf_event, bna_fsm_rxf_t) +BFA_SM_TABLE(rx, bna_rx, bna_rx_event, bna_fsm_rx_t) + +#undef BFA_SM_TABLE + +#define BFA_SM(_sm) (_sm) + +/* State machine with entry actions. */ +typedef void (*bfa_fsm_t)(void *fsm, int event); + +/* oc - object class eg. bfa_ioc + * st - state, eg. reset + * otype - object type, eg. struct bfa_ioc + * etype - object type, eg. enum ioc_event + */ +#define bfa_fsm_state_decl(oc, st, otype, etype) \ + static void oc ## _sm_ ## st(otype * fsm, etype event); \ + static void oc ## _sm_ ## st ## _entry(otype * fsm) + +#define bfa_fsm_set_state(_fsm, _state) do { \ + (_fsm)->fsm = (_state); \ + _state ## _entry(_fsm); \ +} while (0) + +#define bfa_fsm_send_event(_fsm, _event) ((_fsm)->fsm((_fsm), (_event))) +#define bfa_fsm_cmp_state(_fsm, _state) ((_fsm)->fsm == (_state)) +/* Generic wait counter. */ + +typedef void (*bfa_wc_resume_t) (void *cbarg); + +struct bfa_wc { + bfa_wc_resume_t wc_resume; + void *wc_cbarg; + int wc_count; +}; + +static inline void +bfa_wc_up(struct bfa_wc *wc) +{ + wc->wc_count++; +} + +static inline void +bfa_wc_down(struct bfa_wc *wc) +{ + wc->wc_count--; + if (wc->wc_count == 0) + wc->wc_resume(wc->wc_cbarg); +} + +/* Initialize a waiting counter. */ +static inline void +bfa_wc_init(struct bfa_wc *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg) +{ + wc->wc_resume = wc_resume; + wc->wc_cbarg = wc_cbarg; + wc->wc_count = 0; + bfa_wc_up(wc); +} + +/* Wait for counter to reach zero */ +static inline void +bfa_wc_wait(struct bfa_wc *wc) +{ + bfa_wc_down(wc); +} + +#endif /* __BFA_CS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h new file mode 100644 index 0000000000..b08b16864b --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h @@ -0,0 +1,283 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_DEFS_H__ +#define __BFA_DEFS_H__ + +#include "cna.h" +#include "bfa_defs_status.h" +#include "bfa_defs_mfg_comm.h" + +#define BFA_VERSION_LEN 64 + +/* ---------------------- adapter definitions ------------ */ + +/* BFA adapter level attributes. 
*/ +enum { + BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE), + /* + *!< adapter serial num length + */ + BFA_ADAPTER_MODEL_NAME_LEN = 16, /*!< model name length */ + BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*!< model description length */ + BFA_ADAPTER_MFG_NAME_LEN = 8, /*!< manufacturer name length */ + BFA_ADAPTER_SYM_NAME_LEN = 64, /*!< adapter symbolic name length */ + BFA_ADAPTER_OS_TYPE_LEN = 64, /*!< adapter os type length */ +}; + +struct bfa_adapter_attr { + char manufacturer[BFA_ADAPTER_MFG_NAME_LEN]; + char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN]; + u32 card_type; + char model[BFA_ADAPTER_MODEL_NAME_LEN]; + char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN]; + u64 pwwn; + char node_symname[FC_SYMNAME_MAX]; + char hw_ver[BFA_VERSION_LEN]; + char fw_ver[BFA_VERSION_LEN]; + char optrom_ver[BFA_VERSION_LEN]; + char os_type[BFA_ADAPTER_OS_TYPE_LEN]; + struct bfa_mfg_vpd vpd; + u8 mac[ETH_ALEN]; + + u8 nports; + u8 max_speed; + u8 prototype; + char asic_rev; + + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 cna_capable; + + u8 is_mezz; + u8 trunk_capable; +}; + +/* ---------------------- IOC definitions ------------ */ + +enum { + BFA_IOC_DRIVER_LEN = 16, + BFA_IOC_CHIP_REV_LEN = 8, +}; + +/* Driver and firmware versions. */ +struct bfa_ioc_driver_attr { + char driver[BFA_IOC_DRIVER_LEN]; /*!< driver name */ + char driver_ver[BFA_VERSION_LEN]; /*!< driver version */ + char fw_ver[BFA_VERSION_LEN]; /*!< firmware version */ + char bios_ver[BFA_VERSION_LEN]; /*!< bios version */ + char efi_ver[BFA_VERSION_LEN]; /*!< EFI version */ + char ob_ver[BFA_VERSION_LEN]; /*!< openboot version */ +}; + +/* IOC PCI device attributes */ +struct bfa_ioc_pci_attr { + u16 vendor_id; /*!< PCI vendor ID */ + u16 device_id; /*!< PCI device ID */ + u16 ssid; /*!< subsystem ID */ + u16 ssvid; /*!< subsystem vendor ID */ + u32 pcifn; /*!< PCI device function */ + u32 rsvd; /* padding */ + char chip_rev[BFA_IOC_CHIP_REV_LEN]; /*!< chip revision */ +}; + +/* IOC states */ +enum bfa_ioc_state { + BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */ + BFA_IOC_RESET = 2, /*!< IOC is in reset state */ + BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */ + BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */ + BFA_IOC_GETATTR = 5, /*!< IOC is being configured */ + BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */ + BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */ + BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */ + BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */ + BFA_IOC_DISABLED = 10, /*!< IOC is disabled */ + BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */ + BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */ + BFA_IOC_HWFAIL = 13, /*!< PCI mapping doesn't exist */ +}; + +/* IOC firmware stats */ +struct bfa_fw_ioc_stats { + u32 enable_reqs; + u32 disable_reqs; + u32 get_attr_reqs; + u32 dbg_sync; + u32 dbg_dump; + u32 unknown_reqs; +}; + +/* IOC driver stats */ +struct bfa_ioc_drv_stats { + u32 ioc_isrs; + u32 ioc_enables; + u32 ioc_disables; + u32 ioc_hbfails; + u32 ioc_boots; + u32 stats_tmos; + u32 hb_count; + u32 disable_reqs; + u32 enable_reqs; + u32 disable_replies; + u32 enable_replies; + u32 rsvd; +}; + +/* IOC statistics */ +struct bfa_ioc_stats { + struct bfa_ioc_drv_stats drv_stats; /*!< driver IOC stats */ + struct bfa_fw_ioc_stats fw_stats; /*!< firmware IOC stats */ +}; + +enum bfa_ioc_type { + BFA_IOC_TYPE_FC = 1, + BFA_IOC_TYPE_FCoE = 2, + BFA_IOC_TYPE_LL = 3, +}; + +/* IOC attributes returned in queries */ +struct bfa_ioc_attr { + enum bfa_ioc_type 
ioc_type; + enum bfa_ioc_state state; /*!< IOC state */ + struct bfa_adapter_attr adapter_attr; /*!< HBA attributes */ + struct bfa_ioc_driver_attr driver_attr; /*!< driver attr */ + struct bfa_ioc_pci_attr pci_attr; + u8 port_id; /*!< port number */ + u8 port_mode; /*!< enum bfa_mode */ + u8 cap_bm; /*!< capability */ + u8 port_mode_cfg; /*!< enum bfa_mode */ + u8 def_fn; /*!< 1 if default fn */ + u8 rsvd[3]; /*!< 64bit align */ +}; + +/* Adapter capability mask definition */ +enum { + BFA_CM_HBA = 0x01, + BFA_CM_CNA = 0x02, + BFA_CM_NIC = 0x04, +}; + +/* ---------------------- mfg definitions ------------ */ + +/* Checksum size */ +#define BFA_MFG_CHKSUM_SIZE 16 + +#define BFA_MFG_PARTNUM_SIZE 14 +#define BFA_MFG_SUPPLIER_ID_SIZE 10 +#define BFA_MFG_SUPPLIER_PARTNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE 20 +#define BFA_MFG_SUPPLIER_REVISION_SIZE 4 + +/* BFA adapter manufacturing block definition. + * + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_block { + u8 version; /* manufacturing block version */ + u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */ + u16 mfgsize; /* mfg block size */ + u16 u16_chksum; /* old u16 checksum */ + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)]; + u8 mfg_day; /* manufacturing day */ + u8 mfg_month; /* manufacturing month */ + u16 mfg_year; /* manufacturing year */ + u64 mfg_wwn; /* wwn base for this adapter */ + u8 num_wwn; /* number of wwns assigned */ + u8 mfg_speeds; /* speeds allowed for this adapter */ + u8 rsv[2]; + char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)]; + char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)]; + char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)]; + char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)]; + u8 mfg_mac[ETH_ALEN]; /* base mac address */ + u8 num_mac; /* number of mac addresses */ + u8 rsv2; + u32 card_type; /* card type */ + char cap_nic; /* capability nic */ + char cap_cna; /* capability cna */ + char cap_hba; /* capability hba */ + char cap_fc16g; /* capability fc 16g */ + char cap_sriov; /* capability sriov */ + char cap_mezz; /* capability mezz */ + u8 rsv3; + u8 mfg_nports; /* number of ports */ + char media[8]; /* xfi/xaui */ + char initial_mode[8]; /* initial mode: hba/cna/nic */ + u8 rsv4[84]; + u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */ +} __packed; + +/* ---------------------- pci definitions ------------ */ + +/* + * PCI device ID information + */ +enum { + BFA_PCI_DEVICE_ID_CT2 = 0x22, +}; + +#define bfa_asic_id_ct(device) \ + ((device) == PCI_DEVICE_ID_BROCADE_CT || \ + (device) == PCI_DEVICE_ID_BROCADE_CT_FC) +#define bfa_asic_id_ct2(device) \ + ((device) == BFA_PCI_DEVICE_ID_CT2) +#define bfa_asic_id_ctc(device) \ + (bfa_asic_id_ct(device) || bfa_asic_id_ct2(device)) + +/* PCI sub-system device and vendor ID information */ +enum { + BFA_PCI_FCOE_SSDEVICE_ID = 0x14, + BFA_PCI_CT2_SSID_FCoE = 0x22, + BFA_PCI_CT2_SSID_ETH = 0x23, + BFA_PCI_CT2_SSID_FC = 0x24, +}; + +enum bfa_mode { + BFA_MODE_HBA = 1, + BFA_MODE_CNA = 2, + BFA_MODE_NIC = 3 +}; + +/* + * Flash module specific + */ +#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */ +#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */ +#define BFA_TOTAL_FLASH_SIZE 0x400000 +#define BFA_FLASH_PART_FWIMG 2 +#define BFA_FLASH_PART_MFG 7 + +/* + * flash partition attributes + */ +struct bfa_flash_part_attr { + u32 part_type; /* partition type */ + u32 part_instance; /* partition instance */ + u32 part_off; /* 
partition offset */ + u32 part_size; /* partition size */ + u32 part_len; /* partition content length */ + u32 part_status; /* partition status */ + char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24]; +}; + +/* + * flash attributes + */ +struct bfa_flash_attr { + u32 status; /* flash overall status */ + u32 npart; /* num of partitions */ + struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX]; +}; + +#endif /* __BFA_DEFS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h new file mode 100644 index 0000000000..50d3562f7d --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_cna.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFA_DEFS_CNA_H__ +#define __BFA_DEFS_CNA_H__ + +#include "bfa_defs.h" + +/* FC physical port statistics. */ +struct bfa_port_fc_stats { + u64 secs_reset; /*!< Seconds since stats is reset */ + u64 tx_frames; /*!< Tx frames */ + u64 tx_words; /*!< Tx words */ + u64 tx_lip; /*!< Tx LIP */ + u64 tx_nos; /*!< Tx NOS */ + u64 tx_ols; /*!< Tx OLS */ + u64 tx_lr; /*!< Tx LR */ + u64 tx_lrr; /*!< Tx LRR */ + u64 rx_frames; /*!< Rx frames */ + u64 rx_words; /*!< Rx words */ + u64 lip_count; /*!< Rx LIP */ + u64 nos_count; /*!< Rx NOS */ + u64 ols_count; /*!< Rx OLS */ + u64 lr_count; /*!< Rx LR */ + u64 lrr_count; /*!< Rx LRR */ + u64 invalid_crcs; /*!< Rx CRC err frames */ + u64 invalid_crc_gd_eof; /*!< Rx CRC err good EOF frames */ + u64 undersized_frm; /*!< Rx undersized frames */ + u64 oversized_frm; /*!< Rx oversized frames */ + u64 bad_eof_frm; /*!< Rx frames with bad EOF */ + u64 error_frames; /*!< Errored frames */ + u64 dropped_frames; /*!< Dropped frames */ + u64 link_failures; /*!< Link Failure (LF) count */ + u64 loss_of_syncs; /*!< Loss of sync count */ + u64 loss_of_signals; /*!< Loss of signal count */ + u64 primseq_errs; /*!< Primitive sequence protocol err. */ + u64 bad_os_count; /*!< Invalid ordered sets */ + u64 err_enc_out; /*!< Encoding err nonframe_8b10b */ + u64 err_enc; /*!< Encoding err frame_8b10b */ + u64 bbsc_frames_lost; /*!< Credit Recovery-Frames Lost */ + u64 bbsc_credits_lost; /*!< Credit Recovery-Credits Lost */ + u64 bbsc_link_resets; /*!< Credit Recovery-Link Resets */ +}; + +/* Eth Physical Port statistics. 
*/ +struct bfa_port_eth_stats { + u64 secs_reset; /*!< Seconds since stats is reset */ + u64 frame_64; /*!< Frames 64 bytes */ + u64 frame_65_127; /*!< Frames 65-127 bytes */ + u64 frame_128_255; /*!< Frames 128-255 bytes */ + u64 frame_256_511; /*!< Frames 256-511 bytes */ + u64 frame_512_1023; /*!< Frames 512-1023 bytes */ + u64 frame_1024_1518; /*!< Frames 1024-1518 bytes */ + u64 frame_1519_1522; /*!< Frames 1519-1522 bytes */ + u64 tx_bytes; /*!< Tx bytes */ + u64 tx_packets; /*!< Tx packets */ + u64 tx_mcast_packets; /*!< Tx multicast packets */ + u64 tx_bcast_packets; /*!< Tx broadcast packets */ + u64 tx_control_frame; /*!< Tx control frame */ + u64 tx_drop; /*!< Tx drops */ + u64 tx_jabber; /*!< Tx jabber */ + u64 tx_fcs_error; /*!< Tx FCS errors */ + u64 tx_fragments; /*!< Tx fragments */ + u64 rx_bytes; /*!< Rx bytes */ + u64 rx_packets; /*!< Rx packets */ + u64 rx_mcast_packets; /*!< Rx multicast packets */ + u64 rx_bcast_packets; /*!< Rx broadcast packets */ + u64 rx_control_frames; /*!< Rx control frames */ + u64 rx_unknown_opcode; /*!< Rx unknown opcode */ + u64 rx_drop; /*!< Rx drops */ + u64 rx_jabber; /*!< Rx jabber */ + u64 rx_fcs_error; /*!< Rx FCS errors */ + u64 rx_alignment_error; /*!< Rx alignment errors */ + u64 rx_frame_length_error; /*!< Rx frame len errors */ + u64 rx_code_error; /*!< Rx code errors */ + u64 rx_fragments; /*!< Rx fragments */ + u64 rx_pause; /*!< Rx pause */ + u64 rx_zero_pause; /*!< Rx zero pause */ + u64 tx_pause; /*!< Tx pause */ + u64 tx_zero_pause; /*!< Tx zero pause */ + u64 rx_fcoe_pause; /*!< Rx FCoE pause */ + u64 rx_fcoe_zero_pause; /*!< Rx FCoE zero pause */ + u64 tx_fcoe_pause; /*!< Tx FCoE pause */ + u64 tx_fcoe_zero_pause; /*!< Tx FCoE zero pause */ + u64 rx_iscsi_pause; /*!< Rx iSCSI pause */ + u64 rx_iscsi_zero_pause; /*!< Rx iSCSI zero pause */ + u64 tx_iscsi_pause; /*!< Tx iSCSI pause */ + u64 tx_iscsi_zero_pause; /*!< Tx iSCSI zero pause */ +}; + +/* Port statistics. 
*/ +union bfa_port_stats_u { + struct bfa_port_fc_stats fc; + struct bfa_port_eth_stats eth; +}; + +#define BFA_CEE_LLDP_MAX_STRING_LEN (128) +#define BFA_CEE_DCBX_MAX_PRIORITY (8) +#define BFA_CEE_DCBX_MAX_PGID (8) + +#define BFA_CEE_LLDP_SYS_CAP_OTHER 0x0001 +#define BFA_CEE_LLDP_SYS_CAP_REPEATER 0x0002 +#define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE 0x0004 +#define BFA_CEE_LLDP_SYS_CAP_WLAN_AP 0x0008 +#define BFA_CEE_LLDP_SYS_CAP_ROUTER 0x0010 +#define BFA_CEE_LLDP_SYS_CAP_TELEPHONE 0x0020 +#define BFA_CEE_LLDP_SYS_CAP_DOCSIS_CD 0x0040 +#define BFA_CEE_LLDP_SYS_CAP_STATION 0x0080 +#define BFA_CEE_LLDP_SYS_CAP_CVLAN 0x0100 +#define BFA_CEE_LLDP_SYS_CAP_SVLAN 0x0200 +#define BFA_CEE_LLDP_SYS_CAP_TPMR 0x0400 + +/* LLDP string type */ +struct bfa_cee_lldp_str { + u8 sub_type; + u8 len; + u8 rsvd[2]; + u8 value[BFA_CEE_LLDP_MAX_STRING_LEN]; +} __packed; + +/* LLDP parameters */ +struct bfa_cee_lldp_cfg { + struct bfa_cee_lldp_str chassis_id; + struct bfa_cee_lldp_str port_id; + struct bfa_cee_lldp_str port_desc; + struct bfa_cee_lldp_str sys_name; + struct bfa_cee_lldp_str sys_desc; + struct bfa_cee_lldp_str mgmt_addr; + u16 time_to_live; + u16 enabled_system_cap; +} __packed; + +enum bfa_cee_dcbx_version { + DCBX_PROTOCOL_PRECEE = 1, + DCBX_PROTOCOL_CEE = 2, +}; + +enum bfa_cee_lls { + /* LLS is down because the TLV not sent by the peer */ + CEE_LLS_DOWN_NO_TLV = 0, + /* LLS is down as advertised by the peer */ + CEE_LLS_DOWN = 1, + CEE_LLS_UP = 2, +}; + +/* CEE/DCBX parameters */ +struct bfa_cee_dcbx_cfg { + u8 pgid[BFA_CEE_DCBX_MAX_PRIORITY]; + u8 pg_percentage[BFA_CEE_DCBX_MAX_PGID]; + u8 pfc_primap; /* bitmap of priorties with PFC enabled */ + u8 fcoe_primap; /* bitmap of priorities used for FcoE traffic */ + u8 iscsi_primap; /* bitmap of priorities used for iSCSI traffic */ + u8 dcbx_version; /* operating version:CEE or preCEE */ + u8 lls_fcoe; /* FCoE Logical Link Status */ + u8 lls_lan; /* LAN Logical Link Status */ + u8 rsvd[2]; +} __packed; + +/* CEE status */ +/* Making this to tri-state for the benefit of port list command */ +enum bfa_cee_status { + CEE_UP = 0, + CEE_PHY_UP = 1, + CEE_LOOPBACK = 2, + CEE_PHY_DOWN = 3, +}; + +/* CEE Query */ +struct bfa_cee_attr { + u8 cee_status; + u8 error_reason; + struct bfa_cee_lldp_cfg lldp_remote; + struct bfa_cee_dcbx_cfg dcbx_remote; + u8 src_mac[ETH_ALEN]; + u8 link_speed; + u8 nw_priority; + u8 filler[2]; +} __packed; + +/* LLDP/DCBX/CEE Statistics */ +struct bfa_cee_stats { + u32 lldp_tx_frames; /*!< LLDP Tx Frames */ + u32 lldp_rx_frames; /*!< LLDP Rx Frames */ + u32 lldp_rx_frames_invalid; /*!< LLDP Rx Frames invalid */ + u32 lldp_rx_frames_new; /*!< LLDP Rx Frames new */ + u32 lldp_tlvs_unrecognized; /*!< LLDP Rx unrecognized TLVs */ + u32 lldp_rx_shutdown_tlvs; /*!< LLDP Rx shutdown TLVs */ + u32 lldp_info_aged_out; /*!< LLDP remote info aged out */ + u32 dcbx_phylink_ups; /*!< DCBX phy link ups */ + u32 dcbx_phylink_downs; /*!< DCBX phy link downs */ + u32 dcbx_rx_tlvs; /*!< DCBX Rx TLVs */ + u32 dcbx_rx_tlvs_invalid; /*!< DCBX Rx TLVs invalid */ + u32 dcbx_control_tlv_error; /*!< DCBX control TLV errors */ + u32 dcbx_feature_tlv_error; /*!< DCBX feature TLV errors */ + u32 dcbx_cee_cfg_new; /*!< DCBX new CEE cfg rcvd */ + u32 cee_status_down; /*!< CEE status down */ + u32 cee_status_up; /*!< CEE status up */ + u32 cee_hw_cfg_changed; /*!< CEE hw cfg changed */ + u32 cee_rx_invalid_cfg; /*!< CEE invalid cfg */ +} __packed; + +#endif /* __BFA_DEFS_CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h 
b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h new file mode 100644 index 0000000000..0478f35ae6 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_mfg_comm.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFA_DEFS_MFG_COMM_H__ +#define __BFA_DEFS_MFG_COMM_H__ + +#include "bfa_defs.h" + +/* Manufacturing block version */ +#define BFA_MFG_VERSION 3 +#define BFA_MFG_VERSION_UNINIT 0xFF + +/* Manufacturing block encrypted version */ +#define BFA_MFG_ENC_VER 2 + +/* Manufacturing block version 1 length */ +#define BFA_MFG_VER1_LEN 128 + +/* Manufacturing block header length */ +#define BFA_MFG_HDR_LEN 4 + +#define BFA_MFG_SERIALNUM_SIZE 11 +#define STRSZ(_n) (((_n) + 4) & ~3) + +/* Manufacturing card type */ +enum { + BFA_MFG_TYPE_CB_MAX = 825, /*!< Crossbow card type max */ + BFA_MFG_TYPE_FC8P2 = 825, /*!< 8G 2port FC card */ + BFA_MFG_TYPE_FC8P1 = 815, /*!< 8G 1port FC card */ + BFA_MFG_TYPE_FC4P2 = 425, /*!< 4G 2port FC card */ + BFA_MFG_TYPE_FC4P1 = 415, /*!< 4G 1port FC card */ + BFA_MFG_TYPE_CNA10P2 = 1020, /*!< 10G 2port CNA card */ + BFA_MFG_TYPE_CNA10P1 = 1010, /*!< 10G 1port CNA card */ + BFA_MFG_TYPE_JAYHAWK = 804, /*!< Jayhawk mezz card */ + BFA_MFG_TYPE_WANCHESE = 1007, /*!< Wanchese mezz card */ + BFA_MFG_TYPE_ASTRA = 807, /*!< Astra mezz card */ + BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*!< Lightning mezz card - old */ + BFA_MFG_TYPE_LIGHTNING = 1741, /*!< Lightning mezz card */ + BFA_MFG_TYPE_PROWLER_F = 1560, /*!< Prowler FC only cards */ + BFA_MFG_TYPE_PROWLER_N = 1410, /*!< Prowler NIC only cards */ + BFA_MFG_TYPE_PROWLER_C = 1710, /*!< Prowler CNA only cards */ + BFA_MFG_TYPE_PROWLER_D = 1860, /*!< Prowler Dual cards */ + BFA_MFG_TYPE_CHINOOK = 1867, /*!< Chinook cards */ + BFA_MFG_TYPE_INVALID = 0, /*!< Invalid card type */ +}; + +/* Check if Mezz card */ +#define bfa_mfg_is_mezz(type) (( \ + (type) == BFA_MFG_TYPE_JAYHAWK || \ + (type) == BFA_MFG_TYPE_WANCHESE || \ + (type) == BFA_MFG_TYPE_ASTRA || \ + (type) == BFA_MFG_TYPE_LIGHTNING_P0 || \ + (type) == BFA_MFG_TYPE_LIGHTNING || \ + (type) == BFA_MFG_TYPE_CHINOOK)) + +enum { + CB_GPIO_TTV = (1), /*!< TTV debug capable cards */ + CB_GPIO_FC8P2 = (2), /*!< 8G 2port FC card */ + CB_GPIO_FC8P1 = (3), /*!< 8G 1port FC card */ + CB_GPIO_FC4P2 = (4), /*!< 4G 2port FC card */ + CB_GPIO_FC4P1 = (5), /*!< 4G 1port FC card */ + CB_GPIO_DFLY = (6), /*!< 8G 2port FC mezzanine card */ + CB_GPIO_PROTO = BIT(7) /*!< 8G 2port FC prototypes */ +}; + +#define bfa_mfg_adapter_prop_init_gpio(gpio, card_type, prop) \ +do { \ + if ((gpio) & CB_GPIO_PROTO) { \ + (prop) |= BFI_ADAPTER_PROTO; \ + (gpio) &= ~CB_GPIO_PROTO; \ + } \ + switch (gpio) { \ + case CB_GPIO_TTV: \ + (prop) |= BFI_ADAPTER_TTV; \ + case CB_GPIO_DFLY: \ + case CB_GPIO_FC8P2: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \ + (card_type) = BFA_MFG_TYPE_FC8P2; \ + break; \ + case CB_GPIO_FC8P1: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 8); \ + (card_type) = BFA_MFG_TYPE_FC8P1; \ + break; \ + case CB_GPIO_FC4P2: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 2); \ + (prop) |= BFI_ADAPTER_SETP(SPEED, 4); \ + (card_type) = BFA_MFG_TYPE_FC4P2; \ + break; \ + case CB_GPIO_FC4P1: \ + (prop) |= BFI_ADAPTER_SETP(NPORTS, 1); \ + (prop) |= 
BFI_ADAPTER_SETP(SPEED, 4); \ + (card_type) = BFA_MFG_TYPE_FC4P1; \ + break; \ + default: \ + (prop) |= BFI_ADAPTER_UNSUPP; \ + (card_type) = BFA_MFG_TYPE_INVALID; \ + } \ +} while (0) + +/* VPD data length */ +#define BFA_MFG_VPD_LEN 512 +#define BFA_MFG_VPD_LEN_INVALID 0 + +#define BFA_MFG_VPD_PCI_HDR_OFF 137 +#define BFA_MFG_VPD_PCI_VER_MASK 0x07 /*!< version mask 3 bits */ +#define BFA_MFG_VPD_PCI_VDR_MASK 0xf8 /*!< vendor mask 5 bits */ + +/* VPD vendor tag */ +enum { + BFA_MFG_VPD_UNKNOWN = 0, /*!< vendor unknown */ + BFA_MFG_VPD_IBM = 1, /*!< vendor IBM */ + BFA_MFG_VPD_HP = 2, /*!< vendor HP */ + BFA_MFG_VPD_DELL = 3, /*!< vendor DELL */ + BFA_MFG_VPD_PCI_IBM = 0x08, /*!< PCI VPD IBM */ + BFA_MFG_VPD_PCI_HP = 0x10, /*!< PCI VPD HP */ + BFA_MFG_VPD_PCI_DELL = 0x20, /*!< PCI VPD DELL */ + BFA_MFG_VPD_PCI_BRCD = 0xf8, /*!< PCI VPD Brocade */ +}; + +/* BFA adapter flash vpd data definition. + * + * All numerical fields are in big-endian format. + */ +struct bfa_mfg_vpd { + u8 version; /*!< vpd data version */ + u8 vpd_sig[3]; /*!< characters 'V', 'P', 'D' */ + u8 chksum; /*!< u8 checksum */ + u8 vendor; /*!< vendor */ + u8 len; /*!< vpd data length excluding header */ + u8 rsv; + u8 data[BFA_MFG_VPD_LEN]; /*!< vpd data */ +} __packed; + +#endif /* __BFA_DEFS_MFG_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs_status.h b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h new file mode 100644 index 0000000000..0ed9ec2e68 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_defs_status.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFA_DEFS_STATUS_H__ +#define __BFA_DEFS_STATUS_H__ + +/* API status return values + * + * NOTE: The error msgs are auto generated from the comments. 
Only singe line + * comments are supported + */ +enum bfa_status { + BFA_STATUS_OK = 0, + BFA_STATUS_FAILED = 1, + BFA_STATUS_EINVAL = 2, + BFA_STATUS_ENOMEM = 3, + BFA_STATUS_ENOSYS = 4, + BFA_STATUS_ETIMER = 5, + BFA_STATUS_EPROTOCOL = 6, + BFA_STATUS_ENOFCPORTS = 7, + BFA_STATUS_NOFLASH = 8, + BFA_STATUS_BADFLASH = 9, + BFA_STATUS_SFP_UNSUPP = 10, + BFA_STATUS_UNKNOWN_VFID = 11, + BFA_STATUS_DATACORRUPTED = 12, + BFA_STATUS_DEVBUSY = 13, + BFA_STATUS_ABORTED = 14, + BFA_STATUS_NODEV = 15, + BFA_STATUS_HDMA_FAILED = 16, + BFA_STATUS_FLASH_BAD_LEN = 17, + BFA_STATUS_UNKNOWN_LWWN = 18, + BFA_STATUS_UNKNOWN_RWWN = 19, + BFA_STATUS_FCPT_LS_RJT = 20, + BFA_STATUS_VPORT_EXISTS = 21, + BFA_STATUS_VPORT_MAX = 22, + BFA_STATUS_UNSUPP_SPEED = 23, + BFA_STATUS_INVLD_DFSZ = 24, + BFA_STATUS_CNFG_FAILED = 25, + BFA_STATUS_CMD_NOTSUPP = 26, + BFA_STATUS_NO_ADAPTER = 27, + BFA_STATUS_LINKDOWN = 28, + BFA_STATUS_FABRIC_RJT = 29, + BFA_STATUS_UNKNOWN_VWWN = 30, + BFA_STATUS_NSLOGIN_FAILED = 31, + BFA_STATUS_NO_RPORTS = 32, + BFA_STATUS_NSQUERY_FAILED = 33, + BFA_STATUS_PORT_OFFLINE = 34, + BFA_STATUS_RPORT_OFFLINE = 35, + BFA_STATUS_TGTOPEN_FAILED = 36, + BFA_STATUS_BAD_LUNS = 37, + BFA_STATUS_IO_FAILURE = 38, + BFA_STATUS_NO_FABRIC = 39, + BFA_STATUS_EBADF = 40, + BFA_STATUS_EINTR = 41, + BFA_STATUS_EIO = 42, + BFA_STATUS_ENOTTY = 43, + BFA_STATUS_ENXIO = 44, + BFA_STATUS_EFOPEN = 45, + BFA_STATUS_VPORT_WWN_BP = 46, + BFA_STATUS_PORT_NOT_DISABLED = 47, + BFA_STATUS_BADFRMHDR = 48, + BFA_STATUS_BADFRMSZ = 49, + BFA_STATUS_MISSINGFRM = 50, + BFA_STATUS_LINKTIMEOUT = 51, + BFA_STATUS_NO_FCPIM_NEXUS = 52, + BFA_STATUS_CHECKSUM_FAIL = 53, + BFA_STATUS_GZME_FAILED = 54, + BFA_STATUS_SCSISTART_REQD = 55, + BFA_STATUS_IOC_FAILURE = 56, + BFA_STATUS_INVALID_WWN = 57, + BFA_STATUS_MISMATCH = 58, + BFA_STATUS_IOC_ENABLED = 59, + BFA_STATUS_ADAPTER_ENABLED = 60, + BFA_STATUS_IOC_NON_OP = 61, + BFA_STATUS_ADDR_MAP_FAILURE = 62, + BFA_STATUS_SAME_NAME = 63, + BFA_STATUS_PENDING = 64, + BFA_STATUS_8G_SPD = 65, + BFA_STATUS_4G_SPD = 66, + BFA_STATUS_AD_IS_ENABLE = 67, + BFA_STATUS_EINVAL_TOV = 68, + BFA_STATUS_EINVAL_QDEPTH = 69, + BFA_STATUS_VERSION_FAIL = 70, + BFA_STATUS_DIAG_BUSY = 71, + BFA_STATUS_BEACON_ON = 72, + BFA_STATUS_BEACON_OFF = 73, + BFA_STATUS_LBEACON_ON = 74, + BFA_STATUS_LBEACON_OFF = 75, + BFA_STATUS_PORT_NOT_INITED = 76, + BFA_STATUS_RPSC_ENABLED = 77, + BFA_STATUS_ENOFSAVE = 78, + BFA_STATUS_BAD_FILE = 79, + BFA_STATUS_RLIM_EN = 80, + BFA_STATUS_RLIM_DIS = 81, + BFA_STATUS_IOC_DISABLED = 82, + BFA_STATUS_ADAPTER_DISABLED = 83, + BFA_STATUS_BIOS_DISABLED = 84, + BFA_STATUS_AUTH_ENABLED = 85, + BFA_STATUS_AUTH_DISABLED = 86, + BFA_STATUS_ERROR_TRL_ENABLED = 87, + BFA_STATUS_ERROR_QOS_ENABLED = 88, + BFA_STATUS_NO_SFP_DEV = 89, + BFA_STATUS_MEMTEST_FAILED = 90, + BFA_STATUS_INVALID_DEVID = 91, + BFA_STATUS_QOS_ENABLED = 92, + BFA_STATUS_QOS_DISABLED = 93, + BFA_STATUS_INCORRECT_DRV_CONFIG = 94, + BFA_STATUS_REG_FAIL = 95, + BFA_STATUS_IM_INV_CODE = 96, + BFA_STATUS_IM_INV_VLAN = 97, + BFA_STATUS_IM_INV_ADAPT_NAME = 98, + BFA_STATUS_IM_LOW_RESOURCES = 99, + BFA_STATUS_IM_VLANID_IS_PVID = 100, + BFA_STATUS_IM_VLANID_EXISTS = 101, + BFA_STATUS_IM_FW_UPDATE_FAIL = 102, + BFA_STATUS_PORTLOG_ENABLED = 103, + BFA_STATUS_PORTLOG_DISABLED = 104, + BFA_STATUS_FILE_NOT_FOUND = 105, + BFA_STATUS_QOS_FC_ONLY = 106, + BFA_STATUS_RLIM_FC_ONLY = 107, + BFA_STATUS_CT_SPD = 108, + BFA_STATUS_LEDTEST_OP = 109, + BFA_STATUS_CEE_NOT_DN = 110, + BFA_STATUS_10G_SPD = 111, + BFA_STATUS_IM_INV_TEAM_NAME = 112, + 
BFA_STATUS_IM_DUP_TEAM_NAME = 113, + BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, + BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, + BFA_STATUS_IM_PVID_MISMATCH = 116, + BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, + BFA_STATUS_IM_MTU_MISMATCH = 118, + BFA_STATUS_IM_RSS_MISMATCH = 119, + BFA_STATUS_IM_HDS_MISMATCH = 120, + BFA_STATUS_IM_OFFLOAD_MISMATCH = 121, + BFA_STATUS_IM_PORT_PARAMS = 122, + BFA_STATUS_IM_PORT_NOT_IN_TEAM = 123, + BFA_STATUS_IM_CANNOT_REM_PRI = 124, + BFA_STATUS_IM_MAX_PORTS_REACHED = 125, + BFA_STATUS_IM_LAST_PORT_DELETE = 126, + BFA_STATUS_IM_NO_DRIVER = 127, + BFA_STATUS_IM_MAX_VLANS_REACHED = 128, + BFA_STATUS_TOMCAT_SPD_NOT_ALLOWED = 129, + BFA_STATUS_NO_MINPORT_DRIVER = 130, + BFA_STATUS_CARD_TYPE_MISMATCH = 131, + BFA_STATUS_BAD_ASICBLK = 132, + BFA_STATUS_NO_DRIVER = 133, + BFA_STATUS_INVALID_MAC = 134, + BFA_STATUS_IM_NO_VLAN = 135, + BFA_STATUS_IM_ETH_LB_FAILED = 136, + BFA_STATUS_IM_PVID_REMOVE = 137, + BFA_STATUS_IM_PVID_EDIT = 138, + BFA_STATUS_CNA_NO_BOOT = 139, + BFA_STATUS_IM_PVID_NON_ZERO = 140, + BFA_STATUS_IM_INETCFG_LOCK_FAILED = 141, + BFA_STATUS_IM_GET_INETCFG_FAILED = 142, + BFA_STATUS_IM_NOT_BOUND = 143, + BFA_STATUS_INSUFFICIENT_PERMS = 144, + BFA_STATUS_IM_INV_VLAN_NAME = 145, + BFA_STATUS_CMD_NOTSUPP_CNA = 146, + BFA_STATUS_IM_PASSTHRU_EDIT = 147, + BFA_STATUS_IM_BIND_FAILED = 148, + BFA_STATUS_IM_UNBIND_FAILED = 149, + BFA_STATUS_IM_PORT_IN_TEAM = 150, + BFA_STATUS_IM_VLAN_NOT_FOUND = 151, + BFA_STATUS_IM_TEAM_NOT_FOUND = 152, + BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, + BFA_STATUS_PBC = 154, + BFA_STATUS_DEVID_MISSING = 155, + BFA_STATUS_BAD_FWCFG = 156, + BFA_STATUS_CREATE_FILE = 157, + BFA_STATUS_INVALID_VENDOR = 158, + BFA_STATUS_SFP_NOT_READY = 159, + BFA_STATUS_FLASH_UNINIT = 160, + BFA_STATUS_FLASH_EMPTY = 161, + BFA_STATUS_FLASH_CKFAIL = 162, + BFA_STATUS_TRUNK_UNSUPP = 163, + BFA_STATUS_TRUNK_ENABLED = 164, + BFA_STATUS_TRUNK_DISABLED = 165, + BFA_STATUS_TRUNK_ERROR_TRL_ENABLED = 166, + BFA_STATUS_BOOT_CODE_UPDATED = 167, + BFA_STATUS_BOOT_VERSION = 168, + BFA_STATUS_CARDTYPE_MISSING = 169, + BFA_STATUS_INVALID_CARDTYPE = 170, + BFA_STATUS_NO_TOPOLOGY_FOR_CNA = 171, + BFA_STATUS_IM_VLAN_OVER_TEAM_DELETE_FAILED = 172, + BFA_STATUS_ETHBOOT_ENABLED = 173, + BFA_STATUS_ETHBOOT_DISABLED = 174, + BFA_STATUS_IOPROFILE_OFF = 175, + BFA_STATUS_NO_PORT_INSTANCE = 176, + BFA_STATUS_BOOT_CODE_TIMEDOUT = 177, + BFA_STATUS_NO_VPORT_LOCK = 178, + BFA_STATUS_VPORT_NO_CNFG = 179, + BFA_STATUS_MAX_VAL +}; + +enum bfa_eproto_status { + BFA_EPROTO_BAD_ACCEPT = 0, + BFA_EPROTO_UNKNOWN_RSP = 1 +}; + +#endif /* __BFA_DEFS_STATUS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c new file mode 100644 index 0000000000..b07522ac3e --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -0,0 +1,3377 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "bfa_ioc.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +/* IOC local definitions */ + +/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 
*/ + +#define bfa_ioc_firmware_lock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc)) +#define bfa_ioc_firmware_unlock(__ioc) \ + ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) +#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) +#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) +#define bfa_ioc_notify_fail(__ioc) \ + ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc)) +#define bfa_ioc_sync_start(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_start(__ioc)) +#define bfa_ioc_sync_join(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_join(__ioc)) +#define bfa_ioc_sync_leave(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc)) +#define bfa_ioc_sync_ack(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc)) +#define bfa_ioc_sync_complete(__ioc) \ + ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc)) +#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate)) +#define bfa_ioc_get_cur_ioc_fwstate(__ioc) \ + ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc)) +#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \ + ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate)) + +static bool bfa_nw_auto_recover = true; + +/* + * forward declarations + */ +static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc); +static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc); +static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc); +static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force); +static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc); +static void bfa_ioc_send_enable(struct bfa_ioc *ioc); +static void bfa_ioc_send_disable(struct bfa_ioc *ioc); +static void bfa_ioc_send_getattr(struct bfa_ioc *ioc); +static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc); +static void bfa_ioc_hb_stop(struct bfa_ioc *ioc); +static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force); +static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); +static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); +static void bfa_ioc_recover(struct bfa_ioc *ioc); +static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); +static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); +static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); +static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc); +static void bfa_ioc_fail_notify(struct bfa_ioc *ioc); +static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc); +static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc); +static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); +static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); +static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); +static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc, + enum bfi_fwboot_type boot_type, u32 boot_param); +static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); +static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, + char *serial_num); +static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, + char *fw_ver); +static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, + char *chip_rev); +static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, + char *optrom_ver); +static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, + char *manufacturer); +static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); +static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); + +/* IOC state machine definitions/declarations */ +enum ioc_event { + IOC_E_RESET = 1, /*!< IOC reset request */ + IOC_E_ENABLE = 2, /*!< IOC enable request */ + IOC_E_DISABLE = 3, /*!< IOC disable request */ + IOC_E_DETACH = 4, /*!< driver detach cleanup 
*/ + IOC_E_ENABLED = 5, /*!< f/w enabled */ + IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */ + IOC_E_DISABLED = 7, /*!< f/w disabled */ + IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */ + IOC_E_HBFAIL = 9, /*!< heartbeat failure */ + IOC_E_HWERROR = 10, /*!< hardware error interrupt */ + IOC_E_TIMEOUT = 11, /*!< timeout */ + IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */ +}; + +bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); +bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event); + +static struct ioc_sm_table_s ioc_sm_table[] = { + {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT}, + {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, + {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING}, + {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, + {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, + {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL}, + {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL}, + {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, + {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, + {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL}, +}; + +/* + * Forward declareations for iocpf state machine + */ +static void bfa_iocpf_enable(struct bfa_ioc *ioc); +static void bfa_iocpf_disable(struct bfa_ioc *ioc); +static void bfa_iocpf_fail(struct bfa_ioc *ioc); +static void bfa_iocpf_initfail(struct bfa_ioc *ioc); +static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc); +static void bfa_iocpf_stop(struct bfa_ioc *ioc); + +/* IOCPF state machine events */ +enum iocpf_event { + IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */ + IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */ + IOCPF_E_STOP = 3, /*!< stop on driver detach */ + IOCPF_E_FWREADY = 4, /*!< f/w initialization done */ + IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */ + IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */ + IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */ + IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */ + IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */ + IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ + IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */ + IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */ +}; + +/* IOCPF states */ +enum bfa_iocpf_state { + BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */ + BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ + BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */ + BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */ + BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */ + BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */ + BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */ + BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */ + BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */ +}; + +bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event); 
+bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf, + enum iocpf_event); +bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event); + +static struct iocpf_sm_table_s iocpf_sm_table[] = { + {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET}, + {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH}, + {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT}, + {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT}, + {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY}, + {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL}, + {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL}, + {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING}, + {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED}, +}; + +/* IOC State Machine */ + +/* Beginning state. IOC uninit state. */ +static void +bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc) +{ +} + +/* IOC is in uninit state. */ +static void +bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_RESET: + bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/* Reset entry actions -- initialize state machine */ +static void +bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) +{ + bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset); +} + +/* IOC is in reset state. */ +static void +bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + bfa_ioc_disable_comp(ioc); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) +{ + bfa_iocpf_enable(ioc); +} + +/* Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. 
+ */ +static void +bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + fallthrough; + case IOC_E_HWERROR: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_initfail(ioc); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +/* Semaphore should be acquired for version check. */ +static void +bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) +{ + mod_timer(&ioc->ioc_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + bfa_ioc_send_getattr(ioc); +} + +/* IOC configuration in progress. Timer is active. */ +static void +bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_FWRSP_GETATTR: + del_timer(&ioc->ioc_timer); + bfa_fsm_set_state(ioc, bfa_ioc_sm_op); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + del_timer(&ioc->ioc_timer); + fallthrough; + case IOC_E_TIMEOUT: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_getattrfail(ioc); + break; + + case IOC_E_DISABLE: + del_timer(&ioc->ioc_timer); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) +{ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); + bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); + bfa_ioc_hb_monitor(ioc); +} + +static void +bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_ioc_hb_stop(ioc); + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + bfa_ioc_hb_stop(ioc); + fallthrough; + + case IOC_E_HBFAIL: + if (ioc->iocpf.auto_recover) + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry); + else + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + + bfa_ioc_fail_notify(ioc); + + if (event != IOC_E_PFFAILED) + bfa_iocpf_fail(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) +{ + bfa_iocpf_disable(ioc); +} + +/* IOC is being disabled */ +static void +bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_DISABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); + break; + + case IOC_E_HWERROR: + /* + * No state change. Will move to disabled state + * after iocpf sm completes failure processing and + * moves to disabled state. + */ + bfa_iocpf_fail(ioc); + break; + + case IOC_E_HWFAILED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + bfa_ioc_disable_comp(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +/* IOC disable completion entry. 
*/ +static void +bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) +{ + bfa_ioc_disable_comp(ioc); +} + +static void +bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc) +{ +} + +/* Hardware initialization retry. */ +static void +bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLED: + bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); + break; + + case IOC_E_PFFAILED: + case IOC_E_HWERROR: + /** + * Initialization retry failed. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_fail); + if (event != IOC_E_PFFAILED) + bfa_iocpf_initfail(ioc); + break; + + case IOC_E_HWFAILED: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail); + break; + + case IOC_E_ENABLE: + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc) +{ +} + +/* IOC failure. */ +static void +bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_iocpf_stop(ioc); + break; + + case IOC_E_HWERROR: + /* HB failure notification, ignore. */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc) +{ +} + +/* IOC failure. */ +static void +bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event) +{ + switch (event) { + + case IOC_E_ENABLE: + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); + break; + + case IOC_E_DISABLE: + ioc->cbfn->disable_cbfn(ioc->bfa); + break; + + case IOC_E_DETACH: + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + break; + + default: + bfa_sm_fault(event); + } +} + +/* IOCPF State Machine */ + +/* Reset entry actions -- initialize state machine */ +static void +bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf) +{ + iocpf->fw_mismatch_notified = false; + iocpf->auto_recover = bfa_nw_auto_recover; +} + +/* Beginning state. IOC is in reset state. */ +static void +bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_STOP: + break; + + default: + bfa_sm_fault(event); + } +} + +/* Semaphore should be acquired for version check. */ +static void +bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_init(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* Awaiting h/w semaphore to continue with version check. 
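+ * Once locked, the firmware lock result decides whether to join the sync
+ * group and start hardware init or to enter the firmware-mismatch state.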
*/ +static void +bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_firmware_lock(ioc)) { + if (bfa_ioc_sync_start(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_ioc_firmware_unlock(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); + } + } else { + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; + + case IOCPF_E_STOP: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/* Notify enable completion callback */ +static void +bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf) +{ + /* Call only the first time sm enters fwmismatch state. */ + if (!iocpf->fw_mismatch_notified) + bfa_ioc_pf_fwmismatch(iocpf->ioc); + + iocpf->fw_mismatch_notified = true; + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); +} + +/* Awaiting firmware version match. */ +static void +bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_TIMEOUT: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + bfa_ioc_pf_disabled(ioc); + break; + + case IOCPF_E_STOP: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +/* Request for semaphore. */ +static void +bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* Awaiting semaphore for h/w initialzation. */ +static void +bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + if (bfa_ioc_sync_complete(ioc)) { + bfa_ioc_sync_join(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + } else { + bfa_nw_ioc_hw_sem_release(ioc); + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf) +{ + iocpf->poll_time = 0; + bfa_ioc_reset(iocpf->ioc, false); +} + +/* Hardware is being initialized. Interrupts are enabled. + * Holding hardware semaphore lock. 
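+ * IOCPF_E_FWREADY advances to the enabling state; a timeout releases the
+ * semaphore and moves to initfail_sync.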
+ */ +static void +bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWREADY: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling); + break; + + case IOCPF_E_TIMEOUT: + bfa_nw_ioc_hw_sem_release(ioc); + bfa_ioc_pf_failed(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_ioc_sync_leave(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf) +{ + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + /** + * Enable Interrupts before sending fw IOC ENABLE cmd. + */ + iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa); + bfa_ioc_send_enable(iocpf->ioc); +} + +/* Host IOC function is being enabled, awaiting response from firmware. + * Semaphore is acquired. + */ +static void +bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWRSP_ENABLE: + del_timer(&ioc->iocpf_timer); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready); + break; + + case IOCPF_E_INITFAIL: + del_timer(&ioc->iocpf_timer); + fallthrough; + + case IOCPF_E_TIMEOUT: + bfa_nw_ioc_hw_sem_release(ioc); + if (event == IOCPF_E_TIMEOUT) + bfa_ioc_pf_failed(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_pf_enabled(iocpf->ioc); +} + +static void +bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling); + break; + + case IOCPF_E_GETATTRFAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync); + break; + + case IOCPF_E_FAIL: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf) +{ + mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_TOV)); + bfa_ioc_send_disable(iocpf->ioc); +} + +/* IOC is being disabled */ +static void +bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_FWRSP_DISABLE: + del_timer(&ioc->iocpf_timer); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + del_timer(&ioc->iocpf_timer); + fallthrough; + + case IOCPF_E_TIMEOUT: + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FWRSP_ENABLE: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* IOC hb ack request is being removed. 
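+ * Waits to re-acquire the hardware semaphore so the sync state can be
+ * cleared before the disable completes.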
*/ +static void +bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_leave(ioc); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +/* IOC disable completion entry. */ +static void +bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf) +{ + bfa_ioc_mbox_flush(iocpf->ioc); + bfa_ioc_pf_disabled(iocpf->ioc); +} + +static void +bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_ENABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf) +{ + bfa_nw_ioc_debug_save_ftrc(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* Hardware initialization failed. */ +static void +bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_notify_fail(ioc); + bfa_ioc_sync_leave(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail); + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_STOP: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf) +{ +} + +/* Hardware initialization failed. */ +static void +bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + case IOCPF_E_STOP: + bfa_ioc_firmware_unlock(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf) +{ + /** + * Mark IOC as failed in hardware and stop firmware. + */ + bfa_ioc_lpu_stop(iocpf->ioc); + + /** + * Flush any queued up mailbox requests. + */ + bfa_ioc_mbox_flush(iocpf->ioc); + bfa_ioc_hw_sem_get(iocpf->ioc); +} + +/* IOC is in failed state. 
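+ * With the semaphore held, either leave the sync group (no auto-recovery)
+ * or retry hardware initialization through hwinit/semwait.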
*/ +static void +bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + struct bfa_ioc *ioc = iocpf->ioc; + + switch (event) { + case IOCPF_E_SEMLOCKED: + bfa_ioc_sync_ack(ioc); + bfa_ioc_notify_fail(ioc); + if (!iocpf->auto_recover) { + bfa_ioc_sync_leave(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL); + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + } else { + if (bfa_ioc_sync_complete(ioc)) + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit); + else { + bfa_nw_ioc_hw_sem_release(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait); + } + } + break; + + case IOCPF_E_SEM_ERROR: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail); + bfa_ioc_pf_hwfailed(ioc); + break; + + case IOCPF_E_DISABLE: + bfa_ioc_hw_sem_get_cancel(ioc); + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync); + break; + + case IOCPF_E_FAIL: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf) +{ +} + +/* IOC is in failed state. */ +static void +bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event) +{ + switch (event) { + case IOCPF_E_DISABLE: + bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled); + break; + + default: + bfa_sm_fault(event); + } +} + +/* BFA IOC private functions */ + +/* Notify common modules registered for notification. */ +static void +bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event) +{ + struct bfa_ioc_notify *notify; + + list_for_each_entry(notify, &ioc->notify_q, qe) + notify->cbfn(notify->cbarg, event); +} + +static void +bfa_ioc_disable_comp(struct bfa_ioc *ioc) +{ + ioc->cbfn->disable_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED); +} + +bool +bfa_nw_ioc_sem_get(void __iomem *sem_reg) +{ + u32 r32; + int cnt = 0; +#define BFA_SEM_SPINCNT 3000 + + r32 = readl(sem_reg); + + while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) { + cnt++; + udelay(2); + r32 = readl(sem_reg); + } + + if (!(r32 & 1)) + return true; + + return false; +} + +void +bfa_nw_ioc_sem_release(void __iomem *sem_reg) +{ + readl(sem_reg); + writel(1, sem_reg); +} + +/* Clear fwver hdr */ +static void +bfa_ioc_fwver_clear(struct bfa_ioc *ioc) +{ + u32 pgnum, loff = 0; + int i; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) { + writel(0, ioc->ioc_regs.smem_page_start + loff); + loff += sizeof(u32); + } +} + + +static void +bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) +{ + struct bfi_ioc_image_hdr fwhdr; + u32 fwstate, r32; + + /* Spin on init semaphore to serialize. */ + r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); + while (r32 & 0x1) { + udelay(20); + r32 = readl(ioc->ioc_regs.ioc_init_sem_reg); + } + + fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + if (fwstate == BFI_IOC_UNINIT) { + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + return; + } + + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + + if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) { + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + return; + } + + bfa_ioc_fwver_clear(ioc); + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT); + + /* + * Try to lock and then unlock the semaphore. 
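+ * Reading ioc_sem_reg acquires the semaphore and writing 1 releases it,
+ * leaving it unlocked after the firmware state was reset above.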
+ */ + readl(ioc->ioc_regs.ioc_sem_reg); + writel(1, ioc->ioc_regs.ioc_sem_reg); + + /* Unlock init semaphore */ + writel(1, ioc->ioc_regs.ioc_init_sem_reg); +} + +static void +bfa_ioc_hw_sem_get(struct bfa_ioc *ioc) +{ + u32 r32; + + /** + * First read to the semaphore register will return 0, subsequent reads + * will return 1. Semaphore is released by writing 1 to the register + */ + r32 = readl(ioc->ioc_regs.ioc_sem_reg); + if (r32 == ~0) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR); + return; + } + if (!(r32 & 1)) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED); + return; + } + + mod_timer(&ioc->sem_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HWSEM_TOV)); +} + +void +bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc) +{ + writel(1, ioc->ioc_regs.ioc_sem_reg); +} + +static void +bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc) +{ + del_timer(&ioc->sem_timer); +} + +/* Initialize LPU local memory (aka secondary memory / SRAM) */ +static void +bfa_ioc_lmem_init(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + int i; +#define PSS_LMEM_INIT_TIME 10000 + + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LMEM_RESET; + pss_ctl |= __PSS_LMEM_INIT_EN; + + /* + * i2c workaround 12.5khz clock + */ + pss_ctl |= __PSS_I2C_CLK_DIV(3UL); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); + + /** + * wait for memory initialization to be complete + */ + i = 0; + do { + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + i++; + } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME)); + + /** + * If memory initialization is not successful, IOC timeout will catch + * such failures. + */ + BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE)); + + pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN); + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_start(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + + /** + * Take processor out of reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl &= ~__PSS_LPU0_RESET; + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +static void +bfa_ioc_lpu_stop(struct bfa_ioc *ioc) +{ + u32 pss_ctl; + + /** + * Put processors in reset. + */ + pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg); + pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET); + + writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg); +} + +/* Get driver and firmware versions. */ +void +bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) +{ + u32 pgnum; + u32 loff = 0; + int i; + u32 *fwsig = (u32 *) fwhdr; + + pgnum = bfa_ioc_smem_pgnum(ioc, loff); + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); + i++) { + fwsig[i] = + swab32(readl(loff + ioc->ioc_regs.smem_page_start)); + loff += sizeof(u32); + } +} + +static bool +bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1, + struct bfi_ioc_image_hdr *fwhdr_2) +{ + int i; + + for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) { + if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i]) + return false; + } + + return true; +} + +/* Returns TRUE if major minor and maintenance are same. + * If patch version are same, check for MD5 Checksum to be same. 
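+ * A signature or major/minor/maint mismatch makes the images incompatible;
+ * identical patch, phase and build numbers fall back to the md5 comparison.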
+ */ +static bool +bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr, + struct bfi_ioc_image_hdr *fwhdr_to_cmp) +{ + if (drv_fwhdr->signature != fwhdr_to_cmp->signature) + return false; + if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major) + return false; + if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor) + return false; + if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint) + return false; + if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch && + drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase && + drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build) + return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp); + + return true; +} + +static bool +bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr) +{ + if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF) + return false; + + return true; +} + +static bool +fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr) +{ + if (fwhdr->fwver.phase == 0 && + fwhdr->fwver.build == 0) + return false; + + return true; +} + +/* Returns TRUE if both are compatible and patch of fwhdr_to_cmp is better. */ +static enum bfi_ioc_img_ver_cmp +bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr, + struct bfi_ioc_image_hdr *fwhdr_to_cmp) +{ + if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_INCOMP; + + if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch) + return BFI_IOC_IMG_VER_OLD; + + /* GA takes priority over internal builds of the same patch stream. + * At this point major minor maint and patch numbers are same. + */ + if (fwhdr_is_ga(base_fwhdr)) + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_SAME; + else + return BFI_IOC_IMG_VER_OLD; + else + if (fwhdr_is_ga(fwhdr_to_cmp)) + return BFI_IOC_IMG_VER_BETTER; + + if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase) + return BFI_IOC_IMG_VER_OLD; + + if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_BETTER; + else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build) + return BFI_IOC_IMG_VER_OLD; + + /* All Version Numbers are equal. + * Md5 check to be done as a part of compatibility check. 
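+ * (The md5 comparison already ran as part of bfa_ioc_fw_ver_compatible().)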
+ */ + return BFI_IOC_IMG_VER_SAME; +} + +/* register definitions */ +#define FLI_CMD_REG 0x0001d000 +#define FLI_WRDATA_REG 0x0001d00c +#define FLI_RDDATA_REG 0x0001d010 +#define FLI_ADDR_REG 0x0001d004 +#define FLI_DEV_STATUS_REG 0x0001d014 + +#define BFA_FLASH_FIFO_SIZE 128 /* fifo size */ +#define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */ +#define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */ +#define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */ + +#define NFC_STATE_RUNNING 0x20000001 +#define NFC_STATE_PAUSED 0x00004560 +#define NFC_VER_VALID 0x147 + +enum bfa_flash_cmd { + BFA_FLASH_FAST_READ = 0x0b, /* fast read */ + BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */ + BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */ + BFA_FLASH_WRITE = 0x02, /* write */ + BFA_FLASH_READ_STATUS = 0x05, /* read status */ +}; + +/* hardware error definition */ +enum bfa_flash_err { + BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */ + BFA_FLASH_UNINIT = -2, /*!< flash not initialized */ + BFA_FLASH_BAD = -3, /*!< flash bad */ + BFA_FLASH_BUSY = -4, /*!< flash busy */ + BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */ + BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */ + BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */ + BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */ + BFA_FLASH_ERR_LEN = -9, /*!< invalid length */ +}; + +/* flash command register data structure */ +union bfa_flash_cmd_reg { + struct { +#ifdef __BIG_ENDIAN + u32 act:1; + u32 rsv:1; + u32 write_cnt:9; + u32 read_cnt:9; + u32 addr_cnt:4; + u32 cmd:8; +#else + u32 cmd:8; + u32 addr_cnt:4; + u32 read_cnt:9; + u32 write_cnt:9; + u32 rsv:1; + u32 act:1; +#endif + } r; + u32 i; +}; + +/* flash device status register data structure */ +union bfa_flash_dev_status_reg { + struct { +#ifdef __BIG_ENDIAN + u32 rsv:21; + u32 fifo_cnt:6; + u32 busy:1; + u32 init_status:1; + u32 present:1; + u32 bad:1; + u32 good:1; +#else + u32 good:1; + u32 bad:1; + u32 present:1; + u32 init_status:1; + u32 busy:1; + u32 fifo_cnt:6; + u32 rsv:21; +#endif + } r; + u32 i; +}; + +/* flash address register data structure */ +union bfa_flash_addr_reg { + struct { +#ifdef __BIG_ENDIAN + u32 addr:24; + u32 dummy:8; +#else + u32 dummy:8; + u32 addr:24; +#endif + } r; + u32 i; +}; + +/* Flash raw private functions */ +static void +bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt, + u8 rd_cnt, u8 ad_cnt, u8 op) +{ + union bfa_flash_cmd_reg cmd; + + cmd.i = 0; + cmd.r.act = 1; + cmd.r.write_cnt = wr_cnt; + cmd.r.read_cnt = rd_cnt; + cmd.r.addr_cnt = ad_cnt; + cmd.r.cmd = op; + writel(cmd.i, (pci_bar + FLI_CMD_REG)); +} + +static void +bfa_flash_set_addr(void __iomem *pci_bar, u32 address) +{ + union bfa_flash_addr_reg addr; + + addr.r.addr = address & 0x00ffffff; + addr.r.dummy = 0; + writel(addr.i, (pci_bar + FLI_ADDR_REG)); +} + +static int +bfa_flash_cmd_act_check(void __iomem *pci_bar) +{ + union bfa_flash_cmd_reg cmd; + + cmd.i = readl(pci_bar + FLI_CMD_REG); + + if (cmd.r.act) + return BFA_FLASH_ERR_CMD_ACT; + + return 0; +} + +/* Flush FLI data fifo. */ +static int +bfa_flash_fifo_flush(void __iomem *pci_bar) +{ + u32 i; + union bfa_flash_dev_status_reg dev_status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + + if (!dev_status.r.fifo_cnt) + return 0; + + /* fifo counter in terms of words */ + for (i = 0; i < dev_status.r.fifo_cnt; i++) + readl(pci_bar + FLI_RDDATA_REG); + + /* Check the device status. It may take some time. 
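+ * Poll up to BFA_FLASH_CHECK_MAX times for the fifo counter to drain.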
*/ + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + break; + } + + if (dev_status.r.fifo_cnt) + return BFA_FLASH_ERR_FIFO_CNT; + + return 0; +} + +/* Read flash status. */ +static int +bfa_flash_status_read(void __iomem *pci_bar) +{ + union bfa_flash_dev_status_reg dev_status; + int status; + u32 ret_status; + int i; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS); + + for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) { + status = bfa_flash_cmd_act_check(pci_bar); + if (!status) + break; + } + + if (status) + return status; + + dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG); + if (!dev_status.r.fifo_cnt) + return BFA_FLASH_BUSY; + + ret_status = readl(pci_bar + FLI_RDDATA_REG); + ret_status >>= 24; + + status = bfa_flash_fifo_flush(pci_bar); + if (status < 0) + return status; + + return ret_status; +} + +/* Start flash read operation. */ +static int +bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len, + char *buf) +{ + int status; + + /* len must be mutiple of 4 and not exceeding fifo size */ + if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0) + return BFA_FLASH_ERR_LEN; + + /* check status */ + status = bfa_flash_status_read(pci_bar); + if (status == BFA_FLASH_BUSY) + status = bfa_flash_status_read(pci_bar); + + if (status < 0) + return status; + + /* check if write-in-progress bit is cleared */ + if (status & BFA_FLASH_WIP_MASK) + return BFA_FLASH_ERR_WIP; + + bfa_flash_set_addr(pci_bar, offset); + + bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ); + + return 0; +} + +/* Check flash read operation. */ +static u32 +bfa_flash_read_check(void __iomem *pci_bar) +{ + if (bfa_flash_cmd_act_check(pci_bar)) + return 1; + + return 0; +} + +/* End flash read operation. */ +static void +bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf) +{ + u32 i; + + /* read data fifo up to 32 words */ + for (i = 0; i < len; i += 4) { + u32 w = readl(pci_bar + FLI_RDDATA_REG); + *((u32 *)(buf + i)) = swab32(w); + } + + bfa_flash_fifo_flush(pci_bar); +} + +/* Perform flash raw read. 
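+ * The helpers below take the flash semaphore and transfer the requested
+ * range in fifo-sized (BFA_FLASH_FIFO_SIZE) chunks through the FLI registers.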
*/ + +#define FLASH_BLOCKING_OP_MAX 500 +#define FLASH_SEM_LOCK_REG 0x18820 + +static int +bfa_raw_sem_get(void __iomem *bar) +{ + int locked; + + locked = readl(bar + FLASH_SEM_LOCK_REG); + + return !locked; +} + +static enum bfa_status +bfa_flash_sem_get(void __iomem *bar) +{ + u32 n = FLASH_BLOCKING_OP_MAX; + + while (!bfa_raw_sem_get(bar)) { + if (--n <= 0) + return BFA_STATUS_BADFLASH; + mdelay(10); + } + return BFA_STATUS_OK; +} + +static void +bfa_flash_sem_put(void __iomem *bar) +{ + writel(0, (bar + FLASH_SEM_LOCK_REG)); +} + +static enum bfa_status +bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf, + u32 len) +{ + u32 n; + int status; + u32 off, l, s, residue, fifo_sz; + + residue = len; + off = 0; + fifo_sz = BFA_FLASH_FIFO_SIZE; + status = bfa_flash_sem_get(pci_bar); + if (status != BFA_STATUS_OK) + return status; + + while (residue) { + s = offset + off; + n = s / fifo_sz; + l = (n + 1) * fifo_sz - s; + if (l > residue) + l = residue; + + status = bfa_flash_read_start(pci_bar, offset + off, l, + &buf[off]); + if (status < 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + + n = BFA_FLASH_BLOCKING_OP_MAX; + while (bfa_flash_read_check(pci_bar)) { + if (--n <= 0) { + bfa_flash_sem_put(pci_bar); + return BFA_STATUS_FAILED; + } + } + + bfa_flash_read_end(pci_bar, l, &buf[off]); + + residue -= l; + off += l; + } + bfa_flash_sem_put(pci_bar); + + return BFA_STATUS_OK; +} + +#define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */ + +static enum bfa_status +bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off, + u32 *fwimg) +{ + return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva, + BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)), + (char *)fwimg, BFI_FLASH_CHUNK_SZ); +} + +static enum bfi_ioc_img_ver_cmp +bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *base_fwhdr) +{ + struct bfi_ioc_image_hdr *flash_fwhdr; + enum bfa_status status; + u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS]; + + status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg); + if (status != BFA_STATUS_OK) + return BFI_IOC_IMG_VER_INCOMP; + + flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg; + if (bfa_ioc_flash_fwver_valid(flash_fwhdr)) + return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr); + else + return BFI_IOC_IMG_VER_INCOMP; +} + +/* + * Returns TRUE if driver is willing to work with current smem f/w version. + */ +bool +bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr) +{ + struct bfi_ioc_image_hdr *drv_fwhdr; + enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp; + + drv_fwhdr = (struct bfi_ioc_image_hdr *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + + /* If smem is incompatible or old, driver should not work with it. */ + drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr); + if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP || + drv_smem_cmp == BFI_IOC_IMG_VER_OLD) { + return false; + } + + /* IF Flash has a better F/W than smem do not work with smem. + * If smem f/w == flash f/w, as smem f/w not old | incmp, work with it. + * If Flash is old or incomp work with smem iff smem f/w == drv f/w. + */ + smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr); + + if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER) + return false; + else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME) + return true; + else + return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ? + true : false; +} + +/* Return true if current running version is valid. Firmware signature and + * execution context (driver/bios) must match. 
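+ * The boot environment recorded in smem is checked against the requested
+ * boot_env before the full version comparison runs.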
+ */ +static bool +bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env) +{ + struct bfi_ioc_image_hdr fwhdr; + + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + if (swab32(fwhdr.bootenv) != boot_env) + return false; + + return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr); +} + +/* Conditionally flush any pending message from firmware at start. */ +static void +bfa_ioc_msgflush(struct bfa_ioc *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if (r32) + writel(1, ioc->ioc_regs.lpu_mbox_cmd); +} + +static void +bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force) +{ + enum bfi_ioc_state ioc_fwstate; + bool fwvalid; + u32 boot_env; + + ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + + if (force) + ioc_fwstate = BFI_IOC_UNINIT; + + boot_env = BFI_FWBOOT_ENV_OS; + + /** + * check if firmware is valid + */ + fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ? + false : bfa_ioc_fwver_valid(ioc, boot_env); + + if (!fwvalid) { + if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == + BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); + + return; + } + + /** + * If hardware initialization is in progress (initialized by other IOC), + * just wait for an initialization completion interrupt. + */ + if (ioc_fwstate == BFI_IOC_INITING) { + bfa_ioc_poll_fwinit(ioc); + return; + } + + /** + * If IOC function is disabled and firmware version is same, + * just re-enable IOC. + */ + if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) { + /** + * When using MSI-X any pending firmware ready event should + * be flushed. Otherwise MSI-X interrupts are not delivered. + */ + bfa_ioc_msgflush(ioc); + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + /** + * Initialize the h/w for any other states. + */ + if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) == + BFA_STATUS_OK) + bfa_ioc_poll_fwinit(ioc); +} + +void +bfa_nw_ioc_timeout(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_TIMEOUT); +} + +static void +bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len) +{ + u32 *msgp = (u32 *) ioc_msg; + u32 i; + + BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX)); + + /* + * first write msg to mailbox registers + */ + for (i = 0; i < len / sizeof(u32); i++) + writel(cpu_to_le32(msgp[i]), + ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++) + writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32)); + + /* + * write 1 to mailbox CMD to trigger LPU event + */ + writel(1, ioc->ioc_regs.hfn_mbox_cmd); + (void) readl(ioc->ioc_regs.hfn_mbox_cmd); +} + +static void +bfa_ioc_send_enable(struct bfa_ioc *ioc) +{ + struct bfi_ioc_ctrl_req enable_req; + + bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ, + bfa_ioc_portid(ioc)); + enable_req.clscode = htons(ioc->clscode); + enable_req.rsvd = htons(0); + /* overflow in 2106 */ + enable_req.tv_sec = ntohl(ktime_get_real_seconds()); + bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req)); +} + +static void +bfa_ioc_send_disable(struct bfa_ioc *ioc) +{ + struct bfi_ioc_ctrl_req disable_req; + + bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ, + bfa_ioc_portid(ioc)); + disable_req.clscode = htons(ioc->clscode); + disable_req.rsvd = htons(0); + /* overflow in 2106 */ + disable_req.tv_sec = ntohl(ktime_get_real_seconds()); + bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req)); +} + +static void +bfa_ioc_send_getattr(struct bfa_ioc *ioc) +{ + struct bfi_ioc_getattr_req attr_req; + + bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ, + bfa_ioc_portid(ioc)); + 
bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa); + bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req)); +} + +void +bfa_nw_ioc_hb_check(struct bfa_ioc *ioc) +{ + u32 hb_count; + + hb_count = readl(ioc->ioc_regs.heartbeat); + if (ioc->hb_count == hb_count) { + bfa_ioc_recover(ioc); + return; + } else { + ioc->hb_count = hb_count; + } + + bfa_ioc_mbox_poll(ioc); + mod_timer(&ioc->hb_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HB_TOV)); +} + +static void +bfa_ioc_hb_monitor(struct bfa_ioc *ioc) +{ + ioc->hb_count = readl(ioc->ioc_regs.heartbeat); + mod_timer(&ioc->hb_timer, jiffies + + msecs_to_jiffies(BFA_IOC_HB_TOV)); +} + +static void +bfa_ioc_hb_stop(struct bfa_ioc *ioc) +{ + del_timer(&ioc->hb_timer); +} + +/* Initiate a full firmware download. */ +static enum bfa_status +bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type, + u32 boot_env) +{ + u32 *fwimg; + u32 pgnum; + u32 loff = 0; + u32 chunkno = 0; + u32 i; + u32 asicmode; + u32 fwimg_size; + u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS]; + enum bfa_status status; + + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32); + + status = bfa_nw_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)); + fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + + pgnum = bfa_ioc_smem_pgnum(ioc, loff); + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + for (i = 0; i < fwimg_size; i++) { + if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) { + chunkno = BFA_IOC_FLASH_CHUNK_NO(i); + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + status = bfa_nw_ioc_flash_img_get_chnk(ioc, + BFA_IOC_FLASH_CHUNK_ADDR(chunkno), + fwimg_buf); + if (status != BFA_STATUS_OK) + return status; + + fwimg = fwimg_buf; + } else { + fwimg = bfa_cb_image_get_chunk( + bfa_ioc_asic_gen(ioc), + BFA_IOC_FLASH_CHUNK_ADDR(chunkno)); + } + } + + /** + * write smem + */ + writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]), + ioc->ioc_regs.smem_page_start + loff); + + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, + ioc->ioc_regs.host_page_num_fn); + } + } + + writel(bfa_ioc_smem_pgnum(ioc, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * Set boot type, env and device mode at the end. + */ + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_FLASH) { + boot_type = BFI_FWBOOT_TYPE_NORMAL; + } + asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode, + ioc->port0_mode, ioc->port1_mode); + writel(asicmode, ((ioc->ioc_regs.smem_page_start) + + BFI_FWBOOT_DEVMODE_OFF)); + writel(boot_type, ((ioc->ioc_regs.smem_page_start) + + (BFI_FWBOOT_TYPE_OFF))); + writel(boot_env, ((ioc->ioc_regs.smem_page_start) + + (BFI_FWBOOT_ENV_OFF))); + return BFA_STATUS_OK; +} + +static void +bfa_ioc_reset(struct bfa_ioc *ioc, bool force) +{ + bfa_ioc_hwinit(ioc, force); +} + +/* BFA ioc enable reply by firmware */ +static void +bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode, + u8 cap_bm) +{ + struct bfa_iocpf *iocpf = &ioc->iocpf; + + ioc->port_mode = ioc->port_mode_cfg = port_mode; + ioc->ad_cap_bm = cap_bm; + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE); +} + +/* Update BFA configuration from firmware configuration. 
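+ * Byte-swaps the firmware-supplied attribute fields to host order and
+ * sends IOC_E_FWRSP_GETATTR to the IOC state machine.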
*/ +static void +bfa_ioc_getattr_reply(struct bfa_ioc *ioc) +{ + struct bfi_ioc_attr *attr = ioc->attr; + + attr->adapter_prop = ntohl(attr->adapter_prop); + attr->card_type = ntohl(attr->card_type); + attr->maxfrsize = ntohs(attr->maxfrsize); + + bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR); +} + +/* Attach time initialization of mbox logic. */ +static void +bfa_ioc_mbox_attach(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + int mc; + + INIT_LIST_HEAD(&mod->cmd_q); + for (mc = 0; mc < BFI_MC_MAX; mc++) { + mod->mbhdlr[mc].cbfn = NULL; + mod->mbhdlr[mc].cbarg = ioc->bfa; + } +} + +/* Mbox poll timer -- restarts any pending mailbox requests. */ +static void +bfa_ioc_mbox_poll(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd *cmd; + bfa_mbox_cmd_cbfn_t cbfn; + void *cbarg; + u32 stat; + + /** + * If no command pending, do nothing + */ + if (list_empty(&mod->cmd_q)) + return; + + /** + * If previous command is not yet fetched by firmware, do nothing + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) + return; + + /** + * Enqueue command to firmware. + */ + cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); + list_del(&cmd->qe); + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); + + /** + * Give a callback to the client, indicating that the command is sent + */ + if (cmd->cbfn) { + cbfn = cmd->cbfn; + cbarg = cmd->cbarg; + cmd->cbfn = NULL; + cbfn(cbarg); + } +} + +/* Cleanup any pending requests. */ +static void +bfa_ioc_mbox_flush(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfa_mbox_cmd *cmd; + + while (!list_empty(&mod->cmd_q)) { + cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe); + list_del(&cmd->qe); + } +} + +/** + * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap + * + * @ioc: memory for IOC + * @tbuf: app memory to store data from smem + * @soff: smem offset + * @sz: size of smem in bytes + */ +static int +bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz) +{ + u32 pgnum, loff, r32; + int i, len; + u32 *buf = tbuf; + + pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff); + loff = PSS_SMEM_PGOFF(soff); + + /* + * Hold semaphore to serialize pll init and fwtrc. + */ + if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) + return 1; + + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + + len = sz/sizeof(u32); + for (i = 0; i < len; i++) { + r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start)); + buf[i] = be32_to_cpu(r32); + loff += sizeof(u32); + + /** + * handle page offset wrap around + */ + loff = PSS_SMEM_PGOFF(loff); + if (loff == 0) { + pgnum++; + writel(pgnum, ioc->ioc_regs.host_page_num_fn); + } + } + + writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0), + ioc->ioc_regs.host_page_num_fn); + + /* + * release semaphore + */ + readl(ioc->ioc_regs.ioc_init_sem_reg); + writel(1, ioc->ioc_regs.ioc_init_sem_reg); + return 0; +} + +/* Retrieve saved firmware trace from a prior IOC failure. */ +int +bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen) +{ + u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id; + int tlen, status = 0; + + tlen = *trclen; + if (tlen > BNA_DBG_FWTRC_LEN) + tlen = BNA_DBG_FWTRC_LEN; + + status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen); + *trclen = tlen; + return status; +} + +/* Save firmware trace if configured. 
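+ * The trace is captured only once per failure (dbg_fwsave_once) and only
+ * when a save buffer has been configured (dbg_fwsave_len != 0).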
*/ +static void +bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc) +{ + int tlen; + + if (ioc->dbg_fwsave_once) { + ioc->dbg_fwsave_once = false; + if (ioc->dbg_fwsave_len) { + tlen = ioc->dbg_fwsave_len; + bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen); + } + } +} + +/* Retrieve saved firmware trace from a prior IOC failure. */ +int +bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen) +{ + int tlen; + + if (ioc->dbg_fwsave_len == 0) + return BFA_STATUS_ENOFSAVE; + + tlen = *trclen; + if (tlen > ioc->dbg_fwsave_len) + tlen = ioc->dbg_fwsave_len; + + memcpy(trcdata, ioc->dbg_fwsave, tlen); + *trclen = tlen; + return BFA_STATUS_OK; +} + +static void +bfa_ioc_fail_notify(struct bfa_ioc *ioc) +{ + /** + * Notify driver and common modules registered for notification. + */ + ioc->cbfn->hbfail_cbfn(ioc->bfa); + bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED); + bfa_nw_ioc_debug_save_ftrc(ioc); +} + +/* IOCPF to IOC interface */ +static void +bfa_ioc_pf_enabled(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_ENABLED); +} + +static void +bfa_ioc_pf_disabled(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_DISABLED); +} + +static void +bfa_ioc_pf_failed(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_PFFAILED); +} + +static void +bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_HWFAILED); +} + +static void +bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc) +{ + /** + * Provide enable completion callback and AEN notification. + */ + ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); +} + +/* IOC public */ +static enum bfa_status +bfa_ioc_pll_init(struct bfa_ioc *ioc) +{ + /* + * Hold semaphore so that nobody can access the chip during init. + */ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg); + + bfa_ioc_pll_init_asic(ioc); + + ioc->pllinit = true; + + /* Initialize LMEM */ + bfa_ioc_lmem_init(ioc); + + /* + * release semaphore. + */ + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg); + + return BFA_STATUS_OK; +} + +/* Interface used by diag module to do firmware boot with memory test + * as the entry vector. + */ +static enum bfa_status +bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type, + u32 boot_env) +{ + struct bfi_ioc_image_hdr *drv_fwhdr; + enum bfa_status status; + bfa_ioc_stats(ioc, ioc_boots); + + if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK) + return BFA_STATUS_FAILED; + if (boot_env == BFI_FWBOOT_ENV_OS && + boot_type == BFI_FWBOOT_TYPE_NORMAL) { + drv_fwhdr = (struct bfi_ioc_image_hdr *) + bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0); + /* Work with Flash iff flash f/w is better than driver f/w. + * Otherwise push drivers firmware. + */ + if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) == + BFI_IOC_IMG_VER_BETTER) + boot_type = BFI_FWBOOT_TYPE_FLASH; + } + + /** + * Initialize IOC state of all functions on a chip reset. + */ + if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) { + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST); + } else { + bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING); + bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING); + } + + bfa_ioc_msgflush(ioc); + status = bfa_ioc_download_fw(ioc, boot_type, boot_env); + if (status == BFA_STATUS_OK) + bfa_ioc_lpu_start(ioc); + else + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); + + return status; +} + +/* Enable/disable IOC failure auto recovery. 
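+ * The setting is latched into each IOCPF when it is reset
+ * (see bfa_iocpf_sm_reset_entry()).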
*/ +void +bfa_nw_ioc_auto_recover(bool auto_recover) +{ + bfa_nw_auto_recover = auto_recover; +} + +static bool +bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg) +{ + u32 *msgp = mbmsg; + u32 r32; + int i; + + r32 = readl(ioc->ioc_regs.lpu_mbox_cmd); + if ((r32 & 1) == 0) + return false; + + /** + * read the MBOX msg + */ + for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32)); + i++) { + r32 = readl(ioc->ioc_regs.lpu_mbox + + i * sizeof(u32)); + msgp[i] = htonl(r32); + } + + /** + * turn off mailbox interrupt by clearing mailbox status + */ + writel(1, ioc->ioc_regs.lpu_mbox_cmd); + readl(ioc->ioc_regs.lpu_mbox_cmd); + + return true; +} + +static void +bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) +{ + union bfi_ioc_i2h_msg_u *msg; + struct bfa_iocpf *iocpf = &ioc->iocpf; + + msg = (union bfi_ioc_i2h_msg_u *) m; + + bfa_ioc_stats(ioc, ioc_isrs); + + switch (msg->mh.msg_id) { + case BFI_IOC_I2H_HBEAT: + break; + + case BFI_IOC_I2H_ENABLE_REPLY: + bfa_ioc_enable_reply(ioc, + (enum bfa_mode)msg->fw_event.port_mode, + msg->fw_event.cap_bm); + break; + + case BFI_IOC_I2H_DISABLE_REPLY: + bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE); + break; + + case BFI_IOC_I2H_GETATTR_REPLY: + bfa_ioc_getattr_reply(ioc); + break; + + default: + BUG_ON(1); + } +} + +/** + * bfa_nw_ioc_attach - IOC attach time initialization and setup. + * + * @ioc: memory for IOC + * @bfa: driver instance structure + * @cbfn: callback function + */ +void +bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn) +{ + ioc->bfa = bfa; + ioc->cbfn = cbfn; + ioc->fcmode = false; + ioc->pllinit = false; + ioc->dbg_fwsave_once = true; + ioc->iocpf.ioc = ioc; + + bfa_ioc_mbox_attach(ioc); + INIT_LIST_HEAD(&ioc->notify_q); + + bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit); + bfa_fsm_send_event(ioc, IOC_E_RESET); +} + +/* Driver detach time IOC cleanup. */ +void +bfa_nw_ioc_detach(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(ioc, IOC_E_DETACH); + + /* Done with detach, empty the notify_q. */ + INIT_LIST_HEAD(&ioc->notify_q); +} + +/** + * bfa_nw_ioc_pci_init - Setup IOC PCI properties. + * + * @ioc: memory for IOC + * @pcidev: PCI device information for this IOC + * @clscode: class code + */ +void +bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, + enum bfi_pcifn_class clscode) +{ + ioc->clscode = clscode; + ioc->pcidev = *pcidev; + + /** + * Initialize IOC and device personality + */ + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC; + ioc->asic_mode = BFI_ASIC_MODE_FC; + + switch (pcidev->device_id) { + case PCI_DEVICE_ID_BROCADE_CT: + ioc->asic_gen = BFI_ASIC_GEN_CT; + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + break; + + case BFA_PCI_DEVICE_ID_CT2: + ioc->asic_gen = BFI_ASIC_GEN_CT2; + if (clscode == BFI_PCIFN_CLASS_FC && + pcidev->ssid == BFA_PCI_CT2_SSID_FC) { + ioc->asic_mode = BFI_ASIC_MODE_FC16; + ioc->fcmode = true; + ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA; + ioc->ad_cap_bm = BFA_CM_HBA; + } else { + ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH; + ioc->asic_mode = BFI_ASIC_MODE_ETH; + if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_CNA; + ioc->ad_cap_bm = BFA_CM_CNA; + } else { + ioc->port_mode = + ioc->port_mode_cfg = BFA_MODE_NIC; + ioc->ad_cap_bm = BFA_CM_NIC; + } + } + break; + + default: + BUG_ON(1); + } + + /** + * Set asic specific interfaces. 
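+ * CT parts use the ct hwif; CT2 parts use the ct2 hwif and additionally
+ * need the power-on sequence.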
+ */ + if (ioc->asic_gen == BFI_ASIC_GEN_CT) + bfa_nw_ioc_set_ct_hwif(ioc); + else { + WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2); + bfa_nw_ioc_set_ct2_hwif(ioc); + bfa_nw_ioc_ct2_poweron(ioc); + } + + bfa_ioc_map_port(ioc); + bfa_ioc_reg_init(ioc); +} + +/** + * bfa_nw_ioc_mem_claim - Initialize IOC dma memory + * + * @ioc: memory for IOC + * @dm_kva: kernel virtual address of IOC dma memory + * @dm_pa: physical address of IOC dma memory + */ +void +bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa) +{ + /** + * dma memory for firmware attribute + */ + ioc->attr_dma.kva = dm_kva; + ioc->attr_dma.pa = dm_pa; + ioc->attr = (struct bfi_ioc_attr *) dm_kva; +} + +/* Return size of dma memory required. */ +u32 +bfa_nw_ioc_meminfo(void) +{ + return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ); +} + +void +bfa_nw_ioc_enable(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_enables); + ioc->dbg_fwsave_once = true; + + bfa_fsm_send_event(ioc, IOC_E_ENABLE); +} + +void +bfa_nw_ioc_disable(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_disables); + bfa_fsm_send_event(ioc, IOC_E_DISABLE); +} + +/* Initialize memory for saving firmware trace. */ +void +bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave) +{ + ioc->dbg_fwsave = dbg_fwsave; + ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0; +} + +static u32 +bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr) +{ + return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr); +} + +/* Register mailbox message handler function, to be called by common modules */ +void +bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + + mod->mbhdlr[mc].cbfn = cbfn; + mod->mbhdlr[mc].cbarg = cbarg; +} + +/** + * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware. + * + * @ioc: IOC instance + * @cmd: Mailbox command + * @cbfn: callback function + * @cbarg: arguments to callback + * + * Waits if mailbox is busy. Responsibility of caller to serialize + */ +bool +bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd, + bfa_mbox_cmd_cbfn_t cbfn, void *cbarg) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + u32 stat; + + cmd->cbfn = cbfn; + cmd->cbarg = cbarg; + + /** + * If a previous command is pending, queue new command + */ + if (!list_empty(&mod->cmd_q)) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return true; + } + + /** + * If mailbox is busy, queue command for poll timer + */ + stat = readl(ioc->ioc_regs.hfn_mbox_cmd); + if (stat) { + list_add_tail(&cmd->qe, &mod->cmd_q); + return true; + } + + /** + * mailbox is free -- queue command to firmware + */ + bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg)); + + return false; +} + +/* Handle mailbox interrupts */ +void +bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc) +{ + struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod; + struct bfi_mbmsg m; + int mc; + + if (bfa_ioc_msgget(ioc, &m)) { + /** + * Treat IOC message class as special. 
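+ * IOC-class messages are handled inline; all other classes are dispatched
+ * to the handler registered for that class, if any.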
+ */ + mc = m.mh.msg_class; + if (mc == BFI_MC_IOC) { + bfa_ioc_isr(ioc, &m); + return; + } + + if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL)) + return; + + mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m); + } + + bfa_ioc_lpu_read_stat(ioc); + + /** + * Try to send pending mailbox commands + */ + bfa_ioc_mbox_poll(ioc); +} + +void +bfa_nw_ioc_error_isr(struct bfa_ioc *ioc) +{ + bfa_ioc_stats(ioc, ioc_hbfails); + bfa_ioc_stats_hb_count(ioc, ioc->hb_count); + bfa_fsm_send_event(ioc, IOC_E_HWERROR); +} + +/* return true if IOC is disabled */ +bool +bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) || + bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled); +} + +/* return true if IOC is operational */ +bool +bfa_nw_ioc_is_operational(struct bfa_ioc *ioc) +{ + return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op); +} + +/* Add to IOC heartbeat failure notification queue. To be used by common + * modules such as cee, port, diag. + */ +void +bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, + struct bfa_ioc_notify *notify) +{ + list_add_tail(¬ify->qe, &ioc->notify_q); +} + +#define BFA_MFG_NAME "QLogic" +static void +bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc, + struct bfa_adapter_attr *ad_attr) +{ + struct bfi_ioc_attr *ioc_attr; + + ioc_attr = ioc->attr; + + bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num); + bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver); + bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver); + bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer); + memcpy(&ad_attr->vpd, &ioc_attr->vpd, + sizeof(struct bfa_mfg_vpd)); + + ad_attr->nports = bfa_ioc_get_nports(ioc); + ad_attr->max_speed = bfa_ioc_speed_sup(ioc); + + bfa_ioc_get_adapter_model(ioc, ad_attr->model); + /* For now, model descr uses same model string */ + bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr); + + ad_attr->card_type = ioc_attr->card_type; + ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type); + + if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop)) + ad_attr->prototype = 1; + else + ad_attr->prototype = 0; + + ad_attr->pwwn = bfa_ioc_get_pwwn(ioc); + bfa_nw_ioc_get_mac(ioc, ad_attr->mac); + + ad_attr->pcie_gen = ioc_attr->pcie_gen; + ad_attr->pcie_lanes = ioc_attr->pcie_lanes; + ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig; + ad_attr->asic_rev = ioc_attr->asic_rev; + + bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver); +} + +static enum bfa_ioc_type +bfa_ioc_get_type(struct bfa_ioc *ioc) +{ + if (ioc->clscode == BFI_PCIFN_CLASS_ETH) + return BFA_IOC_TYPE_LL; + + BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC)); + + return (ioc->attr->port_mode == BFI_PORT_MODE_FC) + ? 
BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE; +} + +static void +bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num) +{ + memcpy(serial_num, + (void *)ioc->attr->brcd_serialnum, + BFA_ADAPTER_SERIAL_NUM_LEN); +} + +static void +bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver) +{ + memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN); +} + +static void +bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev) +{ + BUG_ON(!(chip_rev)); + + memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN); + + chip_rev[0] = 'R'; + chip_rev[1] = 'e'; + chip_rev[2] = 'v'; + chip_rev[3] = '-'; + chip_rev[4] = ioc->attr->asic_rev; + chip_rev[5] = '\0'; +} + +static void +bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) +{ + memcpy(optrom_ver, ioc->attr->optrom_version, + BFA_VERSION_LEN); +} + +static void +bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) +{ + strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); +} + +static void +bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model) +{ + struct bfi_ioc_attr *ioc_attr; + + BUG_ON(!(model)); + memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN); + + ioc_attr = ioc->attr; + + snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u", + BFA_MFG_NAME, ioc_attr->card_type); +} + +static enum bfa_ioc_state +bfa_ioc_get_state(struct bfa_ioc *ioc) +{ + enum bfa_iocpf_state iocpf_st; + enum bfa_ioc_state ioc_st = ioc_sm_to_state(ioc_sm_table, ioc->fsm); + + if (ioc_st == BFA_IOC_ENABLING || + ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) { + + iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + + switch (iocpf_st) { + case BFA_IOCPF_SEMWAIT: + ioc_st = BFA_IOC_SEMWAIT; + break; + + case BFA_IOCPF_HWINIT: + ioc_st = BFA_IOC_HWINIT; + break; + + case BFA_IOCPF_FWMISMATCH: + ioc_st = BFA_IOC_FWMISMATCH; + break; + + case BFA_IOCPF_FAIL: + ioc_st = BFA_IOC_FAIL; + break; + + case BFA_IOCPF_INITFAIL: + ioc_st = BFA_IOC_INITFAIL; + break; + + default: + break; + } + } + return ioc_st; +} + +void +bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr) +{ + memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr)); + + ioc_attr->state = bfa_ioc_get_state(ioc); + ioc_attr->port_id = bfa_ioc_portid(ioc); + ioc_attr->port_mode = ioc->port_mode; + + ioc_attr->port_mode_cfg = ioc->port_mode_cfg; + ioc_attr->cap_bm = ioc->ad_cap_bm; + + ioc_attr->ioc_type = bfa_ioc_get_type(ioc); + + bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr); + + ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc); + ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc); + ioc_attr->def_fn = bfa_ioc_is_default(ioc); + bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev); +} + +/* WWN public */ +static u64 +bfa_ioc_get_pwwn(struct bfa_ioc *ioc) +{ + return ioc->attr->pwwn; +} + +void +bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac) +{ + ether_addr_copy(mac, ioc->attr->mac); +} + +/* Firmware failure detected. Start recovery actions. 
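+ * Logs the heartbeat failure, updates the failure statistics and feeds
+ * IOC_E_HBFAIL into the IOC state machine.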
*/ +static void +bfa_ioc_recover(struct bfa_ioc *ioc) +{ + pr_crit("Heart Beat of IOC has failed\n"); + bfa_ioc_stats(ioc, ioc_hbfails); + bfa_ioc_stats_hb_count(ioc, ioc->hb_count); + bfa_fsm_send_event(ioc, IOC_E_HBFAIL); +} + +/* BFA IOC PF private functions */ + +static void +bfa_iocpf_enable(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE); +} + +static void +bfa_iocpf_disable(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE); +} + +static void +bfa_iocpf_fail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL); +} + +static void +bfa_iocpf_initfail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL); +} + +static void +bfa_iocpf_getattrfail(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL); +} + +static void +bfa_iocpf_stop(struct bfa_ioc *ioc) +{ + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP); +} + +void +bfa_nw_iocpf_timeout(struct bfa_ioc *ioc) +{ + enum bfa_iocpf_state iocpf_st; + + iocpf_st = iocpf_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm); + + if (iocpf_st == BFA_IOCPF_HWINIT) + bfa_ioc_poll_fwinit(ioc); + else + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); +} + +void +bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc) +{ + bfa_ioc_hw_sem_get(ioc); +} + +static void +bfa_ioc_poll_fwinit(struct bfa_ioc *ioc) +{ + u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc); + + if (fwstate == BFI_IOC_DISABLED) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY); + return; + } + + if (ioc->iocpf.poll_time >= BFA_IOC_TOV) { + bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT); + } else { + ioc->iocpf.poll_time += BFA_IOC_POLL_TOV; + mod_timer(&ioc->iocpf_timer, jiffies + + msecs_to_jiffies(BFA_IOC_POLL_TOV)); + } +} + +/* + * Flash module specific + */ + +/* + * FLASH DMA buffer should be big enough to hold both MFG block and + * asic block(64k) at the same time and also should be 2k aligned to + * avoid write segement to cross sector boundary. + */ +#define BFA_FLASH_SEG_SZ 2048 +#define BFA_FLASH_DMA_BUF_SZ \ + roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ) + +static void +bfa_flash_cb(struct bfa_flash *flash) +{ + flash->op_busy = 0; + if (flash->cbfn) + flash->cbfn(flash->cbarg, flash->status); +} + +static void +bfa_flash_notify(void *cbarg, enum bfa_ioc_event event) +{ + struct bfa_flash *flash = cbarg; + + switch (event) { + case BFA_IOC_E_DISABLED: + case BFA_IOC_E_FAILED: + if (flash->op_busy) { + flash->status = BFA_STATUS_IOC_FAILURE; + flash->cbfn(flash->cbarg, flash->status); + flash->op_busy = 0; + } + break; + default: + break; + } +} + +/* + * Send flash write request. + */ +static void +bfa_flash_write_send(struct bfa_flash *flash) +{ + struct bfi_flash_write_req *msg = + (struct bfi_flash_write_req *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + + /* indicate if it's the last msg of the whole write operation */ + msg->last = (len == flash->residue) ? 
1 : 0; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); + + flash->residue -= len; + flash->offset += len; +} + +/** + * bfa_flash_read_send - Send flash read request. + * + * @cbarg: callback argument + */ +static void +bfa_flash_read_send(void *cbarg) +{ + struct bfa_flash *flash = cbarg; + struct bfi_flash_read_req *msg = + (struct bfi_flash_read_req *) flash->mb.msg; + u32 len; + + msg->type = be32_to_cpu(flash->type); + msg->instance = flash->instance; + msg->offset = be32_to_cpu(flash->addr_off + flash->offset); + len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ? + flash->residue : BFA_FLASH_DMA_BUF_SZ; + msg->length = be32_to_cpu(len); + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, len, flash->dbuf_pa); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); +} + +/** + * bfa_flash_intr - Process flash response messages upon receiving interrupts. + * + * @flasharg: flash structure + * @msg: message structure + */ +static void +bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg) +{ + struct bfa_flash *flash = flasharg; + u32 status; + + union { + struct bfi_flash_query_rsp *query; + struct bfi_flash_write_rsp *write; + struct bfi_flash_read_rsp *read; + struct bfi_mbmsg *msg; + } m; + + m.msg = msg; + + /* receiving response after ioc failure */ + if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT) + return; + + switch (msg->mh.msg_id) { + case BFI_FLASH_I2H_QUERY_RSP: + status = be32_to_cpu(m.query->status); + if (status == BFA_STATUS_OK) { + u32 i; + struct bfa_flash_attr *attr, *f; + + attr = (struct bfa_flash_attr *) flash->ubuf; + f = (struct bfa_flash_attr *) flash->dbuf_kva; + attr->status = be32_to_cpu(f->status); + attr->npart = be32_to_cpu(f->npart); + for (i = 0; i < attr->npart; i++) { + attr->part[i].part_type = + be32_to_cpu(f->part[i].part_type); + attr->part[i].part_instance = + be32_to_cpu(f->part[i].part_instance); + attr->part[i].part_off = + be32_to_cpu(f->part[i].part_off); + attr->part[i].part_size = + be32_to_cpu(f->part[i].part_size); + attr->part[i].part_len = + be32_to_cpu(f->part[i].part_len); + attr->part[i].part_status = + be32_to_cpu(f->part[i].part_status); + } + } + flash->status = status; + bfa_flash_cb(flash); + break; + case BFI_FLASH_I2H_WRITE_RSP: + status = be32_to_cpu(m.write->status); + if (status != BFA_STATUS_OK || flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_write_send(flash); + break; + case BFI_FLASH_I2H_READ_RSP: + status = be32_to_cpu(m.read->status); + if (status != BFA_STATUS_OK) { + flash->status = status; + bfa_flash_cb(flash); + } else { + u32 len = be32_to_cpu(m.read->length); + memcpy(flash->ubuf + flash->offset, + flash->dbuf_kva, len); + flash->residue -= len; + flash->offset += len; + if (flash->residue == 0) { + flash->status = status; + bfa_flash_cb(flash); + } else + bfa_flash_read_send(flash); + } + break; + case BFI_FLASH_I2H_BOOT_VER_RSP: + case BFI_FLASH_I2H_EVENT: + break; + default: + WARN_ON(1); + } +} + +/* + * Flash memory info API. + */ +u32 +bfa_nw_flash_meminfo(void) +{ + return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/** + * bfa_nw_flash_attach - Flash attach API. 
+ * + * @flash: flash structure + * @ioc: ioc structure + * @dev: device structure + */ +void +bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev) +{ + flash->ioc = ioc; + flash->cbfn = NULL; + flash->cbarg = NULL; + flash->op_busy = 0; + + bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash); + bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash); + list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q); +} + +/** + * bfa_nw_flash_memclaim - Claim memory for flash + * + * @flash: flash structure + * @dm_kva: pointer to virtual memory address + * @dm_pa: physical memory address + */ +void +bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa) +{ + flash->dbuf_kva = dm_kva; + flash->dbuf_pa = dm_pa; + memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ); + dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); + dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ); +} + +/** + * bfa_nw_flash_get_attr - Get flash attribute. + * + * @flash: flash structure + * @attr: flash attribute structure + * @cbfn: callback function + * @cbarg: callback argument + * + * Return status. + */ +enum bfa_status +bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr, + bfa_cb_flash cbfn, void *cbarg) +{ + struct bfi_flash_query_req *msg = + (struct bfi_flash_query_req *) flash->mb.msg; + + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->ubuf = (u8 *) attr; + + bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ, + bfa_ioc_portid(flash->ioc)); + bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa); + bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL); + + return BFA_STATUS_OK; +} + +/** + * bfa_nw_flash_update_part - Update flash partition. + * + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: update data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument + * + * Return status. + */ +enum bfa_status +bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg) +{ + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + */ + if (!len || (len & 0x03)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (type == BFA_FLASH_PART_MFG) + return BFA_STATUS_EINVAL; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_write_send(flash); + + return BFA_STATUS_OK; +} + +/** + * bfa_nw_flash_read_part - Read flash partition. + * + * @flash: flash structure + * @type: flash partition type + * @instance: flash partition instance + * @buf: read data buffer + * @len: data buffer length + * @offset: offset relative to the partition starting address + * @cbfn: callback function + * @cbarg: callback argument + * + * Return status. 
+ */ +enum bfa_status +bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance, + void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg) +{ + if (!bfa_nw_ioc_is_operational(flash->ioc)) + return BFA_STATUS_IOC_NON_OP; + + /* + * 'len' must be in word (4-byte) boundary + */ + if (!len || (len & 0x03)) + return BFA_STATUS_FLASH_BAD_LEN; + + if (flash->op_busy) + return BFA_STATUS_DEVBUSY; + + flash->op_busy = 1; + flash->cbfn = cbfn; + flash->cbarg = cbarg; + flash->type = type; + flash->instance = instance; + flash->residue = len; + flash->offset = 0; + flash->addr_off = offset; + flash->ubuf = buf; + + bfa_flash_read_send(flash); + + return BFA_STATUS_OK; +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h new file mode 100644 index 0000000000..f30d06ec4f --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h @@ -0,0 +1,352 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_IOC_H__ +#define __BFA_IOC_H__ + +#include "bfa_cs.h" +#include "bfi.h" +#include "cna.h" + +#define BFA_IOC_TOV 3000 /* msecs */ +#define BFA_IOC_HWSEM_TOV 500 /* msecs */ +#define BFA_IOC_HB_TOV 500 /* msecs */ +#define BFA_IOC_POLL_TOV 200 /* msecs */ +#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \ + BFI_IOC_TRC_HDR_SZ) + +/* PCI device information required by IOC */ +struct bfa_pcidev { + int pci_slot; + u8 pci_func; + u16 device_id; + u16 ssid; + void __iomem *pci_bar_kva; +}; + +/* Structure used to remember the DMA-able memory block's KVA and Physical + * Address + */ +struct bfa_dma { + void *kva; /* ! Kernel virtual address */ + u64 pa; /* ! Physical address */ +}; + +#define BFA_DMA_ALIGN_SZ 256 + +/* smem size for Crossbow and Catapult */ +#define BFI_SMEM_CB_SIZE 0x200000U /* ! 2MB for crossbow */ +#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ + +/* BFA dma address assignment macro. 
(big endian format) */ +#define bfa_dma_be_addr_set(dma_addr, pa) \ + __bfa_dma_be_addr_set(&dma_addr, (u64)pa) +static inline void +__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa) +{ + dma_addr->a32.addr_lo = (u32) htonl(pa); + dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa)); +} + +#define bfa_alen_set(__alen, __len, __pa) \ + __bfa_alen_set(__alen, __len, (u64)__pa) + +static inline void +__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa) +{ + alen->al_len = cpu_to_be32(len); + bfa_dma_be_addr_set(alen->al_addr, pa); +} + +struct bfa_ioc_regs { + void __iomem *hfn_mbox_cmd; + void __iomem *hfn_mbox; + void __iomem *lpu_mbox_cmd; + void __iomem *lpu_mbox; + void __iomem *lpu_read_stat; + void __iomem *pss_ctl_reg; + void __iomem *pss_err_status_reg; + void __iomem *app_pll_fast_ctl_reg; + void __iomem *app_pll_slow_ctl_reg; + void __iomem *ioc_sem_reg; + void __iomem *ioc_usage_sem_reg; + void __iomem *ioc_init_sem_reg; + void __iomem *ioc_usage_reg; + void __iomem *host_page_num_fn; + void __iomem *heartbeat; + void __iomem *ioc_fwstate; + void __iomem *alt_ioc_fwstate; + void __iomem *ll_halt; + void __iomem *alt_ll_halt; + void __iomem *err_set; + void __iomem *ioc_fail_sync; + void __iomem *shirq_isr_next; + void __iomem *shirq_msk_next; + void __iomem *smem_page_start; + u32 smem_pg0; +}; + +/* IOC Mailbox structures */ +typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg); +struct bfa_mbox_cmd { + struct list_head qe; + bfa_mbox_cmd_cbfn_t cbfn; + void *cbarg; + u32 msg[BFI_IOC_MSGSZ]; +}; + +/* IOC mailbox module */ +typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m); +struct bfa_ioc_mbox_mod { + struct list_head cmd_q; /*!< pending mbox queue */ + int nmclass; /*!< number of handlers */ + struct { + bfa_ioc_mbox_mcfunc_t cbfn; /*!< message handlers */ + void *cbarg; + } mbhdlr[BFI_MC_MAX]; +}; + +/* IOC callback function interfaces */ +typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status); +typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa); +typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa); +struct bfa_ioc_cbfn { + bfa_ioc_enable_cbfn_t enable_cbfn; + bfa_ioc_disable_cbfn_t disable_cbfn; + bfa_ioc_hbfail_cbfn_t hbfail_cbfn; + bfa_ioc_reset_cbfn_t reset_cbfn; +}; + +/* IOC event notification mechanism. 
*/ +enum bfa_ioc_event { + BFA_IOC_E_ENABLED = 1, + BFA_IOC_E_DISABLED = 2, + BFA_IOC_E_FAILED = 3, +}; + +typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event); + +struct bfa_ioc_notify { + struct list_head qe; + bfa_ioc_notify_cbfn_t cbfn; + void *cbarg; +}; + +/* Initialize a IOC event notification structure */ +#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do { \ + (__notify)->cbfn = (__cbfn); \ + (__notify)->cbarg = (__cbarg); \ +} while (0) + +enum iocpf_event; + +struct bfa_iocpf { + void (*fsm)(struct bfa_iocpf *s, enum iocpf_event e); + struct bfa_ioc *ioc; + bool fw_mismatch_notified; + bool auto_recover; + u32 poll_time; +}; + +enum ioc_event; + +struct bfa_ioc { + void (*fsm)(struct bfa_ioc *s, enum ioc_event e); + struct bfa *bfa; + struct bfa_pcidev pcidev; + struct timer_list ioc_timer; + struct timer_list iocpf_timer; + struct timer_list sem_timer; + struct timer_list hb_timer; + u32 hb_count; + struct list_head notify_q; + void *dbg_fwsave; + int dbg_fwsave_len; + bool dbg_fwsave_once; + enum bfi_pcifn_class clscode; + struct bfa_ioc_regs ioc_regs; + struct bfa_ioc_drv_stats stats; + bool fcmode; + bool pllinit; + bool stats_busy; /*!< outstanding stats */ + u8 port_id; + + struct bfa_dma attr_dma; + struct bfi_ioc_attr *attr; + struct bfa_ioc_cbfn *cbfn; + struct bfa_ioc_mbox_mod mbox_mod; + const struct bfa_ioc_hwif *ioc_hwif; + struct bfa_iocpf iocpf; + enum bfi_asic_gen asic_gen; + enum bfi_asic_mode asic_mode; + enum bfi_port_mode port0_mode; + enum bfi_port_mode port1_mode; + enum bfa_mode port_mode; + u8 ad_cap_bm; /*!< adapter cap bit mask */ + u8 port_mode_cfg; /*!< config port mode */ +}; + +struct bfa_ioc_hwif { + enum bfa_status (*ioc_pll_init) (void __iomem *rb, + enum bfi_asic_mode m); + bool (*ioc_firmware_lock) (struct bfa_ioc *ioc); + void (*ioc_firmware_unlock) (struct bfa_ioc *ioc); + void (*ioc_reg_init) (struct bfa_ioc *ioc); + void (*ioc_map_port) (struct bfa_ioc *ioc); + void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, + bool msix); + void (*ioc_notify_fail) (struct bfa_ioc *ioc); + void (*ioc_ownership_reset) (struct bfa_ioc *ioc); + bool (*ioc_sync_start) (struct bfa_ioc *ioc); + void (*ioc_sync_join) (struct bfa_ioc *ioc); + void (*ioc_sync_leave) (struct bfa_ioc *ioc); + void (*ioc_sync_ack) (struct bfa_ioc *ioc); + bool (*ioc_sync_complete) (struct bfa_ioc *ioc); + bool (*ioc_lpu_read_stat) (struct bfa_ioc *ioc); + void (*ioc_set_fwstate) (struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc); + void (*ioc_set_alt_fwstate) (struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate); + enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc); + +}; + +#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) +#define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id) +#define bfa_ioc_bar0(__ioc) ((__ioc)->pcidev.pci_bar_kva) +#define bfa_ioc_portid(__ioc) ((__ioc)->port_id) +#define bfa_ioc_asic_gen(__ioc) ((__ioc)->asic_gen) +#define bfa_ioc_is_default(__ioc) \ + (bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc)) +#define bfa_ioc_speed_sup(__ioc) \ + BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop) +#define bfa_ioc_get_nports(__ioc) \ + BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop) + +#define bfa_ioc_stats(_ioc, _stats) ((_ioc)->stats._stats++) +#define bfa_ioc_stats_hb_count(_ioc, _hb_count) \ + ((_ioc)->stats.hb_count = (_hb_count)) +#define BFA_IOC_FWIMG_MINSZ (16 * 1024) +#define BFA_IOC_FW_SMEM_SIZE(__ioc) \ + ((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB) \ + 
? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE) +#define BFA_IOC_FLASH_CHUNK_NO(off) (off / BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off) (off % BFI_FLASH_CHUNK_SZ_WORDS) +#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno) (chunkno * BFI_FLASH_CHUNK_SZ_WORDS) + +/* IOC mailbox interface */ +bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, + struct bfa_mbox_cmd *cmd, + bfa_mbox_cmd_cbfn_t cbfn, void *cbarg); +void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc); +void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc, + bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg); + +/* IOC interfaces */ + +#define bfa_ioc_pll_init_asic(__ioc) \ + ((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \ + (__ioc)->asic_mode)) + +#define bfa_ioc_lpu_read_stat(__ioc) do { \ + if ((__ioc)->ioc_hwif->ioc_lpu_read_stat) \ + ((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc)); \ +} while (0) + +void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc); +void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc); +void bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc); + +void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, + struct bfa_ioc_cbfn *cbfn); +void bfa_nw_ioc_auto_recover(bool auto_recover); +void bfa_nw_ioc_detach(struct bfa_ioc *ioc); +void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev, + enum bfi_pcifn_class clscode); +u32 bfa_nw_ioc_meminfo(void); +void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa); +void bfa_nw_ioc_enable(struct bfa_ioc *ioc); +void bfa_nw_ioc_disable(struct bfa_ioc *ioc); + +void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); +bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc); +bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc); +void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); +enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc); +void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc, + struct bfa_ioc_notify *notify); +bool bfa_nw_ioc_sem_get(void __iomem *sem_reg); +void bfa_nw_ioc_sem_release(void __iomem *sem_reg); +void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc); +void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *fwhdr); +bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, + struct bfi_ioc_image_hdr *fwhdr); +void bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac); +void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave); +int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen); +int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen); + +/* + * Timeout APIs + */ +void bfa_nw_ioc_timeout(struct bfa_ioc *ioc); +void bfa_nw_ioc_hb_check(struct bfa_ioc *ioc); +void bfa_nw_iocpf_timeout(struct bfa_ioc *ioc); +void bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc); + +/* + * F/W Image Size & Chunk + */ +u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off); +u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen); + +/* + * Flash module specific + */ +typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status); + +struct bfa_flash { + struct bfa_ioc *ioc; /* back pointer to ioc */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 op_busy; /* operation busy flag */ + u32 residue; /* residual length */ + u32 offset; /* offset */ + enum bfa_status status; /* status */ + u8 *dbuf_kva; /* dma buf virtual address */ + u64 dbuf_pa; /* dma buf physical address */ + bfa_cb_flash cbfn; /* user callback function */ + void *cbarg; /* user callback arg */ + u8 *ubuf; /* user supplied buffer */ + u32 
addr_off; /* partition address offset */ + struct bfa_mbox_cmd mb; /* mailbox */ + struct bfa_ioc_notify ioc_notify; /* ioc event notify */ +}; + +enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash, + struct bfa_flash_attr *attr, + bfa_cb_flash cbfn, void *cbarg); +enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash, + u32 type, u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg); +enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash, + u32 type, u8 instance, void *buf, u32 len, u32 offset, + bfa_cb_flash cbfn, void *cbarg); +u32 bfa_nw_flash_meminfo(void); +void bfa_nw_flash_attach(struct bfa_flash *flash, + struct bfa_ioc *ioc, void *dev); +void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa); + +#endif /* __BFA_IOC_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c new file mode 100644 index 0000000000..bd643d8411 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c @@ -0,0 +1,936 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "bfa_ioc.h" +#include "cna.h" +#include "bfi.h" +#include "bfi_reg.h" +#include "bfa_defs.h" + +#define bfa_ioc_ct_sync_pos(__ioc) BIT(bfa_ioc_pcifn(__ioc)) +#define BFA_IOC_SYNC_REQD_SH 16 +#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff) +#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000) +#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH) +#define bfa_ioc_ct_sync_reqd_pos(__ioc) \ + (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH) + +/* + * forward declarations + */ +static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc); +static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc); +static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); +static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc); +static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); +static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc); +static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); +static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc); +static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); +static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc); +static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc); +static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc); +static void bfa_ioc_ct_set_cur_ioc_fwstate( + struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc); +static void bfa_ioc_ct_set_alt_ioc_fwstate( + struct bfa_ioc *ioc, enum bfi_ioc_state fwstate); +static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc); +static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, + enum bfi_asic_mode asic_mode); +static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb, + enum bfi_asic_mode asic_mode); +static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc); + +static const struct bfa_ioc_hwif nw_hwif_ct = { + .ioc_pll_init = bfa_ioc_ct_pll_init, + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, + .ioc_reg_init = bfa_ioc_ct_reg_init, + .ioc_map_port 
= bfa_ioc_ct_map_port, + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set, + .ioc_notify_fail = bfa_ioc_ct_notify_fail, + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, + .ioc_sync_start = bfa_ioc_ct_sync_start, + .ioc_sync_join = bfa_ioc_ct_sync_join, + .ioc_sync_leave = bfa_ioc_ct_sync_leave, + .ioc_sync_ack = bfa_ioc_ct_sync_ack, + .ioc_sync_complete = bfa_ioc_ct_sync_complete, + .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, + .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, + .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, + .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, +}; + +static const struct bfa_ioc_hwif nw_hwif_ct2 = { + .ioc_pll_init = bfa_ioc_ct2_pll_init, + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock, + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock, + .ioc_reg_init = bfa_ioc_ct2_reg_init, + .ioc_map_port = bfa_ioc_ct2_map_port, + .ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat, + .ioc_isr_mode_set = NULL, + .ioc_notify_fail = bfa_ioc_ct_notify_fail, + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset, + .ioc_sync_start = bfa_ioc_ct_sync_start, + .ioc_sync_join = bfa_ioc_ct_sync_join, + .ioc_sync_leave = bfa_ioc_ct_sync_leave, + .ioc_sync_ack = bfa_ioc_ct_sync_ack, + .ioc_sync_complete = bfa_ioc_ct_sync_complete, + .ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate, + .ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate, + .ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate, + .ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate, +}; + +/* Called from bfa_ioc_attach() to map asic specific calls. */ +void +bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc) +{ + ioc->ioc_hwif = &nw_hwif_ct; +} + +void +bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc) +{ + ioc->ioc_hwif = &nw_hwif_ct2; +} + +/* Return true if firmware of current driver matches the running firmware. */ +static bool +bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc) +{ + enum bfi_ioc_state ioc_fwstate; + u32 usecnt; + struct bfi_ioc_image_hdr fwhdr; + + /** + * If bios boot (flash based) -- do not increment usage count + */ + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < + BFA_IOC_FWIMG_MINSZ) + return true; + + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + + /** + * If usage count is 0, always return TRUE. + */ + if (usecnt == 0) { + writel(1, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_fail_sync); + return true; + } + + ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate); + + /** + * Use count cannot be non-zero and chip in uninitialized state. + */ + BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT)); + + /** + * Check if another driver with a different firmware is active + */ + bfa_nw_ioc_fwver_get(ioc, &fwhdr); + if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) { + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + return false; + } + + /** + * Same firmware version. Increment the reference count. 
+ */ + usecnt++; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + return true; +} + +static void +bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc) +{ + u32 usecnt; + + /** + * If bios boot (flash based) -- do not decrement usage count + */ + if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) < + BFA_IOC_FWIMG_MINSZ) + return; + + /** + * decrement usage count + */ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + usecnt = readl(ioc->ioc_regs.ioc_usage_reg); + BUG_ON(!(usecnt > 0)); + + usecnt--; + writel(usecnt, ioc->ioc_regs.ioc_usage_reg); + + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); +} + +/* Notify other functions on HB failure. */ +static void +bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc) +{ + writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); + writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt); + /* Wait for halt to take effect */ + readl(ioc->ioc_regs.ll_halt); + readl(ioc->ioc_regs.alt_ll_halt); +} + +/* Host to LPU mailbox message addresses */ +static const struct { + u32 hfn_mbox; + u32 lpu_mbox; + u32 hfn_pgn; +} ct_fnreg[] = { + { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, + { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, + { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 }, + { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 } +}; + +/* Host <-> LPU mailbox command/status registers - port 0 */ +static const struct { + u32 hfn; + u32 lpu; +} ct_p0reg[] = { + { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT } +}; + +/* Host <-> LPU mailbox command/status registers - port 1 */ +static const struct { + u32 hfn; + u32 lpu; +} ct_p1reg[] = { + { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT }, + { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }, + { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT }, + { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT } +}; + +static const struct { + u32 hfn_mbox; + u32 lpu_mbox; + u32 hfn_pgn; + u32 hfn; + u32 lpu; + u32 lpu_read; +} ct2_reg[] = { + { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU0_READ_STAT}, + { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM, + CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT, + CT2_HOSTFN_LPU1_READ_STAT}, +}; + +static void +bfa_ioc_ct_reg_init(struct bfa_ioc *ioc) +{ + void __iomem *rb; + int pcifn = bfa_ioc_pcifn(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; + + if (ioc->port_id == 0) { + ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu; + 
ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; + ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; + ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG; + ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG; + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG; + ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG; + ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG; + ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT; + ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC; + + /** + * sram memory access + */ + ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = (rb + ERR_SET_REG); +} + +static void +bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc) +{ + void __iomem *rb; + int port = bfa_ioc_portid(ioc); + + rb = bfa_ioc_bar0(ioc); + + ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox; + ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox; + ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn; + ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn; + ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu; + ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read; + + if (port == 0) { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1; + } else { + ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG; + ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG; + ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG; + ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; + ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0; + } + + /* + * PSS control registers + */ + ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG; + ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG; + ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG; + ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG; + + /* + * IOC semaphore registers and serialization + */ + ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG; + ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG; + ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG; + ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT; + ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC; + + /** + * sram memory access + */ + ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START; + ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT; + + /* + * err set reg : for notification of hb failure in fcmode + */ + ioc->ioc_regs.err_set = rb + ERR_SET_REG; +} + +/* Initialize IOC to port mapping. 
*/ + +#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8) +static void +bfa_ioc_ct_map_port(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + /** + * For catapult, base port id on personality register and IOC type + */ + r32 = readl(rb + FNC_PERS_REG); + r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)); + ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH; + +} + +static void +bfa_ioc_ct2_map_port(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + CT2_HOSTFN_PERSONALITY0); + ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH); +} + +/* Set interrupt mode for a function: INTX or MSIX */ +static void +bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32, mode; + + r32 = readl(rb + FNC_PERS_REG); + + mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) & + __F0_INTX_STATUS; + + /** + * If already in desired mode, do not change anything + */ + if ((!msix && mode) || (msix && !mode)) + return; + + if (msix) + mode = __F0_INTX_STATUS_MSIX; + else + mode = __F0_INTX_STATUS_INTA; + + r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))); + + writel(r32, rb + FNC_PERS_REG); +} + +static bool +bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc) +{ + u32 r32; + + r32 = readl(ioc->ioc_regs.lpu_read_stat); + if (r32) { + writel(1, ioc->ioc_regs.lpu_read_stat); + return true; + } + + return false; +} + +/* MSI-X resource allocation for 1860 with no asic block */ +#define HOSTFN_MSIX_DEFAULT 64 +#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR 0x30138 +#define HOSTFN_MSIX_VT_OFST_NUMVT 0x3013c +#define __MSIX_VT_NUMVT__MK 0x003ff800 +#define __MSIX_VT_NUMVT__SH 11 +#define __MSIX_VT_NUMVT_(_v) ((_v) << __MSIX_VT_NUMVT__SH) +#define __MSIX_VT_OFST_ 0x000007ff +void +bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc) +{ + void __iomem *rb = ioc->pcidev.pci_bar_kva; + u32 r32; + + r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT); + if (r32 & __MSIX_VT_NUMVT__MK) { + writel(r32 & __MSIX_VT_OFST_, + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); + return; + } + + writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) | + HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_OFST_NUMVT); + writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc), + rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR); +} + +/* Cleanup hw semaphore and usecnt registers */ +static void +bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc) +{ + bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg); + writel(0, ioc->ioc_regs.ioc_usage_reg); + bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); + + /* + * Read the hw sem reg to make sure that it is locked + * before we clear it. If it is not locked, writing 1 + * will lock it instead of clearing it. + */ + readl(ioc->ioc_regs.ioc_sem_reg); + bfa_nw_ioc_hw_sem_release(ioc); +} + +/* Synchronized IOC failure processing routines */ +static bool +bfa_ioc_ct_sync_start(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + + /* + * Driver load time. If the sync required bit for this PCI fn + * is set, it is due to an unclean exit by the driver for this + * PCI fn in the previous incarnation. Whoever comes here first + * should clean it up, no matter which PCI fn. 
+ */ + + if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) { + writel(0, ioc->ioc_regs.ioc_fail_sync); + writel(1, ioc->ioc_regs.ioc_usage_reg); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate); + return true; + } + + return bfa_ioc_ct_sync_complete(ioc); +} +/* Synchronized IOC failure processing routines */ +static void +bfa_ioc_ct_sync_join(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc); + + writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) | + bfa_ioc_ct_sync_pos(ioc); + + writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync); +} + +static void +bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + + writel(r32 | bfa_ioc_ct_sync_pos(ioc), ioc->ioc_regs.ioc_fail_sync); +} + +static bool +bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc) +{ + u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync); + u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32); + u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32); + u32 tmp_ackd; + + if (sync_ackd == 0) + return true; + + /** + * The check below is to see whether any other PCI fn + * has reinitialized the ASIC (reset sync_ackd bits) + * and failed again while this IOC was waiting for hw + * semaphore (in bfa_iocpf_sm_semwait()). + */ + tmp_ackd = sync_ackd; + if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) && + !(sync_ackd & bfa_ioc_ct_sync_pos(ioc))) + sync_ackd |= bfa_ioc_ct_sync_pos(ioc); + + if (sync_reqd == sync_ackd) { + writel(bfa_ioc_ct_clear_sync_ackd(r32), + ioc->ioc_regs.ioc_fail_sync); + writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); + writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate); + return true; + } + + /** + * If another PCI fn reinitialized and failed again while + * this IOC was waiting for hw sem, the sync_ackd bit for + * this IOC need to be set again to allow reinitialization. 
+ */ + if (tmp_ackd != sync_ackd) + writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync); + + return false; +} + +static void +bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate); +} + +static void +bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc, + enum bfi_ioc_state fwstate) +{ + writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfi_ioc_state +bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc) +{ + return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate); +} + +static enum bfa_status +bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) +{ + u32 pll_sclk, pll_fclk, r32; + bool fcmode = (asic_mode == BFI_ASIC_MODE_FC); + + pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST | + __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) | + __APP_PLL_SCLK_JITLMT0_1(3U) | + __APP_PLL_SCLK_CNTLMT0_1(1U); + pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST | + __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) | + __APP_PLL_LCLK_JITLMT0_1(3U) | + __APP_PLL_LCLK_CNTLMT0_1(1U); + + if (fcmode) { + writel(0, (rb + OP_MODE)); + writel(__APP_EMS_CMLCKSEL | + __APP_EMS_REFCKBUFEN2 | + __APP_EMS_CHANNEL_SEL, + (rb + ETH_MAC_SER_REG)); + } else { + writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE)); + writel(__APP_EMS_REFCKBUFEN1, + (rb + ETH_MAC_SER_REG)); + } + writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG)); + writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN0_INT_MSK)); + writel(0xffffffffU, (rb + HOSTFN1_INT_MSK)); + writel(pll_sclk | + __APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + APP_PLL_LCLK_CTL_REG); + writel(pll_sclk | + __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE, + rb + APP_PLL_LCLK_CTL_REG); + readl(rb + HOSTFN0_INT_MSK); + udelay(2000); + writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS)); + writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS)); + writel(pll_sclk | + __APP_PLL_SCLK_ENABLE, + rb + APP_PLL_SCLK_CTL_REG); + writel(pll_fclk | + __APP_PLL_LCLK_ENABLE, + rb + APP_PLL_LCLK_CTL_REG); + + if (!fcmode) { + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0)); + writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1)); + } + r32 = readl(rb + PSS_CTL_REG); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, (rb + PSS_CTL_REG)); + udelay(1000); + if (!fcmode) { + writel(0, (rb + PMM_1T_RESET_REG_P0)); + writel(0, (rb + PMM_1T_RESET_REG_P1)); + } + + writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG)); + udelay(1000); + r32 = readl(rb + MBIST_STAT_REG); + writel(0, (rb + MBIST_CTL_REG)); + return BFA_STATUS_OK; +} + +static void +bfa_ioc_ct2_sclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put s_clk PLL and PLL FSM in reset + */ + r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); + r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN); + r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS | + __APP_PLL_SCLK_LOGIC_SOFT_RESET); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * Ignore mode and program for the 
max clock (which is FC16) + * Firmware/NFC will do the PLL init appropriately + */ + r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); + r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2); + writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG)); + + /* + * while doing PLL init dont clock gate ethernet subsystem + */ + r32 = readl(rb + CT2_CHIP_MISC_PRG); + writel(r32 | __ETH_CLK_ENABLE_PORT0, + rb + CT2_CHIP_MISC_PRG); + + r32 = readl(rb + CT2_PCIE_MISC_REG); + writel(r32 | __ETH_CLK_ENABLE_PORT1, + rb + CT2_PCIE_MISC_REG); + + /* + * set sclk value + */ + r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); + r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL | + __APP_PLL_SCLK_CLK_DIV2); + writel(r32 | 0x1061731b, rb + CT2_APP_PLL_SCLK_CTL_REG); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); + + /* + * Dont do clock gating for ethernet subsystem, firmware/NFC will + * do this appropriately + */ +} + +static void +bfa_ioc_ct2_lclk_init(void __iomem *rb) +{ + u32 r32; + + /* + * put l_clk PLL and PLL FSM in reset + */ + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN); + r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS | + __APP_PLL_LCLK_LOGIC_SOFT_RESET); + writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG); + + /* + * set LPU speed (set for FC16 which will work for other modes) + */ + r32 = readl(rb + CT2_CHIP_MISC_PRG); + writel(r32, (rb + CT2_CHIP_MISC_PRG)); + + /* + * set LPU half speed (set for FC16 which will work for other modes) + */ + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + writel(r32, rb + CT2_APP_PLL_LCLK_CTL_REG); + + /* + * set lclk for mode (set for FC16) + */ + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED); + r32 |= 0x20c1731b; + writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG)); + + /* + * poll for s_clk lock or delay 1ms + */ + udelay(1000); +} + +static void +bfa_ioc_ct2_mem_init(void __iomem *rb) +{ + u32 r32; + + r32 = readl(rb + PSS_CTL_REG); + r32 &= ~__PSS_LMEM_RESET; + writel(r32, rb + PSS_CTL_REG); + udelay(1000); + + writel(__EDRAM_BISTR_START, rb + CT2_MBIST_CTL_REG); + udelay(1000); + writel(0, rb + CT2_MBIST_CTL_REG); +} + +static void +bfa_ioc_ct2_mac_reset(void __iomem *rb) +{ + volatile u32 r32; + + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_SCLK_CTL_REG); + + /* + * release soft reset on s_clk & l_clk + */ + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_LCLK_CTL_REG); + + /* put port0, port1 MAC & AHB in reset */ + writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET, + rb + CT2_CSI_MAC_CONTROL_REG(0)); + writel(__CSI_MAC_RESET | __CSI_MAC_AHB_RESET, + rb + CT2_CSI_MAC_CONTROL_REG(1)); +} + +#define CT2_NFC_MAX_DELAY 1000 +#define CT2_NFC_VER_VALID 0x143 +#define BFA_IOC_PLL_POLL 1000000 + +static bool +bfa_ioc_ct2_nfc_halted(void __iomem *rb) +{ + volatile u32 r32; + + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + return true; + + return false; +} + +static void +bfa_ioc_ct2_nfc_resume(void __iomem *rb) +{ + volatile u32 r32; + int i; + + writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (!(r32 & __NFC_CONTROLLER_HALTED)) + return; + udelay(1000); + } + BUG_ON(1); +} + +static enum 
bfa_status +bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) +{ + volatile u32 wgn, r32; + u32 nfc_ver, i; + + wgn = readl(rb + CT2_WGN_STATUS); + + nfc_ver = readl(rb + CT2_RSC_GPR15_REG); + + if (wgn == (__A2T_AHB_LOAD | __WGN_READY) && + nfc_ver >= CT2_NFC_VER_VALID) { + if (bfa_ioc_ct2_nfc_halted(rb)) + bfa_ioc_ct2_nfc_resume(rb); + writel(__RESET_AND_START_SCLK_LCLK_PLLS, + rb + CT2_CSI_FW_CTL_SET_REG); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS) + break; + } + BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)); + + for (i = 0; i < BFA_IOC_PLL_POLL; i++) { + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS)) + break; + } + BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + udelay(1000); + + r32 = readl(rb + CT2_CSI_FW_CTL_REG); + BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS); + } else { + writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); + for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { + r32 = readl(rb + CT2_NFC_CSR_SET_REG); + if (r32 & __NFC_CONTROLLER_HALTED) + break; + udelay(1000); + } + + bfa_ioc_ct2_mac_reset(rb); + bfa_ioc_ct2_sclk_init(rb); + bfa_ioc_ct2_lclk_init(rb); + + /* release soft reset on s_clk & l_clk */ + r32 = readl(rb + CT2_APP_PLL_SCLK_CTL_REG); + writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_SCLK_CTL_REG); + r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG); + writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET, + rb + CT2_APP_PLL_LCLK_CTL_REG); + } + + /* Announce flash device presence, if flash was corrupted. */ + if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) { + r32 = readl(rb + PSS_GPIO_OUT_REG); + writel(r32 & ~1, rb + PSS_GPIO_OUT_REG); + r32 = readl(rb + PSS_GPIO_OE_REG); + writel(r32 | 1, rb + PSS_GPIO_OE_REG); + } + + /* + * Mask the interrupts and clear any + * pending interrupts left by BIOS/EFI + */ + writel(1, rb + CT2_LPU0_HOSTFN_MBOX0_MSK); + writel(1, rb + CT2_LPU1_HOSTFN_MBOX0_MSK); + + /* For first time initialization, no need to clear interrupts */ + r32 = readl(rb + HOST_SEM5_REG); + if (r32 & 0x1) { + r32 = readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); + if (r32 == 1) { + writel(1, rb + CT2_LPU0_HOSTFN_CMD_STAT); + readl(rb + CT2_LPU0_HOSTFN_CMD_STAT); + } + r32 = readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); + if (r32 == 1) { + writel(1, rb + CT2_LPU1_HOSTFN_CMD_STAT); + readl(rb + CT2_LPU1_HOSTFN_CMD_STAT); + } + } + + bfa_ioc_ct2_mem_init(rb); + + writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC0_STATE_REG); + writel(BFI_IOC_UNINIT, rb + CT2_BFA_IOC1_STATE_REG); + return BFA_STATUS_OK; +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.c b/drivers/net/ethernet/brocade/bna/bfa_msgq.c new file mode 100644 index 0000000000..fa40d5ec6f --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.c @@ -0,0 +1,658 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* MSGQ module source file. 
*/ + +#include "bfi.h" +#include "bfa_msgq.h" +#include "bfa_ioc.h" + +#define call_cmdq_ent_cbfn(_cmdq_ent, _status) \ +{ \ + bfa_msgq_cmdcbfn_t cbfn; \ + void *cbarg; \ + cbfn = (_cmdq_ent)->cbfn; \ + cbarg = (_cmdq_ent)->cbarg; \ + (_cmdq_ent)->cbfn = NULL; \ + (_cmdq_ent)->cbarg = NULL; \ + if (cbfn) { \ + cbfn(cbarg, (_status)); \ + } \ +} + +static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq); +static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq); + +enum cmdq_event { + CMDQ_E_START = 1, + CMDQ_E_STOP = 2, + CMDQ_E_FAIL = 3, + CMDQ_E_POST = 4, + CMDQ_E_INIT_RESP = 5, + CMDQ_E_DB_READY = 6, +}; + +bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event); +bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq, + enum cmdq_event); + +static void +cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq) +{ + struct bfa_msgq_cmd_entry *cmdq_ent; + + cmdq->producer_index = 0; + cmdq->consumer_index = 0; + cmdq->flags = 0; + cmdq->token = 0; + cmdq->offset = 0; + cmdq->bytes_to_copy = 0; + while (!list_empty(&cmdq->pending_q)) { + cmdq_ent = list_first_entry(&cmdq->pending_q, + struct bfa_msgq_cmd_entry, qe); + list_del(&cmdq_ent->qe); + call_cmdq_ent_cbfn(cmdq_ent, BFA_STATUS_FAILED); + } +} + +static void +cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_START: + bfa_fsm_set_state(cmdq, cmdq_sm_init_wait); + break; + + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + /* No-op */ + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq) +{ + bfa_wc_down(&cmdq->msgq->init_wc); +} + +static void +cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + case CMDQ_E_INIT_RESP: + if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) { + cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE; + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + } else + bfa_fsm_set_state(cmdq, cmdq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq) +{ +} + +static void +cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq) +{ + bfa_msgq_cmdq_dbell(cmdq); +} + +static void +cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event) +{ + switch (event) { + case CMDQ_E_STOP: + case CMDQ_E_FAIL: + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); + break; + + case CMDQ_E_POST: + cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE; + break; + + case CMDQ_E_DB_READY: + if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) { + cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE; + bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait); + } else + bfa_fsm_set_state(cmdq, cmdq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_msgq_cmdq_dbell_ready(void *arg) +{ + struct bfa_msgq_cmdq *cmdq = (struct 
bfa_msgq_cmdq *)arg; + bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY); +} + +static void +bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq) +{ + struct bfi_msgq_h2i_db *dbell = + (struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]); + + memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db)); + bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_PI, 0); + dbell->mh.mtag.i2htok = 0; + dbell->idx.cmdq_pi = htons(cmdq->producer_index); + + if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb, + bfa_msgq_cmdq_dbell_ready, cmdq)) { + bfa_msgq_cmdq_dbell_ready(cmdq); + } +} + +static void +__cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd) +{ + size_t len = cmd->msg_size; + size_t to_copy; + u8 *src, *dst; + + src = (u8 *)cmd->msg_hdr; + dst = (u8 *)cmdq->addr.kva; + dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE); + + while (len) { + to_copy = (len < BFI_MSGQ_CMD_ENTRY_SIZE) ? + len : BFI_MSGQ_CMD_ENTRY_SIZE; + memcpy(dst, src, to_copy); + len -= to_copy; + src += BFI_MSGQ_CMD_ENTRY_SIZE; + BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth); + dst = (u8 *)cmdq->addr.kva; + dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE); + } + +} + +static void +bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb; + struct bfa_msgq_cmd_entry *cmd; + int posted = 0; + + cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci); + + /* Walk through pending list to see if the command can be posted */ + while (!list_empty(&cmdq->pending_q)) { + cmd = list_first_entry(&cmdq->pending_q, + struct bfa_msgq_cmd_entry, qe); + if (ntohs(cmd->msg_hdr->num_entries) <= + BFA_MSGQ_FREE_CNT(cmdq)) { + list_del(&cmd->qe); + __cmd_copy(cmdq, cmd); + posted = 1; + call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK); + } else { + break; + } + } + + if (posted) + bfa_fsm_send_event(cmdq, CMDQ_E_POST); +} + +static void +bfa_msgq_cmdq_copy_next(void *arg) +{ + struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg; + + if (cmdq->bytes_to_copy) + bfa_msgq_cmdq_copy_rsp(cmdq); +} + +static void +bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_cmdq_copy_req *req = + (struct bfi_msgq_i2h_cmdq_copy_req *)mb; + + cmdq->token = 0; + cmdq->offset = ntohs(req->offset); + cmdq->bytes_to_copy = ntohs(req->len); + bfa_msgq_cmdq_copy_rsp(cmdq); +} + +static void +bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq) +{ + struct bfi_msgq_h2i_cmdq_copy_rsp *rsp = + (struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0]; + int copied; + u8 *addr = (u8 *)cmdq->addr.kva; + + memset(rsp, 0, sizeof(struct bfi_msgq_h2i_cmdq_copy_rsp)); + bfi_h2i_set(rsp->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_CMDQ_COPY_RSP, 0); + rsp->mh.mtag.i2htok = htons(cmdq->token); + copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? 
BFI_CMD_COPY_SZ : + cmdq->bytes_to_copy; + addr += cmdq->offset; + memcpy(rsp->data, addr, copied); + + cmdq->token++; + cmdq->offset += copied; + cmdq->bytes_to_copy -= copied; + + if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb, + bfa_msgq_cmdq_copy_next, cmdq)) { + bfa_msgq_cmdq_copy_next(cmdq); + } +} + +static void +bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq) +{ + cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY; + INIT_LIST_HEAD(&cmdq->pending_q); + cmdq->msgq = msgq; + bfa_fsm_set_state(cmdq, cmdq_sm_stopped); +} + +static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq); + +enum rspq_event { + RSPQ_E_START = 1, + RSPQ_E_STOP = 2, + RSPQ_E_FAIL = 3, + RSPQ_E_RESP = 4, + RSPQ_E_INIT_RESP = 5, + RSPQ_E_DB_READY = 6, +}; + +bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event); +bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq, + enum rspq_event); +bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event); +bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq, + enum rspq_event); + +static void +rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq) +{ + rspq->producer_index = 0; + rspq->consumer_index = 0; + rspq->flags = 0; +} + +static void +rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_START: + bfa_fsm_set_state(rspq, rspq_sm_init_wait); + break; + + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq) +{ + bfa_wc_down(&rspq->msgq->init_wc); +} + +static void +rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_FAIL: + case RSPQ_E_STOP: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_INIT_RESP: + bfa_fsm_set_state(rspq, rspq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq) +{ +} + +static void +rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_RESP: + bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq) +{ + if (!bfa_nw_ioc_is_disabled(rspq->msgq->ioc)) + bfa_msgq_rspq_dbell(rspq); +} + +static void +rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event) +{ + switch (event) { + case RSPQ_E_STOP: + case RSPQ_E_FAIL: + bfa_fsm_set_state(rspq, rspq_sm_stopped); + break; + + case RSPQ_E_RESP: + rspq->flags |= BFA_MSGQ_RSPQ_F_DB_UPDATE; + break; + + case RSPQ_E_DB_READY: + if (rspq->flags & BFA_MSGQ_RSPQ_F_DB_UPDATE) { + rspq->flags &= ~BFA_MSGQ_RSPQ_F_DB_UPDATE; + bfa_fsm_set_state(rspq, rspq_sm_dbell_wait); + } else + bfa_fsm_set_state(rspq, rspq_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bfa_msgq_rspq_dbell_ready(void *arg) +{ + struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg; + bfa_fsm_send_event(rspq, RSPQ_E_DB_READY); +} + +static void +bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq) +{ + struct bfi_msgq_h2i_db *dbell = + (struct bfi_msgq_h2i_db *)(&rspq->dbell_mb.msg[0]); + + memset(dbell, 0, sizeof(struct bfi_msgq_h2i_db)); + bfi_h2i_set(dbell->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_DOORBELL_CI, 0); + dbell->mh.mtag.i2htok = 0; + dbell->idx.rspq_ci = htons(rspq->consumer_index); + + 
if (!bfa_nw_ioc_mbox_queue(rspq->msgq->ioc, &rspq->dbell_mb, + bfa_msgq_rspq_dbell_ready, rspq)) { + bfa_msgq_rspq_dbell_ready(rspq); + } +} + +static void +bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb) +{ + struct bfi_msgq_i2h_db *dbell = (struct bfi_msgq_i2h_db *)mb; + struct bfi_msgq_mhdr *msghdr; + int num_entries; + int mc; + u8 *rspq_qe; + + rspq->producer_index = ntohs(dbell->idx.rspq_pi); + + while (rspq->consumer_index != rspq->producer_index) { + rspq_qe = (u8 *)rspq->addr.kva; + rspq_qe += (rspq->consumer_index * BFI_MSGQ_RSP_ENTRY_SIZE); + msghdr = (struct bfi_msgq_mhdr *)rspq_qe; + + mc = msghdr->msg_class; + num_entries = ntohs(msghdr->num_entries); + + if ((mc >= BFI_MC_MAX) || (rspq->rsphdlr[mc].cbfn == NULL)) + break; + + (rspq->rsphdlr[mc].cbfn)(rspq->rsphdlr[mc].cbarg, msghdr); + + BFA_MSGQ_INDX_ADD(rspq->consumer_index, num_entries, + rspq->depth); + } + + bfa_fsm_send_event(rspq, RSPQ_E_RESP); +} + +static void +bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq) +{ + rspq->depth = BFA_MSGQ_RSPQ_NUM_ENTRY; + rspq->msgq = msgq; + bfa_fsm_set_state(rspq, rspq_sm_stopped); +} + +static void +bfa_msgq_init_rsp(struct bfa_msgq *msgq, + struct bfi_mbmsg *mb) +{ + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_INIT_RESP); +} + +static void +bfa_msgq_init(void *arg) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)arg; + struct bfi_msgq_cfg_req *msgq_cfg = + (struct bfi_msgq_cfg_req *)&msgq->init_mb.msg[0]; + + memset(msgq_cfg, 0, sizeof(struct bfi_msgq_cfg_req)); + bfi_h2i_set(msgq_cfg->mh, BFI_MC_MSGQ, BFI_MSGQ_H2I_INIT_REQ, 0); + msgq_cfg->mh.mtag.i2htok = 0; + + bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa); + msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth); + bfa_dma_be_addr_set(msgq_cfg->rspq.addr, msgq->rspq.addr.pa); + msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth); + + bfa_nw_ioc_mbox_queue(msgq->ioc, &msgq->init_mb, NULL, NULL); +} + +static void +bfa_msgq_isr(void *cbarg, struct bfi_mbmsg *msg) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg; + + switch (msg->mh.msg_id) { + case BFI_MSGQ_I2H_INIT_RSP: + bfa_msgq_init_rsp(msgq, msg); + break; + + case BFI_MSGQ_I2H_DOORBELL_PI: + bfa_msgq_rspq_pi_update(&msgq->rspq, msg); + break; + + case BFI_MSGQ_I2H_DOORBELL_CI: + bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg); + break; + + case BFI_MSGQ_I2H_CMDQ_COPY_REQ: + bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg); + break; + + default: + BUG_ON(1); + } +} + +static void +bfa_msgq_notify(void *cbarg, enum bfa_ioc_event event) +{ + struct bfa_msgq *msgq = (struct bfa_msgq *)cbarg; + + switch (event) { + case BFA_IOC_E_ENABLED: + bfa_wc_init(&msgq->init_wc, bfa_msgq_init, msgq); + bfa_wc_up(&msgq->init_wc); + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START); + bfa_wc_up(&msgq->init_wc); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_START); + bfa_wc_wait(&msgq->init_wc); + break; + + case BFA_IOC_E_DISABLED: + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_STOP); + break; + + case BFA_IOC_E_FAILED: + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL); + bfa_fsm_send_event(&msgq->rspq, RSPQ_E_FAIL); + break; + + default: + break; + } +} + +u32 +bfa_msgq_meminfo(void) +{ + return roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ) + + roundup(BFA_MSGQ_RSPQ_SIZE, BFA_DMA_ALIGN_SZ); +} + +void +bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa) +{ + msgq->cmdq.addr.kva = kva; + msgq->cmdq.addr.pa = pa; + + kva += roundup(BFA_MSGQ_CMDQ_SIZE, 
BFA_DMA_ALIGN_SZ); + pa += roundup(BFA_MSGQ_CMDQ_SIZE, BFA_DMA_ALIGN_SZ); + + msgq->rspq.addr.kva = kva; + msgq->rspq.addr.pa = pa; +} + +void +bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc) +{ + msgq->ioc = ioc; + + bfa_msgq_cmdq_attach(&msgq->cmdq, msgq); + bfa_msgq_rspq_attach(&msgq->rspq, msgq); + + bfa_nw_ioc_mbox_regisr(msgq->ioc, BFI_MC_MSGQ, bfa_msgq_isr, msgq); + bfa_ioc_notify_init(&msgq->ioc_notify, bfa_msgq_notify, msgq); + bfa_nw_ioc_notify_register(msgq->ioc, &msgq->ioc_notify); +} + +void +bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc, + bfa_msgq_mcfunc_t cbfn, void *cbarg) +{ + msgq->rspq.rsphdlr[mc].cbfn = cbfn; + msgq->rspq.rsphdlr[mc].cbarg = cbarg; +} + +void +bfa_msgq_cmd_post(struct bfa_msgq *msgq, struct bfa_msgq_cmd_entry *cmd) +{ + if (ntohs(cmd->msg_hdr->num_entries) <= + BFA_MSGQ_FREE_CNT(&msgq->cmdq)) { + __cmd_copy(&msgq->cmdq, cmd); + call_cmdq_ent_cbfn(cmd, BFA_STATUS_OK); + bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST); + } else { + list_add_tail(&cmd->qe, &msgq->cmdq.pending_q); + } +} + +void +bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len) +{ + struct bfa_msgq_rspq *rspq = &msgq->rspq; + size_t len = buf_len; + size_t to_copy; + int ci; + u8 *src, *dst; + + ci = rspq->consumer_index; + src = (u8 *)rspq->addr.kva; + src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE); + dst = buf; + + while (len) { + to_copy = (len < BFI_MSGQ_RSP_ENTRY_SIZE) ? + len : BFI_MSGQ_RSP_ENTRY_SIZE; + memcpy(dst, src, to_copy); + len -= to_copy; + dst += BFI_MSGQ_RSP_ENTRY_SIZE; + BFA_MSGQ_INDX_ADD(ci, 1, rspq->depth); + src = (u8 *)rspq->addr.kva; + src += (ci * BFI_MSGQ_RSP_ENTRY_SIZE); + } +} diff --git a/drivers/net/ethernet/brocade/bna/bfa_msgq.h b/drivers/net/ethernet/brocade/bna/bfa_msgq.h new file mode 100644 index 0000000000..170a4b4bed --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfa_msgq.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __BFA_MSGQ_H__ +#define __BFA_MSGQ_H__ + +#include "bfa_defs.h" +#include "bfi.h" +#include "bfa_ioc.h" +#include "bfa_cs.h" + +#define BFA_MSGQ_FREE_CNT(_q) \ + (((_q)->consumer_index - (_q)->producer_index - 1) & ((_q)->depth - 1)) + +#define BFA_MSGQ_INDX_ADD(_q_indx, _qe_num, _q_depth) \ + ((_q_indx) = (((_q_indx) + (_qe_num)) & ((_q_depth) - 1))) + +#define BFA_MSGQ_CMDQ_NUM_ENTRY 128 +#define BFA_MSGQ_CMDQ_SIZE \ + (BFI_MSGQ_CMD_ENTRY_SIZE * BFA_MSGQ_CMDQ_NUM_ENTRY) + +#define BFA_MSGQ_RSPQ_NUM_ENTRY 128 +#define BFA_MSGQ_RSPQ_SIZE \ + (BFI_MSGQ_RSP_ENTRY_SIZE * BFA_MSGQ_RSPQ_NUM_ENTRY) + +#define bfa_msgq_cmd_set(_cmd, _cbfn, _cbarg, _msg_size, _msg_hdr) \ +do { \ + (_cmd)->cbfn = (_cbfn); \ + (_cmd)->cbarg = (_cbarg); \ + (_cmd)->msg_size = (_msg_size); \ + (_cmd)->msg_hdr = (_msg_hdr); \ +} while (0) + +struct bfa_msgq; + +typedef void (*bfa_msgq_cmdcbfn_t)(void *cbarg, enum bfa_status status); + +struct bfa_msgq_cmd_entry { + struct list_head qe; + bfa_msgq_cmdcbfn_t cbfn; + void *cbarg; + size_t msg_size; + struct bfi_msgq_mhdr *msg_hdr; +}; + +enum bfa_msgq_cmdq_flags { + BFA_MSGQ_CMDQ_F_DB_UPDATE = 1, +}; + +enum cmdq_event; + +struct bfa_msgq_cmdq { + void (*fsm)(struct bfa_msgq_cmdq *s, enum cmdq_event e); + enum bfa_msgq_cmdq_flags flags; + + u16 producer_index; + u16 consumer_index; + u16 depth; /* FW Q depth is 16 bits */ + struct bfa_dma addr; + struct bfa_mbox_cmd dbell_mb; + + u16 token; + int offset; + int bytes_to_copy; + struct bfa_mbox_cmd copy_mb; + + struct list_head pending_q; /* pending command queue */ + + struct bfa_msgq *msgq; +}; + +enum bfa_msgq_rspq_flags { + BFA_MSGQ_RSPQ_F_DB_UPDATE = 1, +}; + +typedef void (*bfa_msgq_mcfunc_t)(void *cbarg, struct bfi_msgq_mhdr *mhdr); + +enum rspq_event; + +struct bfa_msgq_rspq { + void (*fsm)(struct bfa_msgq_rspq *s, enum rspq_event e); + enum bfa_msgq_rspq_flags flags; + + u16 producer_index; + u16 consumer_index; + u16 depth; /* FW Q depth is 16 bits */ + struct bfa_dma addr; + struct bfa_mbox_cmd dbell_mb; + + int nmclass; + struct { + bfa_msgq_mcfunc_t cbfn; + void *cbarg; + } rsphdlr[BFI_MC_MAX]; + + struct bfa_msgq *msgq; +}; + +struct bfa_msgq { + struct bfa_msgq_cmdq cmdq; + struct bfa_msgq_rspq rspq; + + struct bfa_wc init_wc; + struct bfa_mbox_cmd init_mb; + + struct bfa_ioc_notify ioc_notify; + struct bfa_ioc *ioc; +}; + +u32 bfa_msgq_meminfo(void); +void bfa_msgq_memclaim(struct bfa_msgq *msgq, u8 *kva, u64 pa); +void bfa_msgq_attach(struct bfa_msgq *msgq, struct bfa_ioc *ioc); +void bfa_msgq_regisr(struct bfa_msgq *msgq, enum bfi_mclass mc, + bfa_msgq_mcfunc_t cbfn, void *cbarg); +void bfa_msgq_cmd_post(struct bfa_msgq *msgq, + struct bfa_msgq_cmd_entry *cmd); +void bfa_msgq_rsp_copy(struct bfa_msgq *msgq, u8 *buf, size_t buf_len); + +#endif diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h new file mode 100644 index 0000000000..f780d42c94 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi.h @@ -0,0 +1,551 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFI_H__ +#define __BFI_H__ + +#include "bfa_defs.h" + +/* BFI FW image type */ +#define BFI_FLASH_CHUNK_SZ 256 /*!< Flash chunk size */ +#define BFI_FLASH_CHUNK_SZ_WORDS (BFI_FLASH_CHUNK_SZ/sizeof(u32)) +#define BFI_FLASH_IMAGE_SZ 0x100000 + +/* Msg header common to all msgs */ +struct bfi_mhdr { + u8 msg_class; /*!< @ref enum bfi_mclass */ + u8 msg_id; /*!< msg opcode with in the class */ + union { + struct { + u8 qid; + u8 fn_lpu; /*!< msg destination */ + } __packed h2i; + u16 i2htok; /*!< token in msgs to host */ + } __packed mtag; +} __packed; + +#define bfi_fn_lpu(__fn, __lpu) ((__fn) << 1 | (__lpu)) +#define bfi_mhdr_2_fn(_mh) ((_mh)->mtag.h2i.fn_lpu >> 1) +#define bfi_mhdr_2_qid(_mh) ((_mh)->mtag.h2i.qid) + +#define bfi_h2i_set(_mh, _mc, _op, _fn_lpu) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.h2i.fn_lpu = (_fn_lpu); \ +} while (0) + +#define bfi_i2h_set(_mh, _mc, _op, _i2htok) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_op); \ + (_mh).mtag.i2htok = (_i2htok); \ +} while (0) + +/* + * Message opcodes: 0-127 to firmware, 128-255 to host + */ +#define BFI_I2H_OPCODE_BASE 128 +#define BFA_I2HM(_x) ((_x) + BFI_I2H_OPCODE_BASE) + +/**************************************************************************** + * + * Scatter Gather Element and Page definition + * + **************************************************************************** + */ + +/* DMA addresses */ +union bfi_addr_u { + struct { + u32 addr_lo; + u32 addr_hi; + } __packed a32; +} __packed; + +/* Generic DMA addr-len pair. */ +struct bfi_alen { + union bfi_addr_u al_addr; /* DMA addr of buffer */ + u32 al_len; /* length of buffer */ +} __packed; + +/* + * Large Message structure - 128 Bytes size Msgs + */ +#define BFI_LMSG_SZ 128 +#define BFI_LMSG_PL_WSZ \ + ((BFI_LMSG_SZ - sizeof(struct bfi_mhdr)) / 4) + +/* Mailbox message structure */ +#define BFI_MBMSG_SZ 7 +struct bfi_mbmsg { + struct bfi_mhdr mh; + u32 pl[BFI_MBMSG_SZ]; +} __packed; + +/* Supported PCI function class codes (personality) */ +enum bfi_pcifn_class { + BFI_PCIFN_CLASS_FC = 0x0c04, + BFI_PCIFN_CLASS_ETH = 0x0200, +}; + +/* Message Classes */ +enum bfi_mclass { + BFI_MC_IOC = 1, /*!< IO Controller (IOC) */ + BFI_MC_DIAG = 2, /*!< Diagnostic Msgs */ + BFI_MC_FLASH = 3, /*!< Flash message class */ + BFI_MC_CEE = 4, /*!< CEE */ + BFI_MC_FCPORT = 5, /*!< FC port */ + BFI_MC_IOCFC = 6, /*!< FC - IO Controller (IOC) */ + BFI_MC_LL = 7, /*!< Link Layer */ + BFI_MC_UF = 8, /*!< Unsolicited frame receive */ + BFI_MC_FCXP = 9, /*!< FC Transport */ + BFI_MC_LPS = 10, /*!< lport fc login services */ + BFI_MC_RPORT = 11, /*!< Remote port */ + BFI_MC_ITNIM = 12, /*!< I-T nexus (Initiator mode) */ + BFI_MC_IOIM_READ = 13, /*!< read IO (Initiator mode) */ + BFI_MC_IOIM_WRITE = 14, /*!< write IO (Initiator mode) */ + BFI_MC_IOIM_IO = 15, /*!< IO (Initiator mode) */ + BFI_MC_IOIM = 16, /*!< IO (Initiator mode) */ + BFI_MC_IOIM_IOCOM = 17, /*!< good IO completion */ + BFI_MC_TSKIM = 18, /*!< Initiator Task management */ + BFI_MC_SBOOT = 19, /*!< SAN boot services */ + BFI_MC_IPFC = 20, /*!< IP over FC Msgs */ + BFI_MC_PORT = 21, /*!< Physical port */ + BFI_MC_SFP = 22, /*!< SFP module */ + BFI_MC_MSGQ = 23, /*!< MSGQ */ + BFI_MC_ENET = 24, /*!< ENET commands/responses */ + BFI_MC_PHY = 25, /*!< External PHY message class */ + BFI_MC_NBOOT = 26, /*!< Network Boot */ + BFI_MC_TIO_READ = 27, /*!< read IO (Target mode) */ + 
BFI_MC_TIO_WRITE = 28, /*!< write IO (Target mode) */ + BFI_MC_TIO_DATA_XFERED = 29, /*!< ds transferred (target mode) */ + BFI_MC_TIO_IO = 30, /*!< IO (Target mode) */ + BFI_MC_TIO = 31, /*!< IO (target mode) */ + BFI_MC_MFG = 32, /*!< MFG/ASIC block commands */ + BFI_MC_EDMA = 33, /*!< EDMA copy commands */ + BFI_MC_MAX = 34 +}; + +#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */ + +#define BFI_FWBOOT_ENV_OS 0 + +/*---------------------------------------------------------------------- + * IOC + *---------------------------------------------------------------------- + */ + +/* Different asic generations */ +enum bfi_asic_gen { + BFI_ASIC_GEN_CB = 1, + BFI_ASIC_GEN_CT = 2, + BFI_ASIC_GEN_CT2 = 3, +}; + +enum bfi_asic_mode { + BFI_ASIC_MODE_FC = 1, /* FC up to 8G speed */ + BFI_ASIC_MODE_FC16 = 2, /* FC up to 16G speed */ + BFI_ASIC_MODE_ETH = 3, /* Ethernet ports */ + BFI_ASIC_MODE_COMBO = 4, /* FC 16G and Ethernet 10G port */ +}; + +enum bfi_ioc_h2i_msgs { + BFI_IOC_H2I_ENABLE_REQ = 1, + BFI_IOC_H2I_DISABLE_REQ = 2, + BFI_IOC_H2I_GETATTR_REQ = 3, + BFI_IOC_H2I_DBG_SYNC = 4, + BFI_IOC_H2I_DBG_DUMP = 5, +}; + +enum bfi_ioc_i2h_msgs { + BFI_IOC_I2H_ENABLE_REPLY = BFA_I2HM(1), + BFI_IOC_I2H_DISABLE_REPLY = BFA_I2HM(2), + BFI_IOC_I2H_GETATTR_REPLY = BFA_I2HM(3), + BFI_IOC_I2H_HBEAT = BFA_I2HM(4), +}; + +/* BFI_IOC_H2I_GETATTR_REQ message */ +struct bfi_ioc_getattr_req { + struct bfi_mhdr mh; + union bfi_addr_u attr_addr; +} __packed; + +struct bfi_ioc_attr { + u64 mfg_pwwn; /*!< Mfg port wwn */ + u64 mfg_nwwn; /*!< Mfg node wwn */ + u8 mfg_mac[ETH_ALEN]; /*!< Mfg mac */ + u8 port_mode; /* enum bfi_port_mode */ + u8 rsvd_a; + u64 pwwn; + u64 nwwn; + u8 mac[ETH_ALEN]; /*!< PBC or Mfg mac */ + u16 rsvd_b; + u8 fcoe_mac[ETH_ALEN]; + u16 rsvd_c; + char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)]; + u8 pcie_gen; + u8 pcie_lanes_orig; + u8 pcie_lanes; + u8 rx_bbcredit; /*!< receive buffer credits */ + u32 adapter_prop; /*!< adapter properties */ + u16 maxfrsize; /*!< max receive frame size */ + char asic_rev; + u8 rsvd_d; + char fw_version[BFA_VERSION_LEN]; + char optrom_version[BFA_VERSION_LEN]; + struct bfa_mfg_vpd vpd; + u32 card_type; /*!< card type */ +} __packed; + +/* BFI_IOC_I2H_GETATTR_REPLY message */ +struct bfi_ioc_getattr_reply { + struct bfi_mhdr mh; /*!< Common msg header */ + u8 status; /*!< cfg reply status */ + u8 rsvd[3]; +} __packed; + +/* Firmware memory page offsets */ +#define BFI_IOC_SMEM_PG0_CB (0x40) +#define BFI_IOC_SMEM_PG0_CT (0x180) + +/* Firmware statistic offset */ +#define BFI_IOC_FWSTATS_OFF (0x6B40) +#define BFI_IOC_FWSTATS_SZ (4096) + +/* Firmware trace offset */ +#define BFI_IOC_TRC_OFF (0x4b00) +#define BFI_IOC_TRC_ENTS 256 +#define BFI_IOC_TRC_ENT_SZ 16 +#define BFI_IOC_TRC_HDR_SZ 32 + +#define BFI_IOC_FW_SIGNATURE (0xbfadbfad) +#define BFI_IOC_FW_INV_SIGN (0xdeaddead) +#define BFI_IOC_MD5SUM_SZ 4 + +struct bfi_ioc_fwver { +#ifdef __BIG_ENDIAN + u8 patch; + u8 maint; + u8 minor; + u8 major; + u8 rsvd[2]; + u8 build; + u8 phase; +#else + u8 major; + u8 minor; + u8 maint; + u8 patch; + u8 phase; + u8 build; + u8 rsvd[2]; +#endif +} __packed; + +struct bfi_ioc_image_hdr { + u32 signature; /*!< constant signature */ + u8 asic_gen; /*!< asic generation */ + u8 asic_mode; + u8 port0_mode; /*!< device mode for port 0 */ + u8 port1_mode; /*!< device mode for port 1 */ + u32 exec; /*!< exec vector */ + u32 bootenv; /*!< firmware boot env */ + u32 rsvd_b[2]; + struct bfi_ioc_fwver fwver; + u32 md5sum[BFI_IOC_MD5SUM_SZ]; +} __packed; + +enum bfi_ioc_img_ver_cmp { + 
BFI_IOC_IMG_VER_INCOMP, + BFI_IOC_IMG_VER_OLD, + BFI_IOC_IMG_VER_SAME, + BFI_IOC_IMG_VER_BETTER +}; + +#define BFI_FWBOOT_DEVMODE_OFF 4 +#define BFI_FWBOOT_TYPE_OFF 8 +#define BFI_FWBOOT_ENV_OFF 12 +#define BFI_FWBOOT_DEVMODE(__asic_gen, __asic_mode, __p0_mode, __p1_mode) \ + (((u32)(__asic_gen)) << 24 | \ + ((u32)(__asic_mode)) << 16 | \ + ((u32)(__p0_mode)) << 8 | \ + ((u32)(__p1_mode))) + +enum bfi_fwboot_type { + BFI_FWBOOT_TYPE_NORMAL = 0, + BFI_FWBOOT_TYPE_FLASH = 1, + BFI_FWBOOT_TYPE_MEMTEST = 2, +}; + +enum bfi_port_mode { + BFI_PORT_MODE_FC = 1, + BFI_PORT_MODE_ETH = 2, +}; + +struct bfi_ioc_hbeat { + struct bfi_mhdr mh; /*!< common msg header */ + u32 hb_count; /*!< current heart beat count */ +} __packed; + +/* IOC hardware/firmware state */ +enum bfi_ioc_state { + BFI_IOC_UNINIT = 0, /*!< not initialized */ + BFI_IOC_INITING = 1, /*!< h/w is being initialized */ + BFI_IOC_HWINIT = 2, /*!< h/w is initialized */ + BFI_IOC_CFG = 3, /*!< IOC configuration in progress */ + BFI_IOC_OP = 4, /*!< IOC is operational */ + BFI_IOC_DISABLING = 5, /*!< IOC is being disabled */ + BFI_IOC_DISABLED = 6, /*!< IOC is disabled */ + BFI_IOC_CFG_DISABLED = 7, /*!< IOC is being disabled;transient */ + BFI_IOC_FAIL = 8, /*!< IOC heart-beat failure */ + BFI_IOC_MEMTEST = 9, /*!< IOC is doing memtest */ +}; + +enum { + BFI_ADAPTER_TYPE_FC = 0x01, /*!< FC adapters */ + BFI_ADAPTER_TYPE_MK = 0x0f0000, /*!< adapter type mask */ + BFI_ADAPTER_TYPE_SH = 16, /*!< adapter type shift */ + BFI_ADAPTER_NPORTS_MK = 0xff00, /*!< number of ports mask */ + BFI_ADAPTER_NPORTS_SH = 8, /*!< number of ports shift */ + BFI_ADAPTER_SPEED_MK = 0xff, /*!< adapter speed mask */ + BFI_ADAPTER_SPEED_SH = 0, /*!< adapter speed shift */ + BFI_ADAPTER_PROTO = 0x100000, /*!< prototype adapaters */ + BFI_ADAPTER_TTV = 0x200000, /*!< TTV debug capable */ + BFI_ADAPTER_UNSUPP = 0x400000, /*!< unknown adapter type */ +}; + +#define BFI_ADAPTER_GETP(__prop, __adap_prop) \ + (((__adap_prop) & BFI_ADAPTER_ ## __prop ## _MK) >> \ + BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_SETP(__prop, __val) \ + ((__val) << BFI_ADAPTER_ ## __prop ## _SH) +#define BFI_ADAPTER_IS_SPECIAL(__adap_type) \ + ((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO | \ + BFI_ADAPTER_UNSUPP)) + +/* BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages */ +struct bfi_ioc_ctrl_req { + struct bfi_mhdr mh; + u16 clscode; + u16 rsvd; + u32 tv_sec; +} __packed; + +/* BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages */ +struct bfi_ioc_ctrl_reply { + struct bfi_mhdr mh; /*!< Common msg header */ + u8 status; /*!< enable/disable status */ + u8 port_mode; /*!< enum bfa_mode */ + u8 cap_bm; /*!< capability bit mask */ + u8 rsvd; +} __packed; + +#define BFI_IOC_MSGSZ 8 +/* H2I Messages */ +union bfi_ioc_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_ioc_ctrl_req enable_req; + struct bfi_ioc_ctrl_req disable_req; + struct bfi_ioc_getattr_req getattr_req; + u32 mboxmsg[BFI_IOC_MSGSZ]; +} __packed; + +/* I2H Messages */ +union bfi_ioc_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_ioc_ctrl_reply fw_event; + u32 mboxmsg[BFI_IOC_MSGSZ]; +} __packed; + +/*---------------------------------------------------------------------- + * MSGQ + *---------------------------------------------------------------------- + */ + +enum bfi_msgq_h2i_msgs { + BFI_MSGQ_H2I_INIT_REQ = 1, + BFI_MSGQ_H2I_DOORBELL_PI = 2, + BFI_MSGQ_H2I_DOORBELL_CI = 3, + BFI_MSGQ_H2I_CMDQ_COPY_RSP = 4, +}; + +enum bfi_msgq_i2h_msgs { + BFI_MSGQ_I2H_INIT_RSP = BFA_I2HM(BFI_MSGQ_H2I_INIT_REQ), + 
BFI_MSGQ_I2H_DOORBELL_PI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_PI), + BFI_MSGQ_I2H_DOORBELL_CI = BFA_I2HM(BFI_MSGQ_H2I_DOORBELL_CI), + BFI_MSGQ_I2H_CMDQ_COPY_REQ = BFA_I2HM(BFI_MSGQ_H2I_CMDQ_COPY_RSP), +}; + +/* Messages(commands/responsed/AENS will have the following header */ +struct bfi_msgq_mhdr { + u8 msg_class; + u8 msg_id; + u16 msg_token; + u16 num_entries; + u8 enet_id; + u8 rsvd; +} __packed; + +#define bfi_msgq_mhdr_set(_mh, _mc, _mid, _tok, _enet_id) do { \ + (_mh).msg_class = (_mc); \ + (_mh).msg_id = (_mid); \ + (_mh).msg_token = (_tok); \ + (_mh).enet_id = (_enet_id); \ +} while (0) + +/* + * Mailbox for messaging interface + */ +#define BFI_MSGQ_CMD_ENTRY_SIZE (64) /* TBD */ +#define BFI_MSGQ_RSP_ENTRY_SIZE (64) /* TBD */ + +#define bfi_msgq_num_cmd_entries(_size) \ + (((_size) + BFI_MSGQ_CMD_ENTRY_SIZE - 1) / BFI_MSGQ_CMD_ENTRY_SIZE) + +struct bfi_msgq { + union bfi_addr_u addr; + u16 q_depth; /* Total num of entries in the queue */ + u8 rsvd[2]; +} __packed; + +/* BFI_ENET_MSGQ_CFG_REQ TBD init or cfg? */ +struct bfi_msgq_cfg_req { + struct bfi_mhdr mh; + struct bfi_msgq cmdq; + struct bfi_msgq rspq; +} __packed; + +/* BFI_ENET_MSGQ_CFG_RSP */ +struct bfi_msgq_cfg_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +} __packed; + +/* BFI_MSGQ_H2I_DOORBELL */ +struct bfi_msgq_h2i_db { + struct bfi_mhdr mh; + union { + u16 cmdq_pi; + u16 rspq_ci; + } __packed idx; +} __packed; + +/* BFI_MSGQ_I2H_DOORBELL */ +struct bfi_msgq_i2h_db { + struct bfi_mhdr mh; + union { + u16 rspq_pi; + u16 cmdq_ci; + } __packed idx; +} __packed; + +#define BFI_CMD_COPY_SZ 28 + +/* BFI_MSGQ_H2I_CMD_COPY_RSP */ +struct bfi_msgq_h2i_cmdq_copy_rsp { + struct bfi_mhdr mh; + u8 data[BFI_CMD_COPY_SZ]; +} __packed; + +/* BFI_MSGQ_I2H_CMD_COPY_REQ */ +struct bfi_msgq_i2h_cmdq_copy_req { + struct bfi_mhdr mh; + u16 offset; + u16 len; +} __packed; + +/* + * FLASH module specific + */ +enum bfi_flash_h2i_msgs { + BFI_FLASH_H2I_QUERY_REQ = 1, + BFI_FLASH_H2I_ERASE_REQ = 2, + BFI_FLASH_H2I_WRITE_REQ = 3, + BFI_FLASH_H2I_READ_REQ = 4, + BFI_FLASH_H2I_BOOT_VER_REQ = 5, +}; + +enum bfi_flash_i2h_msgs { + BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1), + BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2), + BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3), + BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4), + BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5), + BFI_FLASH_I2H_EVENT = BFA_I2HM(127), +}; + +/* + * Flash query request + */ +struct bfi_flash_query_req { + struct bfi_mhdr mh; /* Common msg header */ + struct bfi_alen alen; +} __packed; + +/* + * Flash write request + */ +struct bfi_flash_write_req { + struct bfi_mhdr mh; /* Common msg header */ + struct bfi_alen alen; + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 last; + u8 rsv[2]; + u32 offset; + u32 length; +} __packed; + +/* + * Flash read request + */ +struct bfi_flash_read_req { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 offset; + u32 length; + struct bfi_alen alen; +} __packed; + +/* + * Flash query response + */ +struct bfi_flash_query_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 status; +} __packed; + +/* + * Flash read response + */ +struct bfi_flash_read_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +} __packed; + +/* + * Flash write response + */ +struct bfi_flash_write_rsp { + struct bfi_mhdr mh; /* Common msg header */ + u32 type; /* 
partition type */ + u8 instance; /* partition instance */ + u8 rsv[3]; + u32 status; + u32 length; +} __packed; + +#endif /* __BFI_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_cna.h b/drivers/net/ethernet/brocade/bna/bfi_cna.h new file mode 100644 index 0000000000..fb78bfd0bc --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_cna.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BFI_CNA_H__ +#define __BFI_CNA_H__ + +#include "bfi.h" +#include "bfa_defs_cna.h" + +enum bfi_port_h2i { + BFI_PORT_H2I_ENABLE_REQ = (1), + BFI_PORT_H2I_DISABLE_REQ = (2), + BFI_PORT_H2I_GET_STATS_REQ = (3), + BFI_PORT_H2I_CLEAR_STATS_REQ = (4), +}; + +enum bfi_port_i2h { + BFI_PORT_I2H_ENABLE_RSP = BFA_I2HM(1), + BFI_PORT_I2H_DISABLE_RSP = BFA_I2HM(2), + BFI_PORT_I2H_GET_STATS_RSP = BFA_I2HM(3), + BFI_PORT_I2H_CLEAR_STATS_RSP = BFA_I2HM(4), +}; + +/* Generic REQ type */ +struct bfi_port_generic_req { + struct bfi_mhdr mh; /*!< msg header */ + u32 msgtag; /*!< msgtag for reply */ + u32 rsvd; +} __packed; + +/* Generic RSP type */ +struct bfi_port_generic_rsp { + struct bfi_mhdr mh; /*!< common msg header */ + u8 status; /*!< port enable status */ + u8 rsvd[3]; + u32 msgtag; /*!< msgtag for reply */ +} __packed; + +/* BFI_PORT_H2I_GET_STATS_REQ */ +struct bfi_port_get_stats_req { + struct bfi_mhdr mh; /*!< common msg header */ + union bfi_addr_u dma_addr; +} __packed; + +union bfi_port_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_port_generic_req enable_req; + struct bfi_port_generic_req disable_req; + struct bfi_port_get_stats_req getstats_req; + struct bfi_port_generic_req clearstats_req; +} __packed; + +union bfi_port_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_port_generic_rsp enable_rsp; + struct bfi_port_generic_rsp disable_rsp; + struct bfi_port_generic_rsp getstats_rsp; + struct bfi_port_generic_rsp clearstats_rsp; +} __packed; + +/* @brief Mailbox commands from host to (DCBX/LLDP) firmware */ +enum bfi_cee_h2i_msgs { + BFI_CEE_H2I_GET_CFG_REQ = 1, + BFI_CEE_H2I_RESET_STATS = 2, + BFI_CEE_H2I_GET_STATS_REQ = 3, +}; + +/* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */ +enum bfi_cee_i2h_msgs { + BFI_CEE_I2H_GET_CFG_RSP = BFA_I2HM(1), + BFI_CEE_I2H_RESET_STATS_RSP = BFA_I2HM(2), + BFI_CEE_I2H_GET_STATS_RSP = BFA_I2HM(3), +}; + +/* Data structures */ + +/* + * @brief H2I command structure for resetting the stats. + * BFI_CEE_H2I_RESET_STATS + */ +struct bfi_lldp_reset_stats { + struct bfi_mhdr mh; +} __packed; + +/* + * @brief H2I command structure for resetting the stats. 
+ * BFI_CEE_H2I_RESET_STATS + */ +struct bfi_cee_reset_stats { + struct bfi_mhdr mh; +} __packed; + +/* + * @brief get configuration command from host + * BFI_CEE_H2I_GET_CFG_REQ + */ +struct bfi_cee_get_req { + struct bfi_mhdr mh; + union bfi_addr_u dma_addr; +} __packed; + +/* + * @brief reply message from firmware + * BFI_CEE_I2H_GET_CFG_RSP + */ +struct bfi_cee_get_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +} __packed; + +/* + * @brief get configuration command from host + * BFI_CEE_H2I_GET_STATS_REQ + */ +struct bfi_cee_stats_req { + struct bfi_mhdr mh; + union bfi_addr_u dma_addr; +} __packed; + +/* + * @brief reply message from firmware + * BFI_CEE_I2H_GET_STATS_RSP + */ +struct bfi_cee_stats_rsp { + struct bfi_mhdr mh; + u8 cmd_status; + u8 rsvd[3]; +} __packed; + +/* @brief mailbox command structures from host to firmware */ +union bfi_cee_h2i_msg_u { + struct bfi_mhdr mh; + struct bfi_cee_get_req get_req; + struct bfi_cee_stats_req stats_req; +} __packed; + +/* @brief mailbox message structures from firmware to host */ +union bfi_cee_i2h_msg_u { + struct bfi_mhdr mh; + struct bfi_cee_get_rsp get_rsp; + struct bfi_cee_stats_rsp stats_rsp; +} __packed; + +#endif /* __BFI_CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_enet.h b/drivers/net/ethernet/brocade/bna/bfi_enet.h new file mode 100644 index 0000000000..112aadf493 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_enet.h @@ -0,0 +1,847 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* BNA Hardware and Firmware Interface */ + +/* Skipping statistics collection to avoid clutter. + * Command is no longer needed: + * MTU + * TxQ Stop + * RxQ Stop + * RxF Enable/Disable + * + * HDS-off request is dynamic + * keep structures as multiple of 32-bit fields for alignment. + * All values must be written in big-endian. + */ +#ifndef __BFI_ENET_H__ +#define __BFI_ENET_H__ + +#include "bfa_defs.h" +#include "bfi.h" + +#define BFI_ENET_CFG_MAX 32 /* Max resources per PF */ + +#define BFI_ENET_TXQ_PRIO_MAX 8 +#define BFI_ENET_RX_QSET_MAX 16 +#define BFI_ENET_TXQ_WI_VECT_MAX 4 + +#define BFI_ENET_VLAN_ID_MAX 4096 +#define BFI_ENET_VLAN_BLOCK_SIZE 512 /* in bits */ +#define BFI_ENET_VLAN_BLOCKS_MAX \ + (BFI_ENET_VLAN_ID_MAX / BFI_ENET_VLAN_BLOCK_SIZE) +#define BFI_ENET_VLAN_WORD_SIZE 32 /* in bits */ +#define BFI_ENET_VLAN_WORDS_MAX \ + (BFI_ENET_VLAN_BLOCK_SIZE / BFI_ENET_VLAN_WORD_SIZE) + +#define BFI_ENET_RSS_RIT_MAX 64 /* entries */ +#define BFI_ENET_RSS_KEY_LEN 10 /* 32-bit words */ + +union bfi_addr_be_u { + struct { + u32 addr_hi; /* Most Significant 32-bits */ + u32 addr_lo; /* Least Significant 32-Bits */ + } __packed a32; +} __packed; + +/* T X Q U E U E D E F I N E S */ +/* TxQ Vector (a.k.a. 
Tx-Buffer Descriptor) */ +/* TxQ Entry Opcodes */ +#define BFI_ENET_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ +#define BFI_ENET_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */ +#define BFI_ENET_TXQ_WI_EXTENSION (0x104) /* Extension WI */ + +/* TxQ Entry Control Flags */ +#define BFI_ENET_TXQ_WI_CF_FCOE_CRC BIT(8) +#define BFI_ENET_TXQ_WI_CF_IPID_MODE BIT(5) +#define BFI_ENET_TXQ_WI_CF_INS_PRIO BIT(4) +#define BFI_ENET_TXQ_WI_CF_INS_VLAN BIT(3) +#define BFI_ENET_TXQ_WI_CF_UDP_CKSUM BIT(2) +#define BFI_ENET_TXQ_WI_CF_TCP_CKSUM BIT(1) +#define BFI_ENET_TXQ_WI_CF_IP_CKSUM BIT(0) + +struct bfi_enet_txq_wi_base { + u8 reserved; + u8 num_vectors; /* number of vectors present */ + u16 opcode; + /* BFI_ENET_TXQ_WI_SEND or BFI_ENET_TXQ_WI_SEND_LSO */ + u16 flags; /* OR of all the flags */ + u16 l4_hdr_size_n_offset; + u16 vlan_tag; + u16 lso_mss; /* Only 14 LSB are valid */ + u32 frame_length; /* Only 24 LSB are valid */ +} __packed; + +struct bfi_enet_txq_wi_ext { + u16 reserved; + u16 opcode; /* BFI_ENET_TXQ_WI_EXTENSION */ + u32 reserved2[3]; +} __packed; + +struct bfi_enet_txq_wi_vector { /* Tx Buffer Descriptor */ + u16 reserved; + u16 length; /* Only 14 LSB are valid */ + union bfi_addr_be_u addr; +} __packed; + +/* TxQ Entry Structure */ +struct bfi_enet_txq_entry { + union { + struct bfi_enet_txq_wi_base base; + struct bfi_enet_txq_wi_ext ext; + } __packed wi; + struct bfi_enet_txq_wi_vector vector[BFI_ENET_TXQ_WI_VECT_MAX]; +} __packed; + +#define wi_hdr wi.base +#define wi_ext_hdr wi.ext + +#define BFI_ENET_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ + (((_hdr_size) << 10) | ((_offset) & 0x3FF)) + +/* R X Q U E U E D E F I N E S */ +struct bfi_enet_rxq_entry { + union bfi_addr_be_u rx_buffer; +} __packed; + +/* R X C O M P L E T I O N Q U E U E D E F I N E S */ +/* CQ Entry Flags */ +#define BFI_ENET_CQ_EF_MAC_ERROR BIT(0) +#define BFI_ENET_CQ_EF_FCS_ERROR BIT(1) +#define BFI_ENET_CQ_EF_TOO_LONG BIT(2) +#define BFI_ENET_CQ_EF_FC_CRC_OK BIT(3) + +#define BFI_ENET_CQ_EF_RSVD1 BIT(4) +#define BFI_ENET_CQ_EF_L4_CKSUM_OK BIT(5) +#define BFI_ENET_CQ_EF_L3_CKSUM_OK BIT(6) +#define BFI_ENET_CQ_EF_HDS_HEADER BIT(7) + +#define BFI_ENET_CQ_EF_UDP BIT(8) +#define BFI_ENET_CQ_EF_TCP BIT(9) +#define BFI_ENET_CQ_EF_IP_OPTIONS BIT(10) +#define BFI_ENET_CQ_EF_IPV6 BIT(11) + +#define BFI_ENET_CQ_EF_IPV4 BIT(12) +#define BFI_ENET_CQ_EF_VLAN BIT(13) +#define BFI_ENET_CQ_EF_RSS BIT(14) +#define BFI_ENET_CQ_EF_RSVD2 BIT(15) + +#define BFI_ENET_CQ_EF_MCAST_MATCH BIT(16) +#define BFI_ENET_CQ_EF_MCAST BIT(17) +#define BFI_ENET_CQ_EF_BCAST BIT(18) +#define BFI_ENET_CQ_EF_REMOTE BIT(19) + +#define BFI_ENET_CQ_EF_LOCAL BIT(20) + +/* CQ Entry Structure */ +struct bfi_enet_cq_entry { + u32 flags; + u16 vlan_tag; + u16 length; + u32 rss_hash; + u8 valid; + u8 reserved1; + u8 reserved2; + u8 rxq_id; +} __packed; + +/* E N E T C O N T R O L P A T H C O M M A N D S */ +struct bfi_enet_q { + union bfi_addr_u pg_tbl; + union bfi_addr_u first_entry; + u16 pages; /* # of pages */ + u16 page_sz; +} __packed; + +struct bfi_enet_txq { + struct bfi_enet_q q; + u8 priority; + u8 rsvd[3]; +} __packed; + +struct bfi_enet_rxq { + struct bfi_enet_q q; + u16 rx_buffer_size; + u16 rsvd; +} __packed; + +struct bfi_enet_cq { + struct bfi_enet_q q; +} __packed; + +struct bfi_enet_ib_cfg { + u8 int_pkt_dma; + u8 int_enabled; + u8 int_pkt_enabled; + u8 continuous_coalescing; + u8 msix; + u8 rsvd[3]; + u32 coalescing_timeout; + u32 inter_pkt_timeout; + u8 inter_pkt_count; + u8 rsvd1[3]; +} __packed; + +struct bfi_enet_ib { + 
union bfi_addr_u index_addr; + union { + u16 msix_index; + u16 intx_bitmask; + } __packed intr; + u16 rsvd; +} __packed; + +/* ENET command messages */ +enum bfi_enet_h2i_msgs { + /* Rx Commands */ + BFI_ENET_H2I_RX_CFG_SET_REQ = 1, + BFI_ENET_H2I_RX_CFG_CLR_REQ = 2, + + BFI_ENET_H2I_RIT_CFG_REQ = 3, + BFI_ENET_H2I_RSS_CFG_REQ = 4, + BFI_ENET_H2I_RSS_ENABLE_REQ = 5, + BFI_ENET_H2I_RX_PROMISCUOUS_REQ = 6, + BFI_ENET_H2I_RX_DEFAULT_REQ = 7, + + BFI_ENET_H2I_MAC_UCAST_SET_REQ = 8, + BFI_ENET_H2I_MAC_UCAST_CLR_REQ = 9, + BFI_ENET_H2I_MAC_UCAST_ADD_REQ = 10, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ = 11, + + BFI_ENET_H2I_MAC_MCAST_ADD_REQ = 12, + BFI_ENET_H2I_MAC_MCAST_DEL_REQ = 13, + BFI_ENET_H2I_MAC_MCAST_FILTER_REQ = 14, + + BFI_ENET_H2I_RX_VLAN_SET_REQ = 15, + BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ = 16, + + /* Tx Commands */ + BFI_ENET_H2I_TX_CFG_SET_REQ = 17, + BFI_ENET_H2I_TX_CFG_CLR_REQ = 18, + + /* Port Commands */ + BFI_ENET_H2I_PORT_ADMIN_UP_REQ = 19, + BFI_ENET_H2I_SET_PAUSE_REQ = 20, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ = 21, + + /* Get Attributes Command */ + BFI_ENET_H2I_GET_ATTR_REQ = 22, + + /* Statistics Commands */ + BFI_ENET_H2I_STATS_GET_REQ = 23, + BFI_ENET_H2I_STATS_CLR_REQ = 24, + + BFI_ENET_H2I_WOL_MAGIC_REQ = 25, + BFI_ENET_H2I_WOL_FRAME_REQ = 26, + + BFI_ENET_H2I_MAX = 27, +}; + +enum bfi_enet_i2h_msgs { + /* Rx Responses */ + BFI_ENET_I2H_RX_CFG_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_CFG_SET_REQ), + BFI_ENET_I2H_RX_CFG_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_CFG_CLR_REQ), + + BFI_ENET_I2H_RIT_CFG_RSP = + BFA_I2HM(BFI_ENET_H2I_RIT_CFG_REQ), + BFI_ENET_I2H_RSS_CFG_RSP = + BFA_I2HM(BFI_ENET_H2I_RSS_CFG_REQ), + BFI_ENET_I2H_RSS_ENABLE_RSP = + BFA_I2HM(BFI_ENET_H2I_RSS_ENABLE_REQ), + BFI_ENET_I2H_RX_PROMISCUOUS_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_PROMISCUOUS_REQ), + BFI_ENET_I2H_RX_DEFAULT_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_DEFAULT_REQ), + + BFI_ENET_I2H_MAC_UCAST_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_SET_REQ), + BFI_ENET_I2H_MAC_UCAST_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_CLR_REQ), + BFI_ENET_I2H_MAC_UCAST_ADD_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_ADD_REQ), + BFI_ENET_I2H_MAC_UCAST_DEL_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_UCAST_DEL_REQ), + + BFI_ENET_I2H_MAC_MCAST_ADD_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_ADD_REQ), + BFI_ENET_I2H_MAC_MCAST_DEL_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_DEL_REQ), + BFI_ENET_I2H_MAC_MCAST_FILTER_RSP = + BFA_I2HM(BFI_ENET_H2I_MAC_MCAST_FILTER_REQ), + + BFI_ENET_I2H_RX_VLAN_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_VLAN_SET_REQ), + + BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP = + BFA_I2HM(BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ), + + /* Tx Responses */ + BFI_ENET_I2H_TX_CFG_SET_RSP = + BFA_I2HM(BFI_ENET_H2I_TX_CFG_SET_REQ), + BFI_ENET_I2H_TX_CFG_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_TX_CFG_CLR_REQ), + + /* Port Responses */ + BFI_ENET_I2H_PORT_ADMIN_RSP = + BFA_I2HM(BFI_ENET_H2I_PORT_ADMIN_UP_REQ), + + BFI_ENET_I2H_SET_PAUSE_RSP = + BFA_I2HM(BFI_ENET_H2I_SET_PAUSE_REQ), + BFI_ENET_I2H_DIAG_LOOPBACK_RSP = + BFA_I2HM(BFI_ENET_H2I_DIAG_LOOPBACK_REQ), + + /* Attributes Response */ + BFI_ENET_I2H_GET_ATTR_RSP = + BFA_I2HM(BFI_ENET_H2I_GET_ATTR_REQ), + + /* Statistics Responses */ + BFI_ENET_I2H_STATS_GET_RSP = + BFA_I2HM(BFI_ENET_H2I_STATS_GET_REQ), + BFI_ENET_I2H_STATS_CLR_RSP = + BFA_I2HM(BFI_ENET_H2I_STATS_CLR_REQ), + + BFI_ENET_I2H_WOL_MAGIC_RSP = + BFA_I2HM(BFI_ENET_H2I_WOL_MAGIC_REQ), + BFI_ENET_I2H_WOL_FRAME_RSP = + BFA_I2HM(BFI_ENET_H2I_WOL_FRAME_REQ), + + /* AENs */ + BFI_ENET_I2H_LINK_DOWN_AEN = BFA_I2HM(BFI_ENET_H2I_MAX), + BFI_ENET_I2H_LINK_UP_AEN = 
BFA_I2HM(BFI_ENET_H2I_MAX + 1), + + BFI_ENET_I2H_PORT_ENABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 2), + BFI_ENET_I2H_PORT_DISABLE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 3), + + BFI_ENET_I2H_BW_UPDATE_AEN = BFA_I2HM(BFI_ENET_H2I_MAX + 4), +}; + +/* The following error codes can be returned by the enet commands */ +enum bfi_enet_err { + BFI_ENET_CMD_OK = 0, + BFI_ENET_CMD_FAIL = 1, + BFI_ENET_CMD_DUP_ENTRY = 2, /* !< Duplicate entry in CAM */ + BFI_ENET_CMD_CAM_FULL = 3, /* !< CAM is full */ + BFI_ENET_CMD_NOT_OWNER = 4, /* !< Not permitted, b'cos not owner */ + BFI_ENET_CMD_NOT_EXEC = 5, /* !< Was not sent to f/w at all */ + BFI_ENET_CMD_WAITING = 6, /* !< Waiting for completion */ + BFI_ENET_CMD_PORT_DISABLED = 7, /* !< port in disabled state */ +}; + +/* Generic Request + * + * bfi_enet_req is used by: + * BFI_ENET_H2I_RX_CFG_CLR_REQ + * BFI_ENET_H2I_TX_CFG_CLR_REQ + */ +struct bfi_enet_req { + struct bfi_msgq_mhdr mh; +} __packed; + +/* Enable/Disable Request + * + * bfi_enet_enable_req is used by: + * BFI_ENET_H2I_RSS_ENABLE_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_PROMISCUOUS_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_DEFAULT_REQ (enet_id must be zero) + * BFI_ENET_H2I_RX_MAC_MCAST_FILTER_REQ + * BFI_ENET_H2I_PORT_ADMIN_UP_REQ (enet_id must be zero) + */ +struct bfi_enet_enable_req { + struct bfi_msgq_mhdr mh; + u8 enable; /* 1 = enable; 0 = disable */ + u8 rsvd[3]; +} __packed; + +/* Generic Response */ +struct bfi_enet_rsp { + struct bfi_msgq_mhdr mh; + u8 error; /*!< if error see cmd_offset */ + u8 rsvd; + u16 cmd_offset; /*!< offset to invalid parameter */ +} __packed; + +/* GLOBAL CONFIGURATION */ + +/* bfi_enet_attr_req is used by: + * BFI_ENET_H2I_GET_ATTR_REQ + */ +struct bfi_enet_attr_req { + struct bfi_msgq_mhdr mh; +} __packed; + +/* bfi_enet_attr_rsp is used by: + * BFI_ENET_I2H_GET_ATTR_RSP + */ +struct bfi_enet_attr_rsp { + struct bfi_msgq_mhdr mh; + u8 error; /*!< if error see cmd_offset */ + u8 rsvd; + u16 cmd_offset; /*!< offset to invalid parameter */ + u32 max_cfg; + u32 max_ucmac; + u32 rit_size; +} __packed; + +/* Tx Configuration + * + * bfi_enet_tx_cfg is used by: + * BFI_ENET_H2I_TX_CFG_SET_REQ + */ +enum bfi_enet_tx_vlan_mode { + BFI_ENET_TX_VLAN_NOP = 0, + BFI_ENET_TX_VLAN_INS = 1, + BFI_ENET_TX_VLAN_WI = 2, +}; + +struct bfi_enet_tx_cfg { + u8 vlan_mode; /*!< processing mode */ + u8 rsvd; + u16 vlan_id; + u8 admit_tagged_frame; + u8 apply_vlan_filter; + u8 add_to_vswitch; + u8 rsvd1[1]; +} __packed; + +struct bfi_enet_tx_cfg_req { + struct bfi_msgq_mhdr mh; + u8 num_queues; /* # of Tx Queues */ + u8 rsvd[3]; + + struct { + struct bfi_enet_txq q; + struct bfi_enet_ib ib; + } __packed q_cfg[BFI_ENET_TXQ_PRIO_MAX]; + + struct bfi_enet_ib_cfg ib_cfg; + + struct bfi_enet_tx_cfg tx_cfg; +}; + +struct bfi_enet_tx_cfg_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 hw_id; /* For debugging */ + u8 rsvd[2]; + struct { + u32 q_dbell; /* PCI base address offset */ + u32 i_dbell; /* PCI base address offset */ + u8 hw_qid; /* For debugging */ + u8 rsvd[3]; + } __packed q_handles[BFI_ENET_TXQ_PRIO_MAX]; +}; + +/* Rx Configuration + * + * bfi_enet_rx_cfg is used by: + * BFI_ENET_H2I_RX_CFG_SET_REQ + */ +enum bfi_enet_rxq_type { + BFI_ENET_RXQ_SINGLE = 1, + BFI_ENET_RXQ_LARGE_SMALL = 2, + BFI_ENET_RXQ_HDS = 3, + BFI_ENET_RXQ_HDS_OPT_BASED = 4, +}; + +enum bfi_enet_hds_type { + BFI_ENET_HDS_FORCED = 0x01, + BFI_ENET_HDS_IPV6_UDP = 0x02, + BFI_ENET_HDS_IPV6_TCP = 0x04, + BFI_ENET_HDS_IPV4_TCP = 0x08, + BFI_ENET_HDS_IPV4_UDP = 0x10, +}; + +struct bfi_enet_rx_cfg { + u8 
rxq_type; + u8 rsvd[1]; + u16 frame_size; + + struct { + u8 max_header_size; + u8 force_offset; + u8 type; + u8 rsvd1; + } __packed hds; + + u8 multi_buffer; + u8 strip_vlan; + u8 drop_untagged; + u8 rsvd2; +} __packed; + +/* + * Multicast frames are received on the ql of q-set index zero. + * On the completion queue. RxQ ID = even is for large/data buffer queues + * and RxQ ID = odd is for small/header buffer queues. + */ +struct bfi_enet_rx_cfg_req { + struct bfi_msgq_mhdr mh; + u8 num_queue_sets; /* # of Rx Queue Sets */ + u8 rsvd[3]; + + struct { + struct bfi_enet_rxq ql; /* large/data/single buffers */ + struct bfi_enet_rxq qs; /* small/header buffers */ + struct bfi_enet_cq cq; + struct bfi_enet_ib ib; + } __packed q_cfg[BFI_ENET_RX_QSET_MAX]; + + struct bfi_enet_ib_cfg ib_cfg; + + struct bfi_enet_rx_cfg rx_cfg; +} __packed; + +struct bfi_enet_rx_cfg_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 hw_id; /* For debugging */ + u8 rsvd[2]; + struct { + u32 ql_dbell; /* PCI base address offset */ + u32 qs_dbell; /* PCI base address offset */ + u32 i_dbell; /* PCI base address offset */ + u8 hw_lqid; /* For debugging */ + u8 hw_sqid; /* For debugging */ + u8 hw_cqid; /* For debugging */ + u8 rsvd; + } __packed q_handles[BFI_ENET_RX_QSET_MAX]; +} __packed; + +/* RIT + * + * bfi_enet_rit_req is used by: + * BFI_ENET_H2I_RIT_CFG_REQ + */ +struct bfi_enet_rit_req { + struct bfi_msgq_mhdr mh; + u16 size; /* number of table-entries used */ + u8 rsvd[2]; + u8 table[BFI_ENET_RSS_RIT_MAX]; +} __packed; + +/* RSS + * + * bfi_enet_rss_cfg_req is used by: + * BFI_ENET_H2I_RSS_CFG_REQ + */ +enum bfi_enet_rss_type { + BFI_ENET_RSS_IPV6 = 0x01, + BFI_ENET_RSS_IPV6_TCP = 0x02, + BFI_ENET_RSS_IPV4 = 0x04, + BFI_ENET_RSS_IPV4_TCP = 0x08 +}; + +struct bfi_enet_rss_cfg { + u8 type; + u8 mask; + u8 rsvd[2]; + u32 key[BFI_ENET_RSS_KEY_LEN]; +} __packed; + +struct bfi_enet_rss_cfg_req { + struct bfi_msgq_mhdr mh; + struct bfi_enet_rss_cfg cfg; +} __packed; + +/* MAC Unicast + * + * bfi_enet_rx_vlan_req is used by: + * BFI_ENET_H2I_MAC_UCAST_SET_REQ + * BFI_ENET_H2I_MAC_UCAST_CLR_REQ + * BFI_ENET_H2I_MAC_UCAST_ADD_REQ + * BFI_ENET_H2I_MAC_UCAST_DEL_REQ + */ +struct bfi_enet_ucast_req { + struct bfi_msgq_mhdr mh; + u8 mac_addr[ETH_ALEN]; + u8 rsvd[2]; +} __packed; + +/* MAC Unicast + VLAN */ +struct bfi_enet_mac_n_vlan_req { + struct bfi_msgq_mhdr mh; + u16 vlan_id; + u8 mac_addr[ETH_ALEN]; +} __packed; + +/* MAC Multicast + * + * bfi_enet_mac_mfilter_add_req is used by: + * BFI_ENET_H2I_MAC_MCAST_ADD_REQ + */ +struct bfi_enet_mcast_add_req { + struct bfi_msgq_mhdr mh; + u8 mac_addr[ETH_ALEN]; + u8 rsvd[2]; +} __packed; + +/* bfi_enet_mac_mfilter_add_rsp is used by: + * BFI_ENET_I2H_MAC_MCAST_ADD_RSP + */ +struct bfi_enet_mcast_add_rsp { + struct bfi_msgq_mhdr mh; + u8 error; + u8 rsvd; + u16 cmd_offset; + u16 handle; + u8 rsvd1[2]; +} __packed; + +/* bfi_enet_mac_mfilter_del_req is used by: + * BFI_ENET_H2I_MAC_MCAST_DEL_REQ + */ +struct bfi_enet_mcast_del_req { + struct bfi_msgq_mhdr mh; + u16 handle; + u8 rsvd[2]; +} __packed; + +/* VLAN + * + * bfi_enet_rx_vlan_req is used by: + * BFI_ENET_H2I_RX_VLAN_SET_REQ + */ +struct bfi_enet_rx_vlan_req { + struct bfi_msgq_mhdr mh; + u8 block_idx; + u8 rsvd[3]; + u32 bit_mask[BFI_ENET_VLAN_WORDS_MAX]; +} __packed; + +/* PAUSE + * + * bfi_enet_set_pause_req is used by: + * BFI_ENET_H2I_SET_PAUSE_REQ + */ +struct bfi_enet_set_pause_req { + struct bfi_msgq_mhdr mh; + u8 rsvd[2]; + u8 tx_pause; /* 1 = enable; 0 = disable */ + u8 rx_pause; /* 1 = enable; 0 = disable */ +} 
__packed; + +/* DIAGNOSTICS + * + * bfi_enet_diag_lb_req is used by: + * BFI_ENET_H2I_DIAG_LOOPBACK + */ +struct bfi_enet_diag_lb_req { + struct bfi_msgq_mhdr mh; + u8 rsvd[2]; + u8 mode; /* cable or Serdes */ + u8 enable; /* 1 = enable; 0 = disable */ +} __packed; + +/* enum for Loopback opmodes */ +enum { + BFI_ENET_DIAG_LB_OPMODE_EXT = 0, + BFI_ENET_DIAG_LB_OPMODE_CBL = 1, +}; + +/* STATISTICS + * + * bfi_enet_stats_req is used by: + * BFI_ENET_H2I_STATS_GET_REQ + * BFI_ENET_I2H_STATS_CLR_REQ + */ +struct bfi_enet_stats_req { + struct bfi_msgq_mhdr mh; + u16 stats_mask; + u8 rsvd[2]; + u32 rx_enet_mask; + u32 tx_enet_mask; + union bfi_addr_u host_buffer; +} __packed; + +/* defines for "stats_mask" above. */ +#define BFI_ENET_STATS_MAC BIT(0) /* !< MAC Statistics */ +#define BFI_ENET_STATS_BPC BIT(1) /* !< Pause Stats from BPC */ +#define BFI_ENET_STATS_RAD BIT(2) /* !< Rx Admission Statistics */ +#define BFI_ENET_STATS_RX_FC BIT(3) /* !< Rx FC Stats from RxA */ +#define BFI_ENET_STATS_TX_FC BIT(4) /* !< Tx FC Stats from TxA */ + +#define BFI_ENET_STATS_ALL 0x1f + +/* TxF Frame Statistics */ +struct bfi_enet_stats_txf { + u64 ucast_octets; + u64 ucast; + u64 ucast_vlan; + + u64 mcast_octets; + u64 mcast; + u64 mcast_vlan; + + u64 bcast_octets; + u64 bcast; + u64 bcast_vlan; + + u64 errors; + u64 filter_vlan; /* frames filtered due to VLAN */ + u64 filter_mac_sa; /* frames filtered due to SA check */ +} __packed; + +/* RxF Frame Statistics */ +struct bfi_enet_stats_rxf { + u64 ucast_octets; + u64 ucast; + u64 ucast_vlan; + + u64 mcast_octets; + u64 mcast; + u64 mcast_vlan; + + u64 bcast_octets; + u64 bcast; + u64 bcast_vlan; + u64 frame_drops; +} __packed; + +/* FC Tx Frame Statistics */ +struct bfi_enet_stats_fc_tx { + u64 txf_ucast_octets; + u64 txf_ucast; + u64 txf_ucast_vlan; + + u64 txf_mcast_octets; + u64 txf_mcast; + u64 txf_mcast_vlan; + + u64 txf_bcast_octets; + u64 txf_bcast; + u64 txf_bcast_vlan; + + u64 txf_parity_errors; + u64 txf_timeout; + u64 txf_fid_parity_errors; +} __packed; + +/* FC Rx Frame Statistics */ +struct bfi_enet_stats_fc_rx { + u64 rxf_ucast_octets; + u64 rxf_ucast; + u64 rxf_ucast_vlan; + + u64 rxf_mcast_octets; + u64 rxf_mcast; + u64 rxf_mcast_vlan; + + u64 rxf_bcast_octets; + u64 rxf_bcast; + u64 rxf_bcast_vlan; +} __packed; + +/* RAD Frame Statistics */ +struct bfi_enet_stats_rad { + u64 rx_frames; + u64 rx_octets; + u64 rx_vlan_frames; + + u64 rx_ucast; + u64 rx_ucast_octets; + u64 rx_ucast_vlan; + + u64 rx_mcast; + u64 rx_mcast_octets; + u64 rx_mcast_vlan; + + u64 rx_bcast; + u64 rx_bcast_octets; + u64 rx_bcast_vlan; + + u64 rx_drops; +} __packed; + +/* BPC Tx Registers */ +struct bfi_enet_stats_bpc { + /* transmit stats */ + u64 tx_pause[8]; + u64 tx_zero_pause[8]; /*!< Pause cancellation */ + /*!<Pause initiation rather than retention */ + u64 tx_first_pause[8]; + + /* receive stats */ + u64 rx_pause[8]; + u64 rx_zero_pause[8]; /*!< Pause cancellation */ + /*!<Pause initiation rather than retention */ + u64 rx_first_pause[8]; +} __packed; + +/* MAC Rx Statistics */ +struct bfi_enet_stats_mac { + u64 stats_clr_cnt; /* times this stats cleared */ + u64 frame_64; /* both rx and tx counter */ + u64 frame_65_127; /* both rx and tx counter */ + u64 frame_128_255; /* both rx and tx counter */ + u64 frame_256_511; /* both rx and tx counter */ + u64 frame_512_1023; /* both rx and tx counter */ + u64 frame_1024_1518; /* both rx and tx counter */ + u64 frame_1519_1522; /* both rx and tx counter */ + + /* receive stats */ + u64 rx_bytes; + u64 rx_packets; + u64 
rx_fcs_error; + u64 rx_multicast; + u64 rx_broadcast; + u64 rx_control_frames; + u64 rx_pause; + u64 rx_unknown_opcode; + u64 rx_alignment_error; + u64 rx_frame_length_error; + u64 rx_code_error; + u64 rx_carrier_sense_error; + u64 rx_undersize; + u64 rx_oversize; + u64 rx_fragments; + u64 rx_jabber; + u64 rx_drop; + + /* transmit stats */ + u64 tx_bytes; + u64 tx_packets; + u64 tx_multicast; + u64 tx_broadcast; + u64 tx_pause; + u64 tx_deferral; + u64 tx_excessive_deferral; + u64 tx_single_collision; + u64 tx_muliple_collision; + u64 tx_late_collision; + u64 tx_excessive_collision; + u64 tx_total_collision; + u64 tx_pause_honored; + u64 tx_drop; + u64 tx_jabber; + u64 tx_fcs_error; + u64 tx_control_frame; + u64 tx_oversize; + u64 tx_undersize; + u64 tx_fragments; +} __packed; + +/* Complete statistics, DMAed from fw to host followed by + * BFI_ENET_I2H_STATS_GET_RSP + */ +struct bfi_enet_stats { + struct bfi_enet_stats_mac mac_stats; + struct bfi_enet_stats_bpc bpc_stats; + struct bfi_enet_stats_rad rad_stats; + struct bfi_enet_stats_rad rlb_stats; + struct bfi_enet_stats_fc_rx fc_rx_stats; + struct bfi_enet_stats_fc_tx fc_tx_stats; + struct bfi_enet_stats_rxf rxf_stats[BFI_ENET_CFG_MAX]; + struct bfi_enet_stats_txf txf_stats[BFI_ENET_CFG_MAX]; +} __packed; + +#endif /* __BFI_ENET_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h new file mode 100644 index 0000000000..b15ae9e341 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h @@ -0,0 +1,449 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* + * bfi_reg.h ASIC register defines for all QLogic BR-series adapter ASICs + */ + +#ifndef __BFI_REG_H__ +#define __BFI_REG_H__ + +#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */ +#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */ +#define HOSTFN2_INT_STATUS 0x00014300 /* ct */ +#define HOSTFN3_INT_STATUS 0x00014400 /* ct */ +#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */ +#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */ +#define HOSTFN2_INT_MSK 0x00014304 /* ct */ +#define HOSTFN3_INT_MSK 0x00014404 /* ct */ + +#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */ +#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */ +#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */ +#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */ + +#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */ +#define __P_LCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000 +#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_LCLK_RESET_TIMER_SH 17 +#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH) +#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_LCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH) +#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_LCLK_JITLMT0_1_SH 12 +#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH) +#define __APP_PLL_LCLK_HREF 0x00000800 +#define __APP_PLL_LCLK_HDIV 0x00000400 +#define __APP_PLL_LCLK_P0_1_MK 0x00000300 +#define __APP_PLL_LCLK_P0_1_SH 8 +#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH) +#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_LCLK_Z0_2_SH 5 +#define 
__APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH) +#define __APP_PLL_LCLK_RSEL200500 0x00000010 +#define __APP_PLL_LCLK_ENARST 0x00000008 +#define __APP_PLL_LCLK_BYPASS 0x00000004 +#define __APP_PLL_LCLK_LRESETN 0x00000002 +#define __APP_PLL_LCLK_ENABLE 0x00000001 +#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */ +#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000 +#define __APP_PLL_SCLK_RESET_TIMER_SH 17 +#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH) +#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000 +#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000 +#define __APP_PLL_SCLK_CNTLMT0_1_SH 14 +#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH) +#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000 +#define __APP_PLL_SCLK_JITLMT0_1_SH 12 +#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH) +#define __APP_PLL_SCLK_HREF 0x00000800 +#define __APP_PLL_SCLK_HDIV 0x00000400 +#define __APP_PLL_SCLK_P0_1_MK 0x00000300 +#define __APP_PLL_SCLK_P0_1_SH 8 +#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH) +#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0 +#define __APP_PLL_SCLK_Z0_2_SH 5 +#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH) +#define __APP_PLL_SCLK_RSEL200500 0x00000010 +#define __APP_PLL_SCLK_ENARST 0x00000008 +#define __APP_PLL_SCLK_BYPASS 0x00000004 +#define __APP_PLL_SCLK_LRESETN 0x00000002 +#define __APP_PLL_SCLK_ENABLE 0x00000001 +#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */ +#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */ +#define __ENABLE_MAC_1 0x00200000 /* ct */ +#define __ENABLE_MAC_0 0x00100000 /* ct */ + +#define HOST_SEM0_REG 0x00014230 /* cb/ct */ +#define HOST_SEM1_REG 0x00014234 /* cb/ct */ +#define HOST_SEM2_REG 0x00014238 /* cb/ct */ +#define HOST_SEM3_REG 0x0001423c /* cb/ct */ +#define HOST_SEM4_REG 0x00014610 /* cb/ct */ +#define HOST_SEM5_REG 0x00014614 /* cb/ct */ +#define HOST_SEM6_REG 0x00014618 /* cb/ct */ +#define HOST_SEM7_REG 0x0001461c /* cb/ct */ +#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */ +#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */ +#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */ +#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */ +#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */ +#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */ +#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */ +#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */ + +#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */ +#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */ +#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */ +#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */ +#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */ +#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */ +#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */ +#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */ +#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */ +#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */ +#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */ +#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */ +#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */ +#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */ +#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */ +#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */ + +#define PSS_CTL_REG 0x00018800 /* cb/ct */ +#define __PSS_I2C_CLK_DIV_MK 0x007f0000 +#define __PSS_I2C_CLK_DIV_SH 16 +#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH) +#define __PSS_LMEM_INIT_DONE 0x00001000 +#define __PSS_LMEM_RESET 
0x00000200 +#define __PSS_LMEM_INIT_EN 0x00000100 +#define __PSS_LPU1_RESET 0x00000002 +#define __PSS_LPU0_RESET 0x00000001 +#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */ +#define ERR_SET_REG 0x00018818 /* cb/ct */ +#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */ +#define __PSS_GPIO_OUT_REG 0x00000fff +#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */ +#define __PSS_GPIO_OE_REG 0x000000ff + +#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */ +#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */ +#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */ +#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */ +#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */ +#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */ +#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */ +#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */ + +#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */ +#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */ + +#define MBIST_CTL_REG 0x00014220 /* ct */ +#define __EDRAM_BISTR_START 0x00000004 +#define MBIST_STAT_REG 0x00014224 /* ct */ +#define ETH_MAC_SER_REG 0x00014288 /* ct */ +#define __APP_EMS_CKBUFAMPIN 0x00000020 +#define __APP_EMS_REFCLKSEL 0x00000010 +#define __APP_EMS_CMLCKSEL 0x00000008 +#define __APP_EMS_REFCKBUFEN2 0x00000004 +#define __APP_EMS_REFCKBUFEN1 0x00000002 +#define __APP_EMS_CHANNEL_SEL 0x00000001 +#define FNC_PERS_REG 0x00014604 /* ct */ +#define __F3_FUNCTION_ACTIVE 0x80000000 +#define __F3_FUNCTION_MODE 0x40000000 +#define __F3_PORT_MAP_MK 0x30000000 +#define __F3_PORT_MAP_SH 28 +#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH) +#define __F3_VM_MODE 0x08000000 +#define __F3_INTX_STATUS_MK 0x07000000 +#define __F3_INTX_STATUS_SH 24 +#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH) +#define __F2_FUNCTION_ACTIVE 0x00800000 +#define __F2_FUNCTION_MODE 0x00400000 +#define __F2_PORT_MAP_MK 0x00300000 +#define __F2_PORT_MAP_SH 20 +#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH) +#define __F2_VM_MODE 0x00080000 +#define __F2_INTX_STATUS_MK 0x00070000 +#define __F2_INTX_STATUS_SH 16 +#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH) +#define __F1_FUNCTION_ACTIVE 0x00008000 +#define __F1_FUNCTION_MODE 0x00004000 +#define __F1_PORT_MAP_MK 0x00003000 +#define __F1_PORT_MAP_SH 12 +#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH) +#define __F1_VM_MODE 0x00000800 +#define __F1_INTX_STATUS_MK 0x00000700 +#define __F1_INTX_STATUS_SH 8 +#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH) +#define __F0_FUNCTION_ACTIVE 0x00000080 +#define __F0_FUNCTION_MODE 0x00000040 +#define __F0_PORT_MAP_MK 0x00000030 +#define __F0_PORT_MAP_SH 4 +#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH) +#define __F0_VM_MODE 0x00000008 +#define __F0_INTX_STATUS 0x00000007 +enum { + __F0_INTX_STATUS_MSIX = 0x0, + __F0_INTX_STATUS_INTA = 0x1, + __F0_INTX_STATUS_INTB = 0x2, + __F0_INTX_STATUS_INTC = 0x3, + __F0_INTX_STATUS_INTD = 0x4, +}; + +#define OP_MODE 0x0001460c +#define __APP_ETH_CLK_LOWSPEED 0x00000004 +#define __GLOBAL_CORECLK_HALFSPEED 0x00000002 +#define __GLOBAL_FCOE_MODE 0x00000001 +#define FW_INIT_HALT_P0 0x000191ac +#define __FW_INIT_HALT_P 0x00000001 +#define FW_INIT_HALT_P1 0x000191bc +#define PMM_1T_RESET_REG_P0 0x0002381c +#define __PMM_1T_RESET_P 0x00000001 +#define PMM_1T_RESET_REG_P1 0x00023c1c + +/* QLogic BR-series 1860 Adapter specific defines */ +#define CT2_PCI_CPQ_BASE 0x00030000 +#define CT2_PCI_APP_BASE 0x00030100 +#define CT2_PCI_ETH_BASE 
0x00030400 + +/* + * APP block registers + */ +#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00) +#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04) +#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08) +#define __PME_STATUS_ 0x00200000 +#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000 +#define __PF_VF_BAR_SIZE_MODE__SH 19 +#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH) +#define __FC_LL_PORT_MAP__MK 0x00060000 +#define __FC_LL_PORT_MAP__SH 17 +#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH) +#define __PF_VF_ACTIVE_ 0x00010000 +#define __PF_VF_CFG_RDY_ 0x00008000 +#define __PF_VF_ENABLE_ 0x00004000 +#define __PF_DRIVER_ACTIVE_ 0x00002000 +#define __PF_PME_SEND_ENABLE_ 0x00001000 +#define __PF_EXROM_OFFSET__MK 0x00000ff0 +#define __PF_EXROM_OFFSET__SH 4 +#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH) +#define __FC_LL_MODE_ 0x00000008 +#define __PF_INTX_PIN_ 0x00000007 +#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C) +#define __PF_NUM_QUEUES1__MK 0xff000000 +#define __PF_NUM_QUEUES1__SH 24 +#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH) +#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000 +#define __PF_VF_QUE_OFFSET1__SH 16 +#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH) +#define __PF_VF_NUM_QUEUES__MK 0x0000ff00 +#define __PF_VF_NUM_QUEUES__SH 8 +#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH) +#define __PF_VF_QUE_OFFSET_ 0x000000ff +#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18) +#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38) + +/* + * QLogic BR-series 1860 adapter CPQ block registers + */ +#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00) +#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20) +#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40) +#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60) +#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80) +#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84) +#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88) +#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c) +#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90) +#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94) +#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98) +#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C) +#define CT2_HOST_SEM0_REG 0x000148f0 +#define CT2_HOST_SEM1_REG 0x000148f4 +#define CT2_HOST_SEM2_REG 0x000148f8 +#define CT2_HOST_SEM3_REG 0x000148fc +#define CT2_HOST_SEM4_REG 0x00014900 +#define CT2_HOST_SEM5_REG 0x00014904 +#define CT2_HOST_SEM6_REG 0x00014908 +#define CT2_HOST_SEM7_REG 0x0001490c +#define CT2_HOST_SEM0_INFO_REG 0x000148b0 +#define CT2_HOST_SEM1_INFO_REG 0x000148b4 +#define CT2_HOST_SEM2_INFO_REG 0x000148b8 +#define CT2_HOST_SEM3_INFO_REG 0x000148bc +#define CT2_HOST_SEM4_INFO_REG 0x000148c0 +#define CT2_HOST_SEM5_INFO_REG 0x000148c4 +#define CT2_HOST_SEM6_INFO_REG 0x000148c8 +#define CT2_HOST_SEM7_INFO_REG 0x000148cc + +#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808 +#define __APP_LPUCLK_HALFSPEED 0x40000000 +#define __APP_PLL_LCLK_LOAD 0x20000000 +#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000 +#define __APP_PLL_LCLK_FBCNT_SH 21 +#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_LCLK_FBCNT_425_MHZ = 6, + __APP_PLL_LCLK_FBCNT_468_MHZ = 4, +}; +#define __APP_PLL_LCLK_EXTFB 0x00000800 +#define __APP_PLL_LCLK_ENOUTS 0x00000400 +#define __APP_PLL_LCLK_RATE 0x00000010 +#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c 
+#define __P_SCLK_PLL_LOCK 0x80000000 +#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000 +#define __APP_PLL_SCLK_CLK_DIV2 0x20000000 +#define __APP_PLL_SCLK_LOAD 0x10000000 +#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000 +#define __APP_PLL_SCLK_FBCNT_SH 20 +#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH) +enum { + __APP_PLL_SCLK_FBCNT_NORM = 6, + __APP_PLL_SCLK_FBCNT_10G_FC = 10, +}; +#define __APP_PLL_SCLK_EXTFB 0x00000800 +#define __APP_PLL_SCLK_ENOUTS 0x00000400 +#define __APP_PLL_SCLK_RATE 0x00000010 +#define CT2_PCIE_MISC_REG 0x00014804 +#define __ETH_CLK_ENABLE_PORT1 0x00000010 +#define CT2_CHIP_MISC_PRG 0x000148a4 +#define __ETH_CLK_ENABLE_PORT0 0x00004000 +#define __APP_LPU_SPEED 0x00000002 +#define CT2_MBIST_STAT_REG 0x00014818 +#define CT2_MBIST_CTL_REG 0x0001481c +#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c +#define __PMM_1T_PNDB_P 0x00000002 +#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c +#define CT2_WGN_STATUS 0x00014990 +#define __A2T_AHB_LOAD 0x00000800 +#define __WGN_READY 0x00000400 +#define __GLBL_PF_VF_CFG_RDY 0x00000200 +#define CT2_NFC_CSR_CLR_REG 0x00027420 +#define CT2_NFC_CSR_SET_REG 0x00027424 +#define __HALT_NFC_CONTROLLER 0x00000002 +#define __NFC_CONTROLLER_HALTED 0x00001000 + +#define CT2_RSC_GPR15_REG 0x0002765c +#define CT2_CSI_FW_CTL_REG 0x00027080 +#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000 +#define CT2_CSI_FW_CTL_SET_REG 0x00027088 + +#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 +#define __CSI_MAC_RESET 0x00000010 +#define __CSI_MAC_AHB_RESET 0x00000008 +#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4 +#define CT2_CSI_MAC_CONTROL_REG(__n) \ + (CT2_CSI_MAC0_CONTROL_REG + \ + (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG)) + +/* + * Name semaphore registers based on usage + */ +#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG +#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG +#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG +#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG +#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG +#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG + +/* + * CT2 semaphore register locations changed + */ +#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG +#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG +#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG +#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG +#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG +#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG + +#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) +#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) + +/* + * And corresponding host interrupt status bit field defines + */ +#define __HFN_INT_CPE_Q0 0x00000001U +#define __HFN_INT_CPE_Q1 0x00000002U +#define __HFN_INT_CPE_Q2 0x00000004U +#define __HFN_INT_CPE_Q3 0x00000008U +#define __HFN_INT_CPE_Q4 0x00000010U +#define __HFN_INT_CPE_Q5 0x00000020U +#define __HFN_INT_CPE_Q6 0x00000040U +#define __HFN_INT_CPE_Q7 0x00000080U +#define __HFN_INT_RME_Q0 0x00000100U +#define __HFN_INT_RME_Q1 0x00000200U +#define __HFN_INT_RME_Q2 0x00000400U +#define __HFN_INT_RME_Q3 0x00000800U +#define __HFN_INT_RME_Q4 0x00001000U +#define __HFN_INT_RME_Q5 0x00002000U +#define __HFN_INT_RME_Q6 0x00004000U +#define __HFN_INT_RME_Q7 0x00008000U +#define __HFN_INT_ERR_EMC 0x00010000U +#define __HFN_INT_ERR_LPU0 0x00020000U +#define __HFN_INT_ERR_LPU1 0x00040000U +#define __HFN_INT_ERR_PSS 0x00080000U +#define __HFN_INT_MBOX_LPU0 0x00100000U +#define __HFN_INT_MBOX_LPU1 0x00200000U +#define __HFN_INT_MBOX1_LPU0 0x00400000U +#define __HFN_INT_MBOX1_LPU1 0x00800000U +#define 
__HFN_INT_LL_HALT 0x01000000U +#define __HFN_INT_CPE_MASK 0x000000ffU +#define __HFN_INT_RME_MASK 0x0000ff00U +#define __HFN_INT_ERR_MASK \ + (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \ + __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT) +#define __HFN_INT_FN0_MASK \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0) +#define __HFN_INT_FN1_MASK \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1) + +/* + * Host interrupt status defines for 1860 + */ +#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U +#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U +#define __HFN_INT_ERR_PSS_CT2 0x00040000U +#define __HFN_INT_ERR_LPU0_CT2 0x00080000U +#define __HFN_INT_ERR_LPU1_CT2 0x00100000U +#define __HFN_INT_CPQ_HALT_CT2 0x00200000U +#define __HFN_INT_ERR_WGN_CT2 0x00400000U +#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U +#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U +#define __HFN_INT_ERR_MASK_CT2 \ + (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \ + __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \ + __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \ + __HFN_INT_ERR_LEHTX_CT2) +#define __HFN_INT_FN0_MASK_CT2 \ + (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \ + __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \ + __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2) +#define __HFN_INT_FN1_MASK_CT2 \ + (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \ + __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \ + __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2) + +/* + * asic memory map. + */ +#define PSS_SMEM_PAGE_START 0x8000 +#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15)) +#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff) + +#endif /* __BFI_REG_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna.h b/drivers/net/ethernet/brocade/bna/bna.h new file mode 100644 index 0000000000..50de1532a8 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna.h @@ -0,0 +1,418 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BNA_H__ +#define __BNA_H__ + +#include "bfa_defs.h" +#include "bfa_ioc.h" +#include "bfi_enet.h" +#include "bna_types.h" + +extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX]; + +/* Macros and constants */ + +#define bna_is_small_rxq(_id) ((_id) & 0x1) + +/* + * input : _addr-> os dma addr in host endian format, + * output : _bna_dma_addr-> pointer to hw dma addr + */ +#define BNA_SET_DMA_ADDR(_addr, _bna_dma_addr) \ +do { \ + u64 tmp_addr = \ + cpu_to_be64((u64)(_addr)); \ + (_bna_dma_addr)->msb = ((struct bna_dma_addr *)&tmp_addr)->msb; \ + (_bna_dma_addr)->lsb = ((struct bna_dma_addr *)&tmp_addr)->lsb; \ +} while (0) + +/* + * input : _bna_dma_addr-> pointer to hw dma addr + * output : _addr-> os dma addr in host endian format + */ +#define BNA_GET_DMA_ADDR(_bna_dma_addr, _addr) \ +do { \ + (_addr) = ((((u64)ntohl((_bna_dma_addr)->msb))) << 32) \ + | ((ntohl((_bna_dma_addr)->lsb) & 0xffffffff)); \ +} while (0) + +#define BNA_TXQ_WI_NEEDED(_vectors) (((_vectors) + 3) >> 2) + +#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth) \ + ((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1)) + +#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth) + +#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth) \ + (((_updated_idx) - (_old_idx)) & ((_q_depth) - 1)) + +#define BNA_QE_FREE_CNT(_q_ptr, _q_depth) \ + (((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) & \ + ((_q_depth) - 1)) +#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth) \ + ((((_q_ptr)->producer_index - (_q_ptr)->consumer_index)) & \ + (_q_depth - 1)) + +#define BNA_LARGE_PKT_SIZE 1000 + +#define BNA_UPDATE_PKT_CNT(_pkt, _len) \ +do { \ + if ((_len) > BNA_LARGE_PKT_SIZE) { \ + (_pkt)->large_pkt_cnt++; \ + } else { \ + (_pkt)->small_pkt_cnt++; \ + } \ +} while (0) + +#define call_rxf_stop_cbfn(rxf) \ +do { \ + if ((rxf)->stop_cbfn) { \ + void (*cbfn)(struct bna_rx *); \ + struct bna_rx *cbarg; \ + cbfn = (rxf)->stop_cbfn; \ + cbarg = (rxf)->stop_cbarg; \ + (rxf)->stop_cbfn = NULL; \ + (rxf)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_rxf_start_cbfn(rxf) \ +do { \ + if ((rxf)->start_cbfn) { \ + void (*cbfn)(struct bna_rx *); \ + struct bna_rx *cbarg; \ + cbfn = (rxf)->start_cbfn; \ + cbarg = (rxf)->start_cbarg; \ + (rxf)->start_cbfn = NULL; \ + (rxf)->start_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_rxf_cam_fltr_cbfn(rxf) \ +do { \ + if ((rxf)->cam_fltr_cbfn) { \ + void (*cbfn)(struct bnad *, struct bna_rx *); \ + struct bnad *cbarg; \ + cbfn = (rxf)->cam_fltr_cbfn; \ + cbarg = (rxf)->cam_fltr_cbarg; \ + (rxf)->cam_fltr_cbfn = NULL; \ + (rxf)->cam_fltr_cbarg = NULL; \ + cbfn(cbarg, rxf->rx); \ + } \ +} while (0) + +#define is_xxx_enable(mode, bitmask, xxx) ((bitmask & xxx) && (mode & xxx)) + +#define is_xxx_disable(mode, bitmask, xxx) ((bitmask & xxx) && !(mode & xxx)) + +#define xxx_enable(mode, bitmask, xxx) \ +do { \ + bitmask |= xxx; \ + mode |= xxx; \ +} while (0) + +#define xxx_disable(mode, bitmask, xxx) \ +do { \ + bitmask |= xxx; \ + mode &= ~xxx; \ +} while (0) + +#define xxx_inactive(mode, bitmask, xxx) \ +do { \ + bitmask &= ~xxx; \ + mode &= ~xxx; \ +} while (0) + +#define is_promisc_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define is_promisc_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, 
BNA_RXMODE_PROMISC) + +#define promisc_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_PROMISC) + +#define promisc_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_PROMISC) + +#define is_default_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define is_default_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define default_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_DEFAULT) + +#define is_allmulti_enable(mode, bitmask) \ + is_xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define is_allmulti_disable(mode, bitmask) \ + is_xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_enable(mode, bitmask) \ + xxx_enable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_disable(mode, bitmask) \ + xxx_disable(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define allmulti_inactive(mode, bitmask) \ + xxx_inactive(mode, bitmask, BNA_RXMODE_ALLMULTI) + +#define GET_RXQS(rxp, q0, q1) do { \ + switch ((rxp)->type) { \ + case BNA_RXP_SINGLE: \ + (q0) = rxp->rxq.single.only; \ + (q1) = NULL; \ + break; \ + case BNA_RXP_SLR: \ + (q0) = rxp->rxq.slr.large; \ + (q1) = rxp->rxq.slr.small; \ + break; \ + case BNA_RXP_HDS: \ + (q0) = rxp->rxq.hds.data; \ + (q1) = rxp->rxq.hds.hdr; \ + break; \ + } \ +} while (0) + +#define bna_tx_rid_mask(_bna) ((_bna)->tx_mod.rid_mask) + +#define bna_rx_rid_mask(_bna) ((_bna)->rx_mod.rid_mask) + +#define bna_tx_from_rid(_bna, _rid, _tx) \ +do { \ + struct bna_tx_mod *__tx_mod = &(_bna)->tx_mod; \ + struct bna_tx *__tx; \ + _tx = NULL; \ + list_for_each_entry(__tx, &__tx_mod->tx_active_q, qe) { \ + if (__tx->rid == (_rid)) { \ + (_tx) = __tx; \ + break; \ + } \ + } \ +} while (0) + +#define bna_rx_from_rid(_bna, _rid, _rx) \ +do { \ + struct bna_rx_mod *__rx_mod = &(_bna)->rx_mod; \ + struct bna_rx *__rx; \ + _rx = NULL; \ + list_for_each_entry(__rx, &__rx_mod->rx_active_q, qe) { \ + if (__rx->rid == (_rid)) { \ + (_rx) = __rx; \ + break; \ + } \ + } \ +} while (0) + +#define bna_mcam_mod_free_q(_bna) (&(_bna)->mcam_mod.free_q) + +#define bna_mcam_mod_del_q(_bna) (&(_bna)->mcam_mod.del_q) + +#define bna_ucam_mod_free_q(_bna) (&(_bna)->ucam_mod.free_q) + +#define bna_ucam_mod_del_q(_bna) (&(_bna)->ucam_mod.del_q) + +/* Inline functions */ + +static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr) +{ + struct bna_mac *mac; + + list_for_each_entry(mac, q, qe) + if (ether_addr_equal(mac->addr, addr)) + return mac; + return NULL; +} + +#define bna_attr(_bna) (&(_bna)->ioceth.attr) + +/* Function prototypes */ + +/* BNA */ + +/* FW response handlers */ +void bna_bfi_stats_clr_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr); + +/* APIs for BNAD */ +void bna_res_req(struct bna_res_info *res_info); +void bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info); +void bna_init(struct bna *bna, struct bnad *bnad, + struct bfa_pcidev *pcidev, + struct bna_res_info *res_info); +void bna_mod_init(struct bna *bna, struct bna_res_info *res_info); +void bna_uninit(struct bna *bna); +int bna_num_txq_set(struct bna *bna, int num_txq); +int bna_num_rxp_set(struct bna *bna, int num_rxp); +void bna_hw_stats_get(struct bna *bna); + +/* APIs for RxF */ +struct bna_mac *bna_cam_mod_mac_get(struct list_head *head); +struct bna_mcam_handle 
*bna_mcam_mod_handle_get(struct bna_mcam_mod *mod); +void bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, + struct bna_mcam_handle *handle); + +/* MBOX */ + +/* API for BNAD */ +void bna_mbox_handler(struct bna *bna, u32 intr_status); + +/* ETHPORT */ + +/* Callbacks for RX */ +void bna_ethport_cb_rx_started(struct bna_ethport *ethport); +void bna_ethport_cb_rx_stopped(struct bna_ethport *ethport); + +/* TX MODULE AND TX */ + +/* FW response handelrs */ +void bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod); + +/* APIs for BNA */ +void bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, + struct bna_res_info *res_info); +void bna_tx_mod_uninit(struct bna_tx_mod *tx_mod); + +/* APIs for ENET */ +void bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type); +void bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type); +void bna_tx_mod_fail(struct bna_tx_mod *tx_mod); + +/* APIs for BNAD */ +void bna_tx_res_req(int num_txq, int txq_depth, + struct bna_res_info *res_info); +struct bna_tx *bna_tx_create(struct bna *bna, struct bnad *bnad, + struct bna_tx_config *tx_cfg, + const struct bna_tx_event_cbfn *tx_cbfn, + struct bna_res_info *res_info, void *priv); +void bna_tx_destroy(struct bna_tx *tx); +void bna_tx_enable(struct bna_tx *tx); +void bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_tx *)); +void bna_tx_cleanup_complete(struct bna_tx *tx); +void bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo); + +/* RX MODULE, RX, RXF */ + +/* FW response handlers */ +void bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); +void bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr); + +/* APIs for BNA */ +void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, + struct bna_res_info *res_info); +void bna_rx_mod_uninit(struct bna_rx_mod *rx_mod); + +/* APIs for ENET */ +void bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type); +void bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type); +void bna_rx_mod_fail(struct bna_rx_mod *rx_mod); + +/* APIs for BNAD */ +void bna_rx_res_req(struct bna_rx_config *rx_config, + struct bna_res_info *res_info); +struct bna_rx *bna_rx_create(struct bna *bna, struct bnad *bnad, + struct bna_rx_config *rx_cfg, + const struct bna_rx_event_cbfn *rx_cbfn, + struct bna_res_info *res_info, void *priv); +void bna_rx_destroy(struct bna_rx *rx); +void bna_rx_enable(struct bna_rx *rx); +void bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_rx *)); +void bna_rx_cleanup_complete(struct bna_rx *rx); +void bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo); +void bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]); +void bna_rx_dim_update(struct bna_ccb *ccb); +enum bna_cb_status bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac); +enum bna_cb_status bna_rx_ucast_listset(struct bna_rx *rx, int count, + const u8 *uclist); +enum bna_cb_status bna_rx_mcast_add(struct bna_rx *rx, const u8 *mcmac, + void 
(*cbfn)(struct bnad *, + struct bna_rx *)); +enum bna_cb_status bna_rx_mcast_listset(struct bna_rx *rx, int count, + const u8 *mcmac); +void +bna_rx_mcast_delall(struct bna_rx *rx); +enum bna_cb_status +bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode rxmode, + enum bna_rxmode bitmask); +void bna_rx_vlan_add(struct bna_rx *rx, int vlan_id); +void bna_rx_vlan_del(struct bna_rx *rx, int vlan_id); +void bna_rx_vlanfilter_enable(struct bna_rx *rx); +void bna_rx_vlan_strip_enable(struct bna_rx *rx); +void bna_rx_vlan_strip_disable(struct bna_rx *rx); +/* ENET */ + +/* API for RX */ +int bna_enet_mtu_get(struct bna_enet *enet); + +/* Callbacks for TX, RX */ +void bna_enet_cb_tx_stopped(struct bna_enet *enet); +void bna_enet_cb_rx_stopped(struct bna_enet *enet); + +/* API for BNAD */ +void bna_enet_enable(struct bna_enet *enet); +void bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type, + void (*cbfn)(void *)); +void bna_enet_pause_config(struct bna_enet *enet, + struct bna_pause_config *pause_config); +void bna_enet_mtu_set(struct bna_enet *enet, int mtu, + void (*cbfn)(struct bnad *)); +void bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac); + +/* IOCETH */ + +/* APIs for BNAD */ +void bna_ioceth_enable(struct bna_ioceth *ioceth); +void bna_ioceth_disable(struct bna_ioceth *ioceth, + enum bna_cleanup_type type); + +/* BNAD */ + +/* Callbacks for ENET */ +void bnad_cb_ethport_link_status(struct bnad *bnad, + enum bna_link_status status); + +/* Callbacks for IOCETH */ +void bnad_cb_ioceth_ready(struct bnad *bnad); +void bnad_cb_ioceth_failed(struct bnad *bnad); +void bnad_cb_ioceth_disabled(struct bnad *bnad); +void bnad_cb_mbox_intr_enable(struct bnad *bnad); +void bnad_cb_mbox_intr_disable(struct bnad *bnad); + +/* Callbacks for BNA */ +void bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, + struct bna_stats *stats); + +#endif /* __BNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c new file mode 100644 index 0000000000..883de0ac8d --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_enet.c @@ -0,0 +1,2094 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include "bna.h" + +static inline int +ethport_can_be_up(struct bna_ethport *ethport) +{ + int ready = 0; + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && + (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && + (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); + else + ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) && + (ethport->flags & BNA_ETHPORT_F_RX_STARTED) && + !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED)); + return ready; +} + +#define ethport_is_up ethport_can_be_up + +enum bna_ethport_event { + ETHPORT_E_START = 1, + ETHPORT_E_STOP = 2, + ETHPORT_E_FAIL = 3, + ETHPORT_E_UP = 4, + ETHPORT_E_DOWN = 5, + ETHPORT_E_FWRESP_UP_OK = 6, + ETHPORT_E_FWRESP_DOWN = 7, + ETHPORT_E_FWRESP_UP_FAIL = 8, +}; + +enum bna_enet_event { + ENET_E_START = 1, + ENET_E_STOP = 2, + ENET_E_FAIL = 3, + ENET_E_PAUSE_CFG = 4, + ENET_E_MTU_CFG = 5, + ENET_E_FWRESP_PAUSE = 6, + ENET_E_CHLD_STOPPED = 7, +}; + +enum bna_ioceth_event { + IOCETH_E_ENABLE = 1, + IOCETH_E_DISABLE = 2, + IOCETH_E_IOC_RESET = 3, + IOCETH_E_IOC_FAILED = 4, + IOCETH_E_IOC_READY = 5, + IOCETH_E_ENET_ATTR_RESP = 6, + IOCETH_E_ENET_STOPPED = 7, + IOCETH_E_IOC_DISABLED = 8, +}; + +#define bna_stats_copy(_name, _type) \ +do { \ + count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64); \ + stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats; \ + stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats; \ + for (i = 0; i < count; i++) \ + stats_dst[i] = be64_to_cpu(stats_src[i]); \ +} while (0) \ + +/* + * FW response handlers + */ + +static void +bna_bfi_ethport_enable_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport_can_be_up(ethport)) + bfa_fsm_send_event(ethport, ETHPORT_E_UP); +} + +static void +bna_bfi_ethport_disable_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + int ethport_up = ethport_is_up(ethport); + + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport_up) + bfa_fsm_send_event(ethport, ETHPORT_E_DOWN); +} + +static void +bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_enable_req *admin_req = + ðport->bfi_enet_cmd.admin_req; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); + + switch (admin_req->enable) { + case BNA_STATUS_T_ENABLED: + if (rsp->error == BFI_ENET_CMD_OK) + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK); + else { + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL); + } + break; + + case BNA_STATUS_T_DISABLED: + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN); + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); + break; + } +} + +static void +bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_diag_lb_req *diag_lb_req = + ðport->bfi_enet_cmd.lpbk_req; + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); + + switch (diag_lb_req->enable) { + case BNA_STATUS_T_ENABLED: + if (rsp->error == BFI_ENET_CMD_OK) + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK); + else { + ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP; + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL); + } + break; + + case BNA_STATUS_T_DISABLED: + bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN); + 
break; + } +} + +static void +bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE); +} + +static void +bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_attr_rsp *rsp = + container_of(msghdr, struct bfi_enet_attr_rsp, mh); + + /** + * Store only if not set earlier, since BNAD can override the HW + * attributes + */ + if (!ioceth->attr.fw_query_complete) { + ioceth->attr.num_txq = ntohl(rsp->max_cfg); + ioceth->attr.num_rxp = ntohl(rsp->max_cfg); + ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac); + ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = ntohl(rsp->rit_size); + ioceth->attr.fw_query_complete = true; + } + + bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP); +} + +static void +bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get; + u64 *stats_src; + u64 *stats_dst; + u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask); + u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask); + int count; + int i; + + bna_stats_copy(mac, mac); + bna_stats_copy(bpc, bpc); + bna_stats_copy(rad, rad); + bna_stats_copy(rlb, rad); + bna_stats_copy(fc_rx, fc_rx); + bna_stats_copy(fc_tx, fc_tx); + + stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]); + + /* Copy Rxf stats to SW area, scatter them while copying */ + for (i = 0; i < BFI_ENET_CFG_MAX; i++) { + stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]); + memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf)); + if (rx_enet_mask & BIT(i)) { + int k; + count = sizeof(struct bfi_enet_stats_rxf) / + sizeof(u64); + for (k = 0; k < count; k++) { + stats_dst[k] = be64_to_cpu(*stats_src); + stats_src++; + } + } + } + + /* Copy Txf stats to SW area, scatter them while copying */ + for (i = 0; i < BFI_ENET_CFG_MAX; i++) { + stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]); + memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf)); + if (tx_enet_mask & BIT(i)) { + int k; + count = sizeof(struct bfi_enet_stats_txf) / + sizeof(u64); + for (k = 0; k < count; k++) { + stats_dst[k] = be64_to_cpu(*stats_src); + stats_src++; + } + } + } + + bna->stats_mod.stats_get_busy = false; + bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats); +} + +static void +bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->link_status = BNA_LINK_UP; + + /* Dispatch events */ + ethport->link_cbfn(ethport->bna->bnad, ethport->link_status); +} + +static void +bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport, + struct bfi_msgq_mhdr *msghdr) +{ + ethport->link_status = BNA_LINK_DOWN; + + /* Dispatch events */ + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); +} + +static void +bna_err_handler(struct bna *bna, u32 intr_status) +{ + if (BNA_IS_HALT_INTR(bna, intr_status)) + bna_halt_clear(bna); + + bfa_nw_ioc_error_isr(&bna->ioceth.ioc); +} + +void +bna_mbox_handler(struct bna *bna, u32 intr_status) +{ + if (BNA_IS_ERR_INTR(bna, intr_status)) { + bna_err_handler(bna, intr_status); + return; + } + if (BNA_IS_MBOX_INTR(bna, intr_status)) + bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc); +} + +static void +bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr) +{ + struct bna *bna = (struct bna *)arg; + struct bna_tx *tx; + struct bna_rx *rx; + + switch (msghdr->msg_id) { + case BFI_ENET_I2H_RX_CFG_SET_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + 
bna_bfi_rx_enet_start_rsp(rx, msghdr); + break; + + case BFI_ENET_I2H_RX_CFG_CLR_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rx_enet_stop_rsp(rx, msghdr); + break; + + case BFI_ENET_I2H_RIT_CFG_RSP: + case BFI_ENET_I2H_RSS_CFG_RSP: + case BFI_ENET_I2H_RSS_ENABLE_RSP: + case BFI_ENET_I2H_RX_PROMISCUOUS_RSP: + case BFI_ENET_I2H_RX_DEFAULT_RSP: + case BFI_ENET_I2H_MAC_UCAST_CLR_RSP: + case BFI_ENET_I2H_MAC_UCAST_ADD_RSP: + case BFI_ENET_I2H_MAC_UCAST_DEL_RSP: + case BFI_ENET_I2H_MAC_MCAST_DEL_RSP: + case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP: + case BFI_ENET_I2H_RX_VLAN_SET_RSP: + case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_MAC_UCAST_SET_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_MAC_MCAST_ADD_RSP: + bna_rx_from_rid(bna, msghdr->enet_id, rx); + if (rx) + bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr); + break; + + case BFI_ENET_I2H_TX_CFG_SET_RSP: + bna_tx_from_rid(bna, msghdr->enet_id, tx); + if (tx) + bna_bfi_tx_enet_start_rsp(tx, msghdr); + break; + + case BFI_ENET_I2H_TX_CFG_CLR_RSP: + bna_tx_from_rid(bna, msghdr->enet_id, tx); + if (tx) + bna_bfi_tx_enet_stop_rsp(tx, msghdr); + break; + + case BFI_ENET_I2H_PORT_ADMIN_RSP: + bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_DIAG_LOOPBACK_RSP: + bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_SET_PAUSE_RSP: + bna_bfi_pause_set_rsp(&bna->enet, msghdr); + break; + + case BFI_ENET_I2H_GET_ATTR_RSP: + bna_bfi_attr_get_rsp(&bna->ioceth, msghdr); + break; + + case BFI_ENET_I2H_STATS_GET_RSP: + bna_bfi_stats_get_rsp(bna, msghdr); + break; + + case BFI_ENET_I2H_STATS_CLR_RSP: + /* No-op */ + break; + + case BFI_ENET_I2H_LINK_UP_AEN: + bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_LINK_DOWN_AEN: + bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_PORT_ENABLE_AEN: + bna_bfi_ethport_enable_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_PORT_DISABLE_AEN: + bna_bfi_ethport_disable_aen(&bna->ethport, msghdr); + break; + + case BFI_ENET_I2H_BW_UPDATE_AEN: + bna_bfi_bw_update_aen(&bna->tx_mod); + break; + + default: + break; + } +} + +/* ETHPORT */ + +#define call_ethport_stop_cbfn(_ethport) \ +do { \ + if ((_ethport)->stop_cbfn) { \ + void (*cbfn)(struct bna_enet *); \ + cbfn = (_ethport)->stop_cbfn; \ + (_ethport)->stop_cbfn = NULL; \ + cbfn(&(_ethport)->bna->enet); \ + } \ +} while (0) + +#define call_ethport_adminup_cbfn(ethport, status) \ +do { \ + if ((ethport)->adminup_cbfn) { \ + void (*cbfn)(struct bnad *, enum bna_cb_status); \ + cbfn = (ethport)->adminup_cbfn; \ + (ethport)->adminup_cbfn = NULL; \ + cbfn((ethport)->bna->bnad, status); \ + } \ +} while (0) + +static void +bna_bfi_ethport_admin_up(struct bna_ethport *ethport) +{ + struct bfi_enet_enable_req *admin_up_req = + ðport->bfi_enet_cmd.admin_req; + + bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0); + admin_up_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + admin_up_req->enable = BNA_STATUS_T_ENABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &admin_up_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_admin_down(struct 
bna_ethport *ethport) +{ + struct bfi_enet_enable_req *admin_down_req = + ðport->bfi_enet_cmd.admin_req; + + bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0); + admin_down_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + admin_down_req->enable = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &admin_down_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport) +{ + struct bfi_enet_diag_lb_req *lpbk_up_req = + ðport->bfi_enet_cmd.lpbk_req; + + bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0); + lpbk_up_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req))); + lpbk_up_req->mode = (ethport->bna->enet.type == + BNA_ENET_T_LOOPBACK_INTERNAL) ? + BFI_ENET_DIAG_LB_OPMODE_EXT : + BFI_ENET_DIAG_LB_OPMODE_CBL; + lpbk_up_req->enable = BNA_STATUS_T_ENABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport) +{ + struct bfi_enet_diag_lb_req *lpbk_down_req = + ðport->bfi_enet_cmd.lpbk_req; + + bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0); + lpbk_down_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req))); + lpbk_down_req->enable = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh); + bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd); +} + +static void +bna_bfi_ethport_up(struct bna_ethport *ethport) +{ + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + bna_bfi_ethport_admin_up(ethport); + else + bna_bfi_ethport_lpbk_up(ethport); +} + +static void +bna_bfi_ethport_down(struct bna_ethport *ethport) +{ + if (ethport->bna->enet.type == BNA_ENET_T_REGULAR) + bna_bfi_ethport_admin_down(ethport); + else + bna_bfi_ethport_lpbk_down(ethport); +} + +bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport, + enum bna_ethport_event); +bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport, + enum bna_ethport_event); + +static void +bna_ethport_sm_stopped_entry(struct bna_ethport *ethport) +{ + call_ethport_stop_cbfn(ethport); +} + +static void +bna_ethport_sm_stopped(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_START: + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + case ETHPORT_E_STOP: + call_ethport_stop_cbfn(ethport); + break; + + case ETHPORT_E_FAIL: + /* No-op */ + break; + + case ETHPORT_E_DOWN: + /* This event is received due to Rx objects failing */ + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_down_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_down(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case 
ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_UP: + bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait); + bna_bfi_ethport_up(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + break; + + case ETHPORT_E_FAIL: + call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL); + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT); + bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait); + break; + + case ETHPORT_E_FWRESP_UP_OK: + call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS); + bfa_fsm_set_state(ethport, bna_ethport_sm_up); + break; + + case ETHPORT_E_FWRESP_UP_FAIL: + call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL); + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + case ETHPORT_E_FWRESP_DOWN: + /* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */ + bna_bfi_ethport_up(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport) +{ + /** + * NOTE: Do not call bna_bfi_ethport_down() here. That will over step + * mbox due to up_resp_wait -> down_resp_wait transition on event + * ETHPORT_E_DOWN + */ +} + +static void +bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_UP: + bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait); + break; + + case ETHPORT_E_FWRESP_UP_OK: + /* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */ + bna_bfi_ethport_down(ethport); + break; + + case ETHPORT_E_FWRESP_UP_FAIL: + case ETHPORT_E_FWRESP_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_down); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_up_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_up(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_STOP: + bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait); + bna_bfi_ethport_down(ethport); + break; + + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait); + bna_bfi_ethport_down(ethport); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport) +{ +} + +static void +bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport, + enum bna_ethport_event event) +{ + switch (event) { + case ETHPORT_E_FAIL: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + case ETHPORT_E_DOWN: + /** + * This event is received due to Rx objects stopping in + * parallel to ethport + */ + /* No-op */ + break; + + case ETHPORT_E_FWRESP_UP_OK: + /* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */ + bna_bfi_ethport_down(ethport); + break; + + case 
ETHPORT_E_FWRESP_UP_FAIL: + case ETHPORT_E_FWRESP_DOWN: + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ethport_init(struct bna_ethport *ethport, struct bna *bna) +{ + ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED); + ethport->bna = bna; + + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn = bnad_cb_ethport_link_status; + + ethport->rx_started_count = 0; + + ethport->stop_cbfn = NULL; + ethport->adminup_cbfn = NULL; + + bfa_fsm_set_state(ethport, bna_ethport_sm_stopped); +} + +static void +bna_ethport_uninit(struct bna_ethport *ethport) +{ + ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP; + ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED; + + ethport->bna = NULL; +} + +static void +bna_ethport_start(struct bna_ethport *ethport) +{ + bfa_fsm_send_event(ethport, ETHPORT_E_START); +} + +static void +bna_enet_cb_ethport_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +static void +bna_ethport_stop(struct bna_ethport *ethport) +{ + ethport->stop_cbfn = bna_enet_cb_ethport_stopped; + bfa_fsm_send_event(ethport, ETHPORT_E_STOP); +} + +static void +bna_ethport_fail(struct bna_ethport *ethport) +{ + /* Reset the physical port status to enabled */ + ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED; + + if (ethport->link_status != BNA_LINK_DOWN) { + ethport->link_status = BNA_LINK_DOWN; + ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN); + } + bfa_fsm_send_event(ethport, ETHPORT_E_FAIL); +} + +/* Should be called only when ethport is disabled */ +void +bna_ethport_cb_rx_started(struct bna_ethport *ethport) +{ + ethport->rx_started_count++; + + if (ethport->rx_started_count == 1) { + ethport->flags |= BNA_ETHPORT_F_RX_STARTED; + + if (ethport_can_be_up(ethport)) + bfa_fsm_send_event(ethport, ETHPORT_E_UP); + } +} + +void +bna_ethport_cb_rx_stopped(struct bna_ethport *ethport) +{ + int ethport_up = ethport_is_up(ethport); + + ethport->rx_started_count--; + + if (ethport->rx_started_count == 0) { + ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED; + + if (ethport_up) + bfa_fsm_send_event(ethport, ETHPORT_E_DOWN); + } +} + +/* ENET */ + +#define bna_enet_chld_start(enet) \ +do { \ + enum bna_tx_type tx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bna_ethport_start(&(enet)->bna->ethport); \ + bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type); \ + bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \ +} while (0) + +#define bna_enet_chld_stop(enet) \ +do { \ + enum bna_tx_type tx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK; \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? 
\ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_ethport_stop(&(enet)->bna->ethport); \ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type); \ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \ + bfa_wc_wait(&(enet)->chld_stop_wc); \ +} while (0) + +#define bna_enet_chld_fail(enet) \ +do { \ + bna_ethport_fail(&(enet)->bna->ethport); \ + bna_tx_mod_fail(&(enet)->bna->tx_mod); \ + bna_rx_mod_fail(&(enet)->bna->rx_mod); \ +} while (0) + +#define bna_enet_rx_start(enet) \ +do { \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type); \ +} while (0) + +#define bna_enet_rx_stop(enet) \ +do { \ + enum bna_rx_type rx_type = \ + ((enet)->type == BNA_ENET_T_REGULAR) ? \ + BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK; \ + bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\ + bfa_wc_up(&(enet)->chld_stop_wc); \ + bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type); \ + bfa_wc_wait(&(enet)->chld_stop_wc); \ +} while (0) + +#define call_enet_stop_cbfn(enet) \ +do { \ + if ((enet)->stop_cbfn) { \ + void (*cbfn)(void *); \ + void *cbarg; \ + cbfn = (enet)->stop_cbfn; \ + cbarg = (enet)->stop_cbarg; \ + (enet)->stop_cbfn = NULL; \ + (enet)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define call_enet_mtu_cbfn(enet) \ +do { \ + if ((enet)->mtu_cbfn) { \ + void (*cbfn)(struct bnad *); \ + cbfn = (enet)->mtu_cbfn; \ + (enet)->mtu_cbfn = NULL; \ + cbfn((enet)->bna->bnad); \ + } \ +} while (0) + +static void bna_enet_cb_chld_stopped(void *arg); +static void bna_bfi_pause_set(struct bna_enet *enet); + +bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, started, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet, + enum bna_enet_event); +bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet, + enum bna_enet_event); + +static void +bna_enet_sm_stopped_entry(struct bna_enet *enet) +{ + call_enet_mtu_cbfn(enet); + call_enet_stop_cbfn(enet); +} + +static void +bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event) +{ + switch (event) { + case ENET_E_START: + bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait); + break; + + case ENET_E_STOP: + call_enet_stop_cbfn(enet); + break; + + case ENET_E_FAIL: + /* No-op */ + break; + + case ENET_E_PAUSE_CFG: + break; + + case ENET_E_MTU_CFG: + call_enet_mtu_cbfn(enet); + break; + + case ENET_E_CHLD_STOPPED: + /** + * This event is received due to Ethport, Tx and Rx objects + * failing + */ + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet) +{ + bna_bfi_pause_set(enet); +} + +static void +bna_enet_sm_pause_init_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait); + break; + + case ENET_E_FAIL: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + 
bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + case ENET_E_PAUSE_CFG: + enet->flags |= BNA_ENET_F_PAUSE_CHANGED; + break; + + case ENET_E_MTU_CFG: + /* No-op */ + break; + + case ENET_E_FWRESP_PAUSE: + if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bna_bfi_pause_set(enet); + } else { + bfa_fsm_set_state(enet, bna_enet_sm_started); + bna_enet_chld_start(enet); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; +} + +static void +bna_enet_sm_last_resp_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + case ENET_E_FWRESP_PAUSE: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_started_entry(struct bna_enet *enet) +{ + /** + * NOTE: Do not call bna_enet_chld_start() here, since it will be + * inadvertently called during cfg_wait->started transition as well + */ + call_enet_mtu_cbfn(enet); +} + +static void +bna_enet_sm_started(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait); + break; + + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_PAUSE_CFG: + bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait); + bna_bfi_pause_set(enet); + break; + + case ENET_E_MTU_CFG: + bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait); + bna_enet_rx_stop(enet); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_cfg_wait_entry(struct bna_enet *enet) +{ +} + +static void +bna_enet_sm_cfg_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_STOP: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait); + break; + + case ENET_E_FAIL: + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_PAUSE_CFG: + enet->flags |= BNA_ENET_F_PAUSE_CHANGED; + break; + + case ENET_E_MTU_CFG: + enet->flags |= BNA_ENET_F_MTU_CHANGED; + break; + + case ENET_E_CHLD_STOPPED: + bna_enet_rx_start(enet); + fallthrough; + case ENET_E_FWRESP_PAUSE: + if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) { + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + bna_bfi_pause_set(enet); + } else if (enet->flags & BNA_ENET_F_MTU_CHANGED) { + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; + bna_enet_rx_stop(enet); + } else { + bfa_fsm_set_state(enet, bna_enet_sm_started); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED; + enet->flags &= ~BNA_ENET_F_MTU_CHANGED; +} + +static void +bna_enet_sm_cfg_stop_wait(struct bna_enet *enet, + enum bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_FWRESP_PAUSE: + case ENET_E_CHLD_STOPPED: + bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet) +{ + bna_enet_chld_stop(enet); +} + +static void +bna_enet_sm_chld_stop_wait(struct bna_enet *enet, + enum 
bna_enet_event event) +{ + switch (event) { + case ENET_E_FAIL: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + bna_enet_chld_fail(enet); + break; + + case ENET_E_CHLD_STOPPED: + bfa_fsm_set_state(enet, bna_enet_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_pause_set(struct bna_enet *enet) +{ + struct bfi_enet_set_pause_req *pause_req = &enet->pause_req; + + bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0); + pause_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req))); + pause_req->tx_pause = enet->pause_config.tx_pause; + pause_req->rx_pause = enet->pause_config.rx_pause; + + bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_set_pause_req), &pause_req->mh); + bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd); +} + +static void +bna_enet_cb_chld_stopped(void *arg) +{ + struct bna_enet *enet = (struct bna_enet *)arg; + + bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED); +} + +static void +bna_enet_init(struct bna_enet *enet, struct bna *bna) +{ + enet->bna = bna; + enet->flags = 0; + enet->mtu = 0; + enet->type = BNA_ENET_T_REGULAR; + + enet->stop_cbfn = NULL; + enet->stop_cbarg = NULL; + + enet->mtu_cbfn = NULL; + + bfa_fsm_set_state(enet, bna_enet_sm_stopped); +} + +static void +bna_enet_uninit(struct bna_enet *enet) +{ + enet->flags = 0; + + enet->bna = NULL; +} + +static void +bna_enet_start(struct bna_enet *enet) +{ + enet->flags |= BNA_ENET_F_IOCETH_READY; + if (enet->flags & BNA_ENET_F_ENABLED) + bfa_fsm_send_event(enet, ENET_E_START); +} + +static void +bna_ioceth_cb_enet_stopped(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED); +} + +static void +bna_enet_stop(struct bna_enet *enet) +{ + enet->stop_cbfn = bna_ioceth_cb_enet_stopped; + enet->stop_cbarg = &enet->bna->ioceth; + + enet->flags &= ~BNA_ENET_F_IOCETH_READY; + bfa_fsm_send_event(enet, ENET_E_STOP); +} + +static void +bna_enet_fail(struct bna_enet *enet) +{ + enet->flags &= ~BNA_ENET_F_IOCETH_READY; + bfa_fsm_send_event(enet, ENET_E_FAIL); +} + +void +bna_enet_cb_tx_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +void +bna_enet_cb_rx_stopped(struct bna_enet *enet) +{ + bfa_wc_down(&enet->chld_stop_wc); +} + +int +bna_enet_mtu_get(struct bna_enet *enet) +{ + return enet->mtu; +} + +void +bna_enet_enable(struct bna_enet *enet) +{ + if (enet->fsm != bna_enet_sm_stopped) + return; + + enet->flags |= BNA_ENET_F_ENABLED; + + if (enet->flags & BNA_ENET_F_IOCETH_READY) + bfa_fsm_send_event(enet, ENET_E_START); +} + +void +bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type, + void (*cbfn)(void *)) +{ + if (type == BNA_SOFT_CLEANUP) { + (*cbfn)(enet->bna->bnad); + return; + } + + enet->stop_cbfn = cbfn; + enet->stop_cbarg = enet->bna->bnad; + + enet->flags &= ~BNA_ENET_F_ENABLED; + + bfa_fsm_send_event(enet, ENET_E_STOP); +} + +void +bna_enet_pause_config(struct bna_enet *enet, + struct bna_pause_config *pause_config) +{ + enet->pause_config = *pause_config; + + bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG); +} + +void +bna_enet_mtu_set(struct bna_enet *enet, int mtu, + void (*cbfn)(struct bnad *)) +{ + enet->mtu = mtu; + + enet->mtu_cbfn = cbfn; + + bfa_fsm_send_event(enet, ENET_E_MTU_CFG); +} + +void +bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac) +{ + bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac); +} + +/* IOCETH */ + +#define enable_mbox_intr(_ioceth) \ +do { \ + u32 
intr_status; \ + bna_intr_status_get((_ioceth)->bna, intr_status); \ + bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad); \ + bna_mbox_intr_enable((_ioceth)->bna); \ +} while (0) + +#define disable_mbox_intr(_ioceth) \ +do { \ + bna_mbox_intr_disable((_ioceth)->bna); \ + bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad); \ +} while (0) + +#define call_ioceth_stop_cbfn(_ioceth) \ +do { \ + if ((_ioceth)->stop_cbfn) { \ + void (*cbfn)(struct bnad *); \ + struct bnad *cbarg; \ + cbfn = (_ioceth)->stop_cbfn; \ + cbarg = (_ioceth)->stop_cbarg; \ + (_ioceth)->stop_cbfn = NULL; \ + (_ioceth)->stop_cbarg = NULL; \ + cbfn(cbarg); \ + } \ +} while (0) + +#define bna_stats_mod_uninit(_stats_mod) \ +do { \ +} while (0) + +#define bna_stats_mod_start(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = true; \ +} while (0) + +#define bna_stats_mod_stop(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = false; \ +} while (0) + +#define bna_stats_mod_fail(_stats_mod) \ +do { \ + (_stats_mod)->ioc_ready = false; \ + (_stats_mod)->stats_get_busy = false; \ + (_stats_mod)->stats_clr_busy = false; \ +} while (0) + +static void bna_bfi_attr_get(struct bna_ioceth *ioceth); + +bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth, + enum bna_ioceth_event); +bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth, + enum bna_ioceth_event); + +static void +bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth) +{ + call_ioceth_stop_cbfn(ioceth); +} + +static void +bna_ioceth_sm_stopped(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_ENABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait); + bfa_nw_ioc_enable(&ioceth->ioc); + break; + + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth) +{ + /** + * Do not call bfa_nw_ioc_enable() here. It must be called in the + * previous state due to failed -> ioc_ready_wait transition. 
+ */ +} + +static void +bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + case IOCETH_E_IOC_READY: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth) +{ + bna_bfi_attr_get(ioceth); +} + +static void +bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + case IOCETH_E_ENET_ATTR_RESP: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth) +{ + bna_enet_start(&ioceth->bna->enet); + bna_stats_mod_start(&ioceth->bna->stats_mod); + bnad_cb_ioceth_ready(ioceth->bna->bnad); +} + +static void +bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait); + break; + + case IOCETH_E_IOC_FAILED: + disable_mbox_intr(ioceth); + bna_enet_fail(&ioceth->bna->enet); + bna_stats_mod_fail(&ioceth->bna->stats_mod); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth) +{ +} + +static void +bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_FAILED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + disable_mbox_intr(ioceth); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_ENET_ATTR_RESP: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth) +{ + bna_stats_mod_stop(&ioceth->bna->stats_mod); + bna_enet_stop(&ioceth->bna->enet); +} + +static void +bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_FAILED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + disable_mbox_intr(ioceth); + bna_enet_fail(&ioceth->bna->enet); + bna_stats_mod_fail(&ioceth->bna->stats_mod); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_ENET_STOPPED: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth) +{ +} + +static void +bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_IOC_DISABLED: + disable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); + break; + + case IOCETH_E_ENET_STOPPED: + /* This event is received due to enet failing */ + /* No-op */ + break; + + 
default: + bfa_sm_fault(event); + } +} + +static void +bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth) +{ + bnad_cb_ioceth_failed(ioceth->bna->bnad); +} + +static void +bna_ioceth_sm_failed(struct bna_ioceth *ioceth, + enum bna_ioceth_event event) +{ + switch (event) { + case IOCETH_E_DISABLE: + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait); + bfa_nw_ioc_disable(&ioceth->ioc); + break; + + case IOCETH_E_IOC_RESET: + enable_mbox_intr(ioceth); + bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait); + break; + + case IOCETH_E_IOC_FAILED: + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_attr_get(struct bna_ioceth *ioceth) +{ + struct bfi_enet_attr_req *attr_req = &ioceth->attr_req; + + bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_GET_ATTR_REQ, 0, 0); + attr_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req))); + bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_attr_req), &attr_req->mh); + bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd); +} + +/* IOC callback functions */ + +static void +bna_cb_ioceth_enable(void *arg, enum bfa_status error) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + if (error) + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); + else + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY); +} + +static void +bna_cb_ioceth_disable(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED); +} + +static void +bna_cb_ioceth_hbfail(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED); +} + +static void +bna_cb_ioceth_reset(void *arg) +{ + struct bna_ioceth *ioceth = (struct bna_ioceth *)arg; + + bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET); +} + +static struct bfa_ioc_cbfn bna_ioceth_cbfn = { + .enable_cbfn = bna_cb_ioceth_enable, + .disable_cbfn = bna_cb_ioceth_disable, + .hbfail_cbfn = bna_cb_ioceth_hbfail, + .reset_cbfn = bna_cb_ioceth_reset +}; + +static void bna_attr_init(struct bna_ioceth *ioceth) +{ + ioceth->attr.num_txq = BFI_ENET_DEF_TXQ; + ioceth->attr.num_rxp = BFI_ENET_DEF_RXP; + ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM; + ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM; + ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ; + ioceth->attr.fw_query_complete = false; +} + +static void +bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna, + struct bna_res_info *res_info) +{ + u64 dma; + u8 *kva; + + ioceth->bna = bna; + + /** + * Attach IOC and claim: + * 1. DMA memory for IOC attributes + * 2. Kernel memory for FW trace + */ + bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn); + bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH); + + BNA_GET_DMA_ADDR( + &res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma); + kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva; + bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma); + + kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva; + bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva); + + /** + * Attach common modules (Diag, SFP, CEE, Port) and claim respective + * DMA memory. 
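+ * The modules carve up one contiguous DMA block: kva and dma are
+ * advanced by each module's meminfo so that every module claims its
+ * own slice.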
+ */ + BNA_GET_DMA_ADDR( + &res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma); + kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva; + bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna); + bfa_nw_cee_mem_claim(&bna->cee, kva, dma); + kva += bfa_nw_cee_meminfo(); + dma += bfa_nw_cee_meminfo(); + + bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna); + bfa_nw_flash_memclaim(&bna->flash, kva, dma); + kva += bfa_nw_flash_meminfo(); + dma += bfa_nw_flash_meminfo(); + + bfa_msgq_attach(&bna->msgq, &ioceth->ioc); + bfa_msgq_memclaim(&bna->msgq, kva, dma); + bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna); + kva += bfa_msgq_meminfo(); + dma += bfa_msgq_meminfo(); + + ioceth->stop_cbfn = NULL; + ioceth->stop_cbarg = NULL; + + bna_attr_init(ioceth); + + bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped); +} + +static void +bna_ioceth_uninit(struct bna_ioceth *ioceth) +{ + bfa_nw_ioc_detach(&ioceth->ioc); + + ioceth->bna = NULL; +} + +void +bna_ioceth_enable(struct bna_ioceth *ioceth) +{ + if (ioceth->fsm == bna_ioceth_sm_ready) { + bnad_cb_ioceth_ready(ioceth->bna->bnad); + return; + } + + if (ioceth->fsm == bna_ioceth_sm_stopped) + bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE); +} + +void +bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type) +{ + if (type == BNA_SOFT_CLEANUP) { + bnad_cb_ioceth_disabled(ioceth->bna->bnad); + return; + } + + ioceth->stop_cbfn = bnad_cb_ioceth_disabled; + ioceth->stop_cbarg = ioceth->bna->bnad; + + bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE); +} + +static void +bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + ucam_mod->ucmac = (struct bna_mac *) + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&ucam_mod->free_q); + for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) + list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q); + + /* A separate queue to allow synchronous setting of a list of MACs */ + INIT_LIST_HEAD(&ucam_mod->del_q); + for (; i < (bna->ioceth.attr.num_ucmac * 2); i++) + list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q); + + ucam_mod->bna = bna; +} + +static void +bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod) +{ + ucam_mod->bna = NULL; +} + +static void +bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + mcam_mod->mcmac = (struct bna_mac *) + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&mcam_mod->free_q); + for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) + list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q); + + mcam_mod->mchandle = (struct bna_mcam_handle *) + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&mcam_mod->free_handle_q); + for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) + list_add_tail(&mcam_mod->mchandle[i].qe, + &mcam_mod->free_handle_q); + + /* A separate queue to allow synchronous setting of a list of MACs */ + INIT_LIST_HEAD(&mcam_mod->del_q); + for (; i < (bna->ioceth.attr.num_mcmac * 2); i++) + list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q); + + mcam_mod->bna = bna; +} + +static void +bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod) +{ + mcam_mod->bna = NULL; +} + +static void +bna_bfi_stats_get(struct bna *bna) +{ + struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get; + + bna->stats_mod.stats_get_busy = true; + + bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_STATS_GET_REQ, 0, 
0); + stats_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req))); + stats_req->stats_mask = htons(BFI_ENET_STATS_ALL); + stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask); + stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask); + stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb; + stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb; + + bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL, + sizeof(struct bfi_enet_stats_req), &stats_req->mh); + bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd); +} + +void +bna_res_req(struct bna_res_info *res_info) +{ + /* DMA memory for COMMON_MODULE */ + res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN( + (bfa_nw_cee_meminfo() + + bfa_nw_flash_meminfo() + + bfa_msgq_meminfo()), PAGE_SIZE); + + /* DMA memory for retrieving IOC attributes */ + res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len = + ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE); + + /* Virtual memory for retreiving fw_trc */ + res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN; + + /* DMA memory for retreiving stats */ + res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1; + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len = + ALIGN(sizeof(struct bfi_enet_stats), + PAGE_SIZE); +} + +void +bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info) +{ + struct bna_attr *attr = &bna->ioceth.attr; + + /* Virtual memory for Tx objects - stored by Tx module */ + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len = + attr->num_txq * sizeof(struct bna_tx); + + /* Virtual memory for TxQ - stored by Tx module */ + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len = + attr->num_txq * sizeof(struct bna_txq); + + /* Virtual memory for Rx objects - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len = + attr->num_rxp * sizeof(struct bna_rx); + + /* Virtual memory for RxPath - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len = 
+ attr->num_rxp * sizeof(struct bna_rxp); + + /* Virtual memory for RxQ - stored by Rx module */ + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len = + (attr->num_rxp * 2) * sizeof(struct bna_rxq); + + /* Virtual memory for Unicast MAC address - stored by ucam module */ + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len = + (attr->num_ucmac * 2) * sizeof(struct bna_mac); + + /* Virtual memory for Multicast MAC address - stored by mcam module */ + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len = + (attr->num_mcmac * 2) * sizeof(struct bna_mac); + + /* Virtual memory for Multicast handle - stored by mcam module */ + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type = + BNA_MEM_T_KVA; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1; + res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len = + attr->num_mcmac * sizeof(struct bna_mcam_handle); +} + +void +bna_init(struct bna *bna, struct bnad *bnad, + struct bfa_pcidev *pcidev, struct bna_res_info *res_info) +{ + bna->bnad = bnad; + bna->pcidev = *pcidev; + + bna->stats.hw_stats_kva = (struct bfi_enet_stats *) + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva; + bna->stats.hw_stats_dma.msb = + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb; + bna->stats.hw_stats_dma.lsb = + res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb; + + bna_reg_addr_init(bna, &bna->pcidev); + + /* Also initializes diag, cee, sfp, phy_port, msgq */ + bna_ioceth_init(&bna->ioceth, bna, res_info); + + bna_enet_init(&bna->enet, bna); + bna_ethport_init(&bna->ethport, bna); +} + +void +bna_mod_init(struct bna *bna, struct bna_res_info *res_info) +{ + bna_tx_mod_init(&bna->tx_mod, bna, res_info); + + bna_rx_mod_init(&bna->rx_mod, bna, res_info); + + bna_ucam_mod_init(&bna->ucam_mod, bna, res_info); + + bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); + + bna->default_mode_rid = BFI_INVALID_RID; + bna->promisc_rid = BFI_INVALID_RID; + + bna->mod_flags |= BNA_MOD_F_INIT_DONE; +} + +void +bna_uninit(struct bna *bna) +{ + if (bna->mod_flags & BNA_MOD_F_INIT_DONE) { + bna_mcam_mod_uninit(&bna->mcam_mod); + bna_ucam_mod_uninit(&bna->ucam_mod); + bna_rx_mod_uninit(&bna->rx_mod); + bna_tx_mod_uninit(&bna->tx_mod); + bna->mod_flags &= ~BNA_MOD_F_INIT_DONE; + } + + bna_stats_mod_uninit(&bna->stats_mod); + bna_ethport_uninit(&bna->ethport); + bna_enet_uninit(&bna->enet); + + bna_ioceth_uninit(&bna->ioceth); + + bna->bnad = NULL; +} + +int +bna_num_txq_set(struct bna *bna, int num_txq) +{ + if (bna->ioceth.attr.fw_query_complete && + (num_txq <= bna->ioceth.attr.num_txq)) { + bna->ioceth.attr.num_txq = num_txq; + return BNA_CB_SUCCESS; + } + + return BNA_CB_FAIL; +} + +int +bna_num_rxp_set(struct bna *bna, int num_rxp) +{ + if (bna->ioceth.attr.fw_query_complete && + (num_rxp 
<= bna->ioceth.attr.num_rxp)) { + bna->ioceth.attr.num_rxp = num_rxp; + return BNA_CB_SUCCESS; + } + + return BNA_CB_FAIL; +} + +struct bna_mac * +bna_cam_mod_mac_get(struct list_head *head) +{ + struct bna_mac *mac; + + mac = list_first_entry_or_null(head, struct bna_mac, qe); + if (mac) + list_del(&mac->qe); + + return mac; +} + +struct bna_mcam_handle * +bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod) +{ + struct bna_mcam_handle *handle; + + handle = list_first_entry_or_null(&mcam_mod->free_handle_q, + struct bna_mcam_handle, qe); + if (handle) + list_del(&handle->qe); + + return handle; +} + +void +bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod, + struct bna_mcam_handle *handle) +{ + list_add_tail(&handle->qe, &mcam_mod->free_handle_q); +} + +void +bna_hw_stats_get(struct bna *bna) +{ + if (!bna->stats_mod.ioc_ready) { + bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats); + return; + } + if (bna->stats_mod.stats_get_busy) { + bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats); + return; + } + + bna_bfi_stats_get(bna); +} diff --git a/drivers/net/ethernet/brocade/bna/bna_hw_defs.h b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h new file mode 100644 index 0000000000..dc34e38f97 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_hw_defs.h @@ -0,0 +1,403 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +/* File for interrupt macros and functions */ + +#ifndef __BNA_HW_DEFS_H__ +#define __BNA_HW_DEFS_H__ + +#include "bfi_reg.h" + +/* SW imposed limits */ + +#define BFI_ENET_DEF_TXQ 1 +#define BFI_ENET_DEF_RXP 1 +#define BFI_ENET_DEF_UCAM 1 +#define BFI_ENET_DEF_RITSZ 1 + +#define BFI_ENET_MAX_MCAM 256 + +#define BFI_INVALID_RID -1 + +#define BFI_IBIDX_SIZE 4 + +#define BFI_VLAN_WORD_SHIFT 5 /* 32 bits */ +#define BFI_VLAN_WORD_MASK 0x1F +#define BFI_VLAN_BLOCK_SHIFT 9 /* 512 bits */ +#define BFI_VLAN_BMASK_ALL 0xFF + +#define BFI_COALESCING_TIMER_UNIT 5 /* 5us */ +#define BFI_MAX_COALESCING_TIMEO 0xFF /* in 5us units */ +#define BFI_MAX_INTERPKT_COUNT 0xFF +#define BFI_MAX_INTERPKT_TIMEO 0xF /* in 0.5us units */ +#define BFI_TX_COALESCING_TIMEO 20 /* 20 * 5 = 100us */ +#define BFI_TX_INTERPKT_COUNT 12 /* Pkt Cnt = 12 */ +#define BFI_TX_INTERPKT_TIMEO 15 /* 15 * 0.5 = 7.5us */ +#define BFI_RX_COALESCING_TIMEO 12 /* 12 * 5 = 60us */ +#define BFI_RX_INTERPKT_COUNT 6 /* Pkt Cnt = 6 */ +#define BFI_RX_INTERPKT_TIMEO 3 /* 3 * 0.5 = 1.5us */ + +#define BFI_TXQ_WI_SIZE 64 /* bytes */ +#define BFI_RXQ_WI_SIZE 8 /* bytes */ +#define BFI_CQ_WI_SIZE 16 /* bytes */ +#define BFI_TX_MAX_WRR_QUOTA 0xFFF + +#define BFI_TX_MAX_VECTORS_PER_WI 4 +#define BFI_TX_MAX_VECTORS_PER_PKT 0xFF +#define BFI_TX_MAX_DATA_PER_VECTOR 0xFFFF +#define BFI_TX_MAX_DATA_PER_PKT 0xFFFFFF + +/* Small Q buffer size */ +#define BFI_SMALL_RXBUF_SIZE 128 + +#define BFI_TX_MAX_PRIO 8 +#define BFI_TX_PRIO_MAP_ALL 0xFF + +/* + * + * Register definitions and macros + * + */ + +#define BNA_PCI_REG_CT_ADDRSZ (0x40000) + +#define ct_reg_addr_init(_bna, _pcidev) \ +{ \ + struct bna_reg_offset reg_offset[] = \ + {{HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK}, \ + {HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, \ + {HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, \ + {HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK} }; \ + \ + (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ + 
reg_offset[(_pcidev)->pci_func].fn_int_status;\ + (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ + reg_offset[(_pcidev)->pci_func].fn_int_mask;\ +} + +#define ct_bit_defn_init(_bna, _pcidev) \ +{ \ + (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0 | \ + __HFN_INT_MBOX_LPU1); \ + (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0 | \ + __HFN_INT_MBOX_LPU1); \ + (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK); \ + (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK); \ + (_bna)->bits.halt_status_bits = __HFN_INT_LL_HALT; \ + (_bna)->bits.halt_mask_bits = __HFN_INT_LL_HALT; \ +} + +#define ct2_reg_addr_init(_bna, _pcidev) \ +{ \ + (_bna)->regs.fn_int_status = (_pcidev)->pci_bar_kva + \ + CT2_HOSTFN_INT_STATUS; \ + (_bna)->regs.fn_int_mask = (_pcidev)->pci_bar_kva + \ + CT2_HOSTFN_INTR_MASK; \ +} + +#define ct2_bit_defn_init(_bna, _pcidev) \ +{ \ + (_bna)->bits.mbox_status_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ + __HFN_INT_MBOX_LPU1_CT2); \ + (_bna)->bits.mbox_mask_bits = (__HFN_INT_MBOX_LPU0_CT2 | \ + __HFN_INT_MBOX_LPU1_CT2); \ + (_bna)->bits.error_status_bits = (__HFN_INT_ERR_MASK_CT2); \ + (_bna)->bits.error_mask_bits = (__HFN_INT_ERR_MASK_CT2); \ + (_bna)->bits.halt_status_bits = __HFN_INT_CPQ_HALT_CT2; \ + (_bna)->bits.halt_mask_bits = __HFN_INT_CPQ_HALT_CT2; \ +} + +#define bna_reg_addr_init(_bna, _pcidev) \ +{ \ + switch ((_pcidev)->device_id) { \ + case PCI_DEVICE_ID_BROCADE_CT: \ + ct_reg_addr_init((_bna), (_pcidev)); \ + ct_bit_defn_init((_bna), (_pcidev)); \ + break; \ + case BFA_PCI_DEVICE_ID_CT2: \ + ct2_reg_addr_init((_bna), (_pcidev)); \ + ct2_bit_defn_init((_bna), (_pcidev)); \ + break; \ + } \ +} + +#define bna_port_id_get(_bna) ((_bna)->ioceth.ioc.port_id) + +/* Interrupt related bits, flags and macros */ + +#define IB_STATUS_BITS 0x0000ffff + +#define BNA_IS_MBOX_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.mbox_status_bits) + +#define BNA_IS_HALT_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.halt_status_bits) + +#define BNA_IS_ERR_INTR(_bna, _intr_status) \ + ((_intr_status) & (_bna)->bits.error_status_bits) + +#define BNA_IS_MBOX_ERR_INTR(_bna, _intr_status) \ + (BNA_IS_MBOX_INTR(_bna, _intr_status) | \ + BNA_IS_ERR_INTR(_bna, _intr_status)) + +#define BNA_IS_INTX_DATA_INTR(_intr_status) \ + ((_intr_status) & IB_STATUS_BITS) + +#define bna_halt_clear(_bna) \ +do { \ + u32 init_halt; \ + init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ + init_halt &= ~__FW_INIT_HALT_P; \ + writel(init_halt, (_bna)->ioceth.ioc.ioc_regs.ll_halt); \ + init_halt = readl((_bna)->ioceth.ioc.ioc_regs.ll_halt); \ +} while (0) + +#define bna_intx_disable(_bna, _cur_mask) \ +{ \ + (_cur_mask) = readl((_bna)->regs.fn_int_mask); \ + writel(0xffffffff, (_bna)->regs.fn_int_mask); \ +} + +#define bna_intx_enable(bna, new_mask) \ + writel((new_mask), (bna)->regs.fn_int_mask) +#define bna_mbox_intr_disable(bna) \ +do { \ + u32 mask; \ + mask = readl((bna)->regs.fn_int_mask); \ + writel((mask | (bna)->bits.mbox_mask_bits | \ + (bna)->bits.error_mask_bits), (bna)->regs.fn_int_mask); \ + mask = readl((bna)->regs.fn_int_mask); \ +} while (0) + +#define bna_mbox_intr_enable(bna) \ +do { \ + u32 mask; \ + mask = readl((bna)->regs.fn_int_mask); \ + writel((mask & ~((bna)->bits.mbox_mask_bits | \ + (bna)->bits.error_mask_bits)), (bna)->regs.fn_int_mask);\ + mask = readl((bna)->regs.fn_int_mask); \ +} while (0) + +#define bna_intr_status_get(_bna, _status) \ +{ \ + (_status) = readl((_bna)->regs.fn_int_status); \ + if (_status) { \ + writel(((_status) & 
~(_bna)->bits.mbox_status_bits), \ + (_bna)->regs.fn_int_status); \ + } \ +} + +/* + * MAX ACK EVENTS : No. of acks that can be accumulated in driver, + * before acking to h/w. The no. of bits is 16 in the doorbell register, + * however we keep this limited to 15 bits. + * This is because around the edge of 64K boundary (16 bits), one + * single poll can make the accumulated ACK counter cross the 64K boundary, + * causing problems, when we try to ack with a value greater than 64K. + * 15 bits (32K) should be large enough to accumulate, anyways, and the max. + * acked events to h/w can be (32K + max poll weight) (currently 64). + */ +#define BNA_IB_MAX_ACK_EVENTS BIT(15) + +/* These macros build the data portion of the TxQ/RxQ doorbell */ +#define BNA_DOORBELL_Q_PRD_IDX(_pi) (0x80000000 | (_pi)) +#define BNA_DOORBELL_Q_STOP (0x40000000) + +/* These macros build the data portion of the IB doorbell */ +#define BNA_DOORBELL_IB_INT_ACK(_timeout, _events) \ + (0x80000000 | ((_timeout) << 16) | (_events)) +#define BNA_DOORBELL_IB_INT_DISABLE (0x40000000) + +/* Set the coalescing timer for the given ib */ +#define bna_ib_coalescing_timer_set(_i_dbell, _cls_timer) \ + ((_i_dbell)->doorbell_ack = BNA_DOORBELL_IB_INT_ACK((_cls_timer), 0)) + +/* Acks 'events' # of events for a given ib while disabling interrupts */ +#define bna_ib_ack_disable_irq(_i_dbell, _events) \ + (writel(BNA_DOORBELL_IB_INT_ACK(0, (_events)), \ + (_i_dbell)->doorbell_addr)) + +/* Acks 'events' # of events for a given ib */ +#define bna_ib_ack(_i_dbell, _events) \ + (writel(((_i_dbell)->doorbell_ack | (_events)), \ + (_i_dbell)->doorbell_addr)) + +#define bna_ib_start(_bna, _ib, _is_regular) \ +{ \ + u32 intx_mask; \ + struct bna_ib *ib = _ib; \ + if ((ib->intr_type == BNA_INTR_T_INTX)) { \ + bna_intx_disable((_bna), intx_mask); \ + intx_mask &= ~(ib->intr_vector); \ + bna_intx_enable((_bna), intx_mask); \ + } \ + bna_ib_coalescing_timer_set(&ib->door_bell, \ + ib->coalescing_timeo); \ + if (_is_regular) \ + bna_ib_ack(&ib->door_bell, 0); \ +} + +#define bna_ib_stop(_bna, _ib) \ +{ \ + u32 intx_mask; \ + struct bna_ib *ib = _ib; \ + writel(BNA_DOORBELL_IB_INT_DISABLE, \ + ib->door_bell.doorbell_addr); \ + if (ib->intr_type == BNA_INTR_T_INTX) { \ + bna_intx_disable((_bna), intx_mask); \ + intx_mask |= ib->intr_vector; \ + bna_intx_enable((_bna), intx_mask); \ + } \ +} + +#define bna_txq_prod_indx_doorbell(_tcb) \ + (writel(BNA_DOORBELL_Q_PRD_IDX((_tcb)->producer_index), \ + (_tcb)->q_dbell)) + +#define bna_rxq_prod_indx_doorbell(_rcb) \ + (writel(BNA_DOORBELL_Q_PRD_IDX((_rcb)->producer_index), \ + (_rcb)->q_dbell)) + +/* TxQ, RxQ, CQ related bits, offsets, macros */ + +/* TxQ Entry Opcodes */ +#define BNA_TXQ_WI_SEND (0x402) /* Single Frame Transmission */ +#define BNA_TXQ_WI_SEND_LSO (0x403) /* Multi-Frame Transmission */ +#define BNA_TXQ_WI_EXTENSION (0x104) /* Extension WI */ + +/* TxQ Entry Control Flags */ +#define BNA_TXQ_WI_CF_FCOE_CRC BIT(8) +#define BNA_TXQ_WI_CF_IPID_MODE BIT(5) +#define BNA_TXQ_WI_CF_INS_PRIO BIT(4) +#define BNA_TXQ_WI_CF_INS_VLAN BIT(3) +#define BNA_TXQ_WI_CF_UDP_CKSUM BIT(2) +#define BNA_TXQ_WI_CF_TCP_CKSUM BIT(1) +#define BNA_TXQ_WI_CF_IP_CKSUM BIT(0) + +#define BNA_TXQ_WI_L4_HDR_N_OFFSET(_hdr_size, _offset) \ + (((_hdr_size) << 10) | ((_offset) & 0x3FF)) + +/* + * Completion Q defines + */ +/* CQ Entry Flags */ +#define BNA_CQ_EF_MAC_ERROR BIT(0) +#define BNA_CQ_EF_FCS_ERROR BIT(1) +#define BNA_CQ_EF_TOO_LONG BIT(2) +#define BNA_CQ_EF_FC_CRC_OK BIT(3) + +#define BNA_CQ_EF_RSVD1 BIT(4) +#define 
BNA_CQ_EF_L4_CKSUM_OK BIT(5) +#define BNA_CQ_EF_L3_CKSUM_OK BIT(6) +#define BNA_CQ_EF_HDS_HEADER BIT(7) + +#define BNA_CQ_EF_UDP BIT(8) +#define BNA_CQ_EF_TCP BIT(9) +#define BNA_CQ_EF_IP_OPTIONS BIT(10) +#define BNA_CQ_EF_IPV6 BIT(11) + +#define BNA_CQ_EF_IPV4 BIT(12) +#define BNA_CQ_EF_VLAN BIT(13) +#define BNA_CQ_EF_RSS BIT(14) +#define BNA_CQ_EF_RSVD2 BIT(15) + +#define BNA_CQ_EF_MCAST_MATCH BIT(16) +#define BNA_CQ_EF_MCAST BIT(17) +#define BNA_CQ_EF_BCAST BIT(18) +#define BNA_CQ_EF_REMOTE BIT(19) + +#define BNA_CQ_EF_LOCAL BIT(20) +/* CAT2 ASIC does not use bit 21 as per the SPEC. + * Bit 31 is set in every end of frame completion + */ +#define BNA_CQ_EF_EOP BIT(31) + +/* Data structures */ + +struct bna_reg_offset { + u32 fn_int_status; + u32 fn_int_mask; +}; + +struct bna_bit_defn { + u32 mbox_status_bits; + u32 mbox_mask_bits; + u32 error_status_bits; + u32 error_mask_bits; + u32 halt_status_bits; + u32 halt_mask_bits; +}; + +struct bna_reg { + void __iomem *fn_int_status; + void __iomem *fn_int_mask; +}; + +/* TxQ Vector (a.k.a. Tx-Buffer Descriptor) */ +struct bna_dma_addr { + u32 msb; + u32 lsb; +}; + +struct bna_txq_wi_vector { + u16 reserved; + u16 length; /* Only 14 LSB are valid */ + struct bna_dma_addr host_addr; /* Tx-Buf DMA addr */ +}; + +/* TxQ Entry Structure + * + * BEWARE: Load values into this structure with correct endianness. + */ +struct bna_txq_entry { + union { + struct { + u8 reserved; + u8 num_vectors; /* number of vectors present */ + u16 opcode; /* Either */ + /* BNA_TXQ_WI_SEND or */ + /* BNA_TXQ_WI_SEND_LSO */ + u16 flags; /* OR of all the flags */ + u16 l4_hdr_size_n_offset; + u16 vlan_tag; + u16 lso_mss; /* Only 14 LSB are valid */ + u32 frame_length; /* Only 24 LSB are valid */ + } wi; + + struct { + u16 reserved; + u16 opcode; /* Must be */ + /* BNA_TXQ_WI_EXTENSION */ + u32 reserved2[3]; /* Place holder for */ + /* removed vector (12 bytes) */ + } wi_ext; + } hdr; + struct bna_txq_wi_vector vector[4]; +}; + +/* RxQ Entry Structure */ +struct bna_rxq_entry { /* Rx-Buffer */ + struct bna_dma_addr host_addr; /* Rx-Buffer DMA address */ +}; + +/* CQ Entry Structure */ +struct bna_cq_entry { + u32 flags; + u16 vlan_tag; + u16 length; + u32 rss_hash; + u8 valid; + u8 reserved1; + u8 reserved2; + u8 rxq_id; +}; + +#endif /* __BNA_HW_DEFS_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c new file mode 100644 index 0000000000..c05dc7a1c4 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c @@ -0,0 +1,3667 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include "bna.h" +#include "bfi.h" + +/* IB */ +static void +bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo) +{ + ib->coalescing_timeo = coalescing_timeo; + ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK( + (u32)ib->coalescing_timeo, 0); +} + +/* RXF */ + +#define bna_rxf_vlan_cfg_soft_reset(rxf) \ +do { \ + (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; \ + (rxf)->vlan_strip_pending = true; \ +} while (0) + +#define bna_rxf_rss_cfg_soft_reset(rxf) \ +do { \ + if ((rxf)->rss_status == BNA_STATUS_T_ENABLED) \ + (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING | \ + BNA_RSS_F_CFG_PENDING | \ + BNA_RSS_F_STATUS_PENDING); \ +} while (0) + +static int bna_rxf_cfg_apply(struct bna_rxf *rxf); +static void bna_rxf_cfg_reset(struct bna_rxf *rxf); +static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf); +static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); +static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); +static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, + enum bna_cleanup_type cleanup); + +bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf, + enum bna_rxf_event); +bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf, + enum bna_rxf_event); + +static void +bna_rxf_sm_stopped_entry(struct bna_rxf *rxf) +{ + call_rxf_stop_cbfn(rxf); +} + +static void +bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_START: + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + case RXF_E_STOP: + call_rxf_stop_cbfn(rxf); + break; + + case RXF_E_FAIL: + /* No-op */ + break; + + case RXF_E_CONFIG: + call_rxf_cam_fltr_cbfn(rxf); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf) +{ + if (!bna_rxf_cfg_apply(rxf)) { + /* No more pending config updates */ + bfa_fsm_set_state(rxf, bna_rxf_sm_started); + } +} + +static void +bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait); + break; + + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + call_rxf_start_cbfn(rxf); + call_rxf_cam_fltr_cbfn(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + /* No-op */ + break; + + case RXF_E_FW_RESP: + if (!bna_rxf_cfg_apply(rxf)) { + /* No more pending config updates */ + bfa_fsm_set_state(rxf, bna_rxf_sm_started); + } + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_started_entry(struct bna_rxf *rxf) +{ + call_rxf_start_cbfn(rxf); + call_rxf_cam_fltr_cbfn(rxf); +} + +static void +bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_STOP: + case RXF_E_FAIL: + bna_rxf_cfg_reset(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + case RXF_E_CONFIG: + bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf) +{ +} + +static void 
+bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event) +{ + switch (event) { + case RXF_E_FAIL: + case RXF_E_FW_RESP: + bna_rxf_cfg_reset(rxf); + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac, + enum bfi_enet_h2i_msgs req_type) +{ + struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req))); + ether_addr_copy(req->mac_addr, mac->addr); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_ucast_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac) +{ + struct bfi_enet_mcast_add_req *req = + &rxf->bfi_enet_cmd.mcast_add_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ, + 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req))); + ether_addr_copy(req->mac_addr, mac->addr); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_mcast_add_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle) +{ + struct bfi_enet_mcast_del_req *req = + &rxf->bfi_enet_cmd.mcast_del_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ, + 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req))); + req->handle = htons(handle); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_mcast_del_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx) +{ + struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req; + int i; + int j; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req))); + req->block_idx = block_idx; + for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) { + j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) + req->bit_mask[i] = + 
htonl(rxf->vlan_filter_table[j]); + else + req->bit_mask[i] = 0xFFFFFFFF; + } + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rx_vlan_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_vlan_strip_enable(struct bna_rxf *rxf) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = rxf->vlan_strip_status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rit_cfg(struct bna_rxf *rxf) +{ + struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req))); + req->size = htons(rxf->rit_size); + memcpy(&req->table[0], rxf->rit, rxf->rit_size); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rit_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rss_cfg(struct bna_rxf *rxf) +{ + struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req; + int i; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req))); + req->cfg.type = rxf->rss_cfg.hash_type; + req->cfg.mask = rxf->rss_cfg.hash_mask; + for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++) + req->cfg.key[i] = + htonl(rxf->rss_cfg.toeplitz_hash_key[i]); + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rss_cfg_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +static void +bna_bfi_rss_enable(struct bna_rxf *rxf) +{ + struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req))); + req->enable = rxf->rss_status; + bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_enable_req), &req->mh); + bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd); +} + +/* This function gets the multicast MAC that has already been added to CAM */ +static struct bna_mac * +bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr) +{ + struct bna_mac *mac; + + list_for_each_entry(mac, &rxf->mcast_active_q, qe) + if (ether_addr_equal(mac->addr, mac_addr)) + return mac; + + list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe) + if (ether_addr_equal(mac->addr, mac_addr)) + return mac; + + return NULL; +} + +static struct bna_mcam_handle * +bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle) +{ + struct bna_mcam_handle *mchandle; + + list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe) + if (mchandle->handle == handle) + return mchandle; + + return NULL; +} + +static void +bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle) +{ + struct bna_mac *mcmac; + struct bna_mcam_handle *mchandle; + + mcmac = bna_rxf_mcmac_get(rxf, mac_addr); + mchandle = bna_rxf_mchandle_get(rxf, handle); + if (mchandle == NULL) { + mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod); + mchandle->handle = handle; + mchandle->refcnt 
= 0; + list_add_tail(&mchandle->qe, &rxf->mcast_handle_q); + } + mchandle->refcnt++; + mcmac->handle = mchandle; +} + +static int +bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac, + enum bna_cleanup_type cleanup) +{ + struct bna_mcam_handle *mchandle; + int ret = 0; + + mchandle = mac->handle; + if (mchandle == NULL) + return ret; + + mchandle->refcnt--; + if (mchandle->refcnt == 0) { + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_del_req(rxf, mchandle->handle); + ret = 1; + } + list_del(&mchandle->qe); + bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle); + } + mac->handle = NULL; + + return ret; +} + +static int +bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf) +{ + struct bna_mac *mac = NULL; + int ret; + + /* First delete multicast entries to maintain the count */ + while (!list_empty(&rxf->mcast_pending_del_q)) { + mac = list_first_entry(&rxf->mcast_pending_del_q, + struct bna_mac, qe); + ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP); + list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); + if (ret) + return ret; + } + + /* Add multicast entries */ + if (!list_empty(&rxf->mcast_pending_add_q)) { + mac = list_first_entry(&rxf->mcast_pending_add_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, &rxf->mcast_active_q); + bna_bfi_mcast_add_req(rxf, mac); + return 1; + } + + return 0; +} + +static int +bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf) +{ + u8 vlan_pending_bitmask; + int block_idx = 0; + + if (rxf->vlan_pending_bitmask) { + vlan_pending_bitmask = rxf->vlan_pending_bitmask; + while (!(vlan_pending_bitmask & 0x1)) { + block_idx++; + vlan_pending_bitmask >>= 1; + } + rxf->vlan_pending_bitmask &= ~BIT(block_idx); + bna_bfi_rx_vlan_filter_set(rxf, block_idx); + return 1; + } + + return 0; +} + +static int +bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct bna_mac *mac; + int ret; + + /* Throw away delete pending mcast entries */ + while (!list_empty(&rxf->mcast_pending_del_q)) { + mac = list_first_entry(&rxf->mcast_pending_del_q, + struct bna_mac, qe); + ret = bna_rxf_mcast_del(rxf, mac, cleanup); + list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna)); + if (ret) + return ret; + } + + /* Move active mcast entries to pending_add_q */ + while (!list_empty(&rxf->mcast_active_q)) { + mac = list_first_entry(&rxf->mcast_active_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, &rxf->mcast_pending_add_q); + if (bna_rxf_mcast_del(rxf, mac, cleanup)) + return 1; + } + + return 0; +} + +static int +bna_rxf_rss_cfg_apply(struct bna_rxf *rxf) +{ + if (rxf->rss_pending) { + if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING; + bna_bfi_rit_cfg(rxf); + return 1; + } + + if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING; + bna_bfi_rss_cfg(rxf); + return 1; + } + + if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) { + rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING; + bna_bfi_rss_enable(rxf); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_cfg_apply(struct bna_rxf *rxf) +{ + if (bna_rxf_ucast_cfg_apply(rxf)) + return 1; + + if (bna_rxf_mcast_cfg_apply(rxf)) + return 1; + + if (bna_rxf_promisc_cfg_apply(rxf)) + return 1; + + if (bna_rxf_allmulti_cfg_apply(rxf)) + return 1; + + if (bna_rxf_vlan_cfg_apply(rxf)) + return 1; + + if (bna_rxf_vlan_strip_cfg_apply(rxf)) + return 1; + + if (bna_rxf_rss_cfg_apply(rxf)) + return 1; + + return 0; +} + +static void +bna_rxf_cfg_reset(struct bna_rxf *rxf) +{ + bna_rxf_ucast_cfg_reset(rxf, 
BNA_SOFT_CLEANUP); + bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP); + bna_rxf_vlan_cfg_soft_reset(rxf); + bna_rxf_rss_cfg_soft_reset(rxf); +} + +static void +bna_rit_init(struct bna_rxf *rxf, int rit_size) +{ + struct bna_rx *rx = rxf->rx; + struct bna_rxp *rxp; + int offset = 0; + + rxf->rit_size = rit_size; + list_for_each_entry(rxp, &rx->rxp_q, qe) { + rxf->rit[offset] = rxp->cq.ccb->id; + offset++; + } +} + +void +bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +void +bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rsp *rsp = + container_of(msghdr, struct bfi_enet_rsp, mh); + + if (rsp->error) { + /* Clear ucast from cache */ + rxf->ucast_active_set = 0; + } + + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +void +bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf, + struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_mcast_add_req *req = + &rxf->bfi_enet_cmd.mcast_add_req; + struct bfi_enet_mcast_add_rsp *rsp = + container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh); + + bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr, + ntohs(rsp->handle)); + bfa_fsm_send_event(rxf, RXF_E_FW_RESP); +} + +static void +bna_rxf_init(struct bna_rxf *rxf, + struct bna_rx *rx, + struct bna_rx_config *q_config, + struct bna_res_info *res_info) +{ + rxf->rx = rx; + + INIT_LIST_HEAD(&rxf->ucast_pending_add_q); + INIT_LIST_HEAD(&rxf->ucast_pending_del_q); + rxf->ucast_pending_set = 0; + rxf->ucast_active_set = 0; + INIT_LIST_HEAD(&rxf->ucast_active_q); + rxf->ucast_pending_mac = NULL; + + INIT_LIST_HEAD(&rxf->mcast_pending_add_q); + INIT_LIST_HEAD(&rxf->mcast_pending_del_q); + INIT_LIST_HEAD(&rxf->mcast_active_q); + INIT_LIST_HEAD(&rxf->mcast_handle_q); + + rxf->rit = (u8 *) + res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva; + bna_rit_init(rxf, q_config->num_paths); + + rxf->rss_status = q_config->rss_status; + if (rxf->rss_status == BNA_STATUS_T_ENABLED) { + rxf->rss_cfg = q_config->rss_config; + rxf->rss_pending |= BNA_RSS_F_CFG_PENDING; + rxf->rss_pending |= BNA_RSS_F_RIT_PENDING; + rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING; + } + + rxf->vlan_filter_status = BNA_STATUS_T_DISABLED; + memset(rxf->vlan_filter_table, 0, + (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32))); + rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */ + rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; + + rxf->vlan_strip_status = q_config->vlan_strip_status; + + bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); +} + +static void +bna_rxf_uninit(struct bna_rxf *rxf) +{ + struct bna_mac *mac; + + rxf->ucast_pending_set = 0; + rxf->ucast_active_set = 0; + + while (!list_empty(&rxf->ucast_pending_add_q)) { + mac = list_first_entry(&rxf->ucast_pending_add_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna)); + } + + if (rxf->ucast_pending_mac) { + list_add_tail(&rxf->ucast_pending_mac->qe, + bna_ucam_mod_free_q(rxf->rx->bna)); + rxf->ucast_pending_mac = NULL; + } + + while (!list_empty(&rxf->mcast_pending_add_q)) { + mac = list_first_entry(&rxf->mcast_pending_add_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); + } + + rxf->rxmode_pending = 0; + rxf->rxmode_pending_bitmask = 0; + if (rxf->rx->bna->promisc_rid == rxf->rx->rid) + rxf->rx->bna->promisc_rid = BFI_INVALID_RID; + if (rxf->rx->bna->default_mode_rid == 
rxf->rx->rid) + rxf->rx->bna->default_mode_rid = BFI_INVALID_RID; + + rxf->rss_pending = 0; + rxf->vlan_strip_pending = false; + + rxf->rx = NULL; +} + +static void +bna_rx_cb_rxf_started(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_RXF_STARTED); +} + +static void +bna_rxf_start(struct bna_rxf *rxf) +{ + rxf->start_cbfn = bna_rx_cb_rxf_started; + rxf->start_cbarg = rxf->rx; + bfa_fsm_send_event(rxf, RXF_E_START); +} + +static void +bna_rx_cb_rxf_stopped(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_RXF_STOPPED); +} + +static void +bna_rxf_stop(struct bna_rxf *rxf) +{ + rxf->stop_cbfn = bna_rx_cb_rxf_stopped; + rxf->stop_cbarg = rxf->rx; + bfa_fsm_send_event(rxf, RXF_E_STOP); +} + +static void +bna_rxf_fail(struct bna_rxf *rxf) +{ + bfa_fsm_send_event(rxf, RXF_E_FAIL); +} + +enum bna_cb_status +bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->ucast_pending_mac == NULL) { + rxf->ucast_pending_mac = + bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna)); + if (rxf->ucast_pending_mac == NULL) + return BNA_CB_UCAST_CAM_FULL; + } + + ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac); + rxf->ucast_pending_set = 1; + rxf->cam_fltr_cbfn = NULL; + rxf->cam_fltr_cbarg = rx->bna->bnad; + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; +} + +enum bna_cb_status +bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr, + void (*cbfn)(struct bnad *, struct bna_rx *)) +{ + struct bna_rxf *rxf = &rx->rxf; + struct bna_mac *mac; + + /* Check if already added or pending addition */ + if (bna_mac_find(&rxf->mcast_active_q, addr) || + bna_mac_find(&rxf->mcast_pending_add_q, addr)) { + if (cbfn) + cbfn(rx->bna->bnad, rx); + return BNA_CB_SUCCESS; + } + + mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna)); + if (mac == NULL) + return BNA_CB_MCAST_LIST_FULL; + ether_addr_copy(mac->addr, addr); + list_add_tail(&mac->qe, &rxf->mcast_pending_add_q); + + rxf->cam_fltr_cbfn = cbfn; + rxf->cam_fltr_cbarg = rx->bna->bnad; + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; +} + +enum bna_cb_status +bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist) +{ + struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod; + struct bna_rxf *rxf = &rx->rxf; + struct list_head list_head; + const u8 *mcaddr; + struct bna_mac *mac, *del_mac; + int i; + + /* Purge the pending_add_q */ + while (!list_empty(&rxf->ucast_pending_add_q)) { + mac = list_first_entry(&rxf->ucast_pending_add_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, &ucam_mod->free_q); + } + + /* Schedule active_q entries for deletion */ + while (!list_empty(&rxf->ucast_active_q)) { + mac = list_first_entry(&rxf->ucast_active_q, + struct bna_mac, qe); + del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q); + ether_addr_copy(del_mac->addr, mac->addr); + del_mac->handle = mac->handle; + list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q); + list_move_tail(&mac->qe, &ucam_mod->free_q); + } + + /* Allocate nodes */ + INIT_LIST_HEAD(&list_head); + for (i = 0, mcaddr = uclist; i < count; i++) { + mac = bna_cam_mod_mac_get(&ucam_mod->free_q); + if (mac == NULL) + goto err_return; + ether_addr_copy(mac->addr, mcaddr); + list_add_tail(&mac->qe, &list_head); + mcaddr += ETH_ALEN; + } + + /* Add the new entries */ + while (!list_empty(&list_head)) { + mac = list_first_entry(&list_head, struct bna_mac, qe); + list_move_tail(&mac->qe, &rxf->ucast_pending_add_q); + } + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; + +err_return: + while 
(!list_empty(&list_head)) { + mac = list_first_entry(&list_head, struct bna_mac, qe); + list_move_tail(&mac->qe, &ucam_mod->free_q); + } + + return BNA_CB_UCAST_CAM_FULL; +} + +enum bna_cb_status +bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist) +{ + struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod; + struct bna_rxf *rxf = &rx->rxf; + struct list_head list_head; + const u8 *mcaddr; + struct bna_mac *mac, *del_mac; + int i; + + /* Purge the pending_add_q */ + while (!list_empty(&rxf->mcast_pending_add_q)) { + mac = list_first_entry(&rxf->mcast_pending_add_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, &mcam_mod->free_q); + } + + /* Schedule active_q entries for deletion */ + while (!list_empty(&rxf->mcast_active_q)) { + mac = list_first_entry(&rxf->mcast_active_q, + struct bna_mac, qe); + del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q); + ether_addr_copy(del_mac->addr, mac->addr); + del_mac->handle = mac->handle; + list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); + mac->handle = NULL; + list_move_tail(&mac->qe, &mcam_mod->free_q); + } + + /* Allocate nodes */ + INIT_LIST_HEAD(&list_head); + for (i = 0, mcaddr = mclist; i < count; i++) { + mac = bna_cam_mod_mac_get(&mcam_mod->free_q); + if (mac == NULL) + goto err_return; + ether_addr_copy(mac->addr, mcaddr); + list_add_tail(&mac->qe, &list_head); + + mcaddr += ETH_ALEN; + } + + /* Add the new entries */ + while (!list_empty(&list_head)) { + mac = list_first_entry(&list_head, struct bna_mac, qe); + list_move_tail(&mac->qe, &rxf->mcast_pending_add_q); + } + + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + + return BNA_CB_SUCCESS; + +err_return: + while (!list_empty(&list_head)) { + mac = list_first_entry(&list_head, struct bna_mac, qe); + list_move_tail(&mac->qe, &mcam_mod->free_q); + } + + return BNA_CB_MCAST_LIST_FULL; +} + +void +bna_rx_mcast_delall(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + struct bna_mac *mac, *del_mac; + int need_hw_config = 0; + + /* Purge all entries from pending_add_q */ + while (!list_empty(&rxf->mcast_pending_add_q)) { + mac = list_first_entry(&rxf->mcast_pending_add_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); + } + + /* Schedule all entries in active_q for deletion */ + while (!list_empty(&rxf->mcast_active_q)) { + mac = list_first_entry(&rxf->mcast_active_q, + struct bna_mac, qe); + list_del(&mac->qe); + del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna)); + memcpy(del_mac, mac, sizeof(*del_mac)); + list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q); + mac->handle = NULL; + list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna)); + need_hw_config = 1; + } + + if (need_hw_config) + bfa_fsm_send_event(rxf, RXF_E_CONFIG); +} + +void +bna_rx_vlan_add(struct bna_rx *rx, int vlan_id) +{ + struct bna_rxf *rxf = &rx->rxf; + int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); + int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK); + int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); + + rxf->vlan_filter_table[index] |= bit; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { + rxf->vlan_pending_bitmask |= BIT(group_id); + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_vlan_del(struct bna_rx *rx, int vlan_id) +{ + struct bna_rxf *rxf = &rx->rxf; + int index = (vlan_id >> BFI_VLAN_WORD_SHIFT); + int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK); + int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT); + + rxf->vlan_filter_table[index] &= ~bit; + if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) { + 
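/* only the changed 512-VLAN block needs to be re-sent to firmware */
+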
rxf->vlan_pending_bitmask |= BIT(group_id); + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +static int +bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf) +{ + struct bna_mac *mac = NULL; + + /* Delete MAC addresses previousely added */ + if (!list_empty(&rxf->ucast_pending_del_q)) { + mac = list_first_entry(&rxf->ucast_pending_del_q, + struct bna_mac, qe); + bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna)); + return 1; + } + + /* Set default unicast MAC */ + if (rxf->ucast_pending_set) { + rxf->ucast_pending_set = 0; + ether_addr_copy(rxf->ucast_active_mac.addr, + rxf->ucast_pending_mac->addr); + rxf->ucast_active_set = 1; + bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, + BFI_ENET_H2I_MAC_UCAST_SET_REQ); + return 1; + } + + /* Add additional MAC entries */ + if (!list_empty(&rxf->ucast_pending_add_q)) { + mac = list_first_entry(&rxf->ucast_pending_add_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, &rxf->ucast_active_q); + bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); + return 1; + } + + return 0; +} + +static int +bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct bna_mac *mac; + + /* Throw away delete pending ucast entries */ + while (!list_empty(&rxf->ucast_pending_del_q)) { + mac = list_first_entry(&rxf->ucast_pending_del_q, + struct bna_mac, qe); + if (cleanup == BNA_SOFT_CLEANUP) + list_move_tail(&mac->qe, + bna_ucam_mod_del_q(rxf->rx->bna)); + else { + bna_bfi_ucast_req(rxf, mac, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + list_move_tail(&mac->qe, + bna_ucam_mod_del_q(rxf->rx->bna)); + return 1; + } + } + + /* Move active ucast entries to pending_add_q */ + while (!list_empty(&rxf->ucast_active_q)) { + mac = list_first_entry(&rxf->ucast_active_q, + struct bna_mac, qe); + list_move_tail(&mac->qe, &rxf->ucast_pending_add_q); + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_ucast_req(rxf, mac, + BFI_ENET_H2I_MAC_UCAST_DEL_REQ); + return 1; + } + } + + if (rxf->ucast_active_set) { + rxf->ucast_pending_set = 1; + rxf->ucast_active_set = 0; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac, + BFI_ENET_H2I_MAC_UCAST_CLR_REQ); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + + /* Enable/disable promiscuous mode */ + if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move promisc configuration from pending -> active */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active |= BNA_RXMODE_PROMISC; + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } else if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move promisc configuration from pending -> active */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + bna->promisc_rid = BFI_INVALID_RID; + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + + return 0; +} + +static int +bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + struct bna *bna = rxf->rx->bna; + + /* Clear pending promisc mode disable */ + if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + bna->promisc_rid = BFI_INVALID_RID; + if (cleanup == BNA_HARD_CLEANUP) { + 
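/* a hard cleanup must explicitly tell firmware to drop promiscuous mode */
+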
bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + } + + /* Move promisc mode config from active -> pending */ + if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { + promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_PROMISC; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf) +{ + /* Enable/disable allmulti mode */ + if (is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move allmulti configuration from pending -> active */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active |= BNA_RXMODE_ALLMULTI; + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED); + return 1; + } else if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* move allmulti configuration from pending -> active */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + + return 0; +} + +static int +bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup) +{ + /* Clear pending allmulti mode disable */ + if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + } + + /* Move allmulti mode config from active -> pending */ + if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { + allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI; + if (cleanup == BNA_HARD_CLEANUP) { + bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED); + return 1; + } + } + + return 0; +} + +static int +bna_rxf_promisc_enable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (rxf->rxmode_active & BNA_RXMODE_PROMISC)) { + /* Do nothing if pending enable or already enabled */ + } else if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending disable command */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else { + /* Schedule enable */ + promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + bna->promisc_rid = rxf->rx->rid; + ret = 1; + } + + return ret; +} + +static int +bna_rxf_promisc_disable(struct bna_rxf *rxf) +{ + struct bna *bna = rxf->rx->bna; + int ret = 0; + + if (is_promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) { + /* Do nothing if pending disable or already disabled */ + } else if (is_promisc_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending enable command */ + promisc_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + bna->promisc_rid = BFI_INVALID_RID; + } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) { + /* Schedule disable */ + promisc_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_allmulti_enable(struct bna_rxf *rxf) +{ + int ret = 0; + + if (is_allmulti_enable(rxf->rxmode_pending, + 
rxf->rxmode_pending_bitmask) || + (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) { + /* Do nothing if pending enable or already enabled */ + } else if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending disable command */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else { + /* Schedule enable */ + allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_allmulti_disable(struct bna_rxf *rxf) +{ + int ret = 0; + + if (is_allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask) || + (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) { + /* Do nothing if pending disable or already disabled */ + } else if (is_allmulti_enable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask)) { + /* Turn off pending enable command */ + allmulti_inactive(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) { + /* Schedule disable */ + allmulti_disable(rxf->rxmode_pending, + rxf->rxmode_pending_bitmask); + ret = 1; + } + + return ret; +} + +static int +bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf) +{ + if (rxf->vlan_strip_pending) { + rxf->vlan_strip_pending = false; + bna_bfi_vlan_strip_enable(rxf); + return 1; + } + + return 0; +} + +/* RX */ + +#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \ + (qcfg)->num_paths : ((qcfg)->num_paths * 2)) + +#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\ + (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)) + +#define call_rx_stop_cbfn(rx) \ +do { \ + if ((rx)->stop_cbfn) { \ + void (*cbfn)(void *, struct bna_rx *); \ + void *cbarg; \ + cbfn = (rx)->stop_cbfn; \ + cbarg = (rx)->stop_cbarg; \ + (rx)->stop_cbfn = NULL; \ + (rx)->stop_cbarg = NULL; \ + cbfn(cbarg, rx); \ + } \ +} while (0) + +#define call_rx_stall_cbfn(rx) \ +do { \ + if ((rx)->rx_stall_cbfn) \ + (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx)); \ +} while (0) + +#define bfi_enet_datapath_q_init(bfi_q, bna_qpt) \ +do { \ + struct bna_dma_addr cur_q_addr = \ + *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr)); \ + (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb; \ + (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb; \ + (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb; \ + (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb; \ + (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \ + (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\ +} while (0) + +static void bna_bfi_rx_enet_start(struct bna_rx *rx); +static void bna_rx_enet_stop(struct bna_rx *rx); +static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx); + +bfa_fsm_state_decl(bna_rx, stopped, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, start_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, start_stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, rxf_start_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, started, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, rxf_stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, stop_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, cleanup_wait, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, failed, + struct bna_rx, enum bna_rx_event); +bfa_fsm_state_decl(bna_rx, quiesce_wait, + struct bna_rx, enum bna_rx_event); + +static void bna_rx_sm_stopped_entry(struct bna_rx *rx) +{ + 
call_rx_stop_cbfn(rx); +} + +static void bna_rx_sm_stopped(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_START: + bfa_fsm_set_state(rx, bna_rx_sm_start_wait); + break; + + case RX_E_STOP: + call_rx_stop_cbfn(rx); + break; + + case RX_E_FAIL: + /* no-op */ + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_start_wait_entry(struct bna_rx *rx) +{ + bna_bfi_rx_enet_start(rx); +} + +static void +bna_rx_sm_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_STARTED: + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_start_wait(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + case RX_E_STARTED: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx) +{ + rx->rx_post_cbfn(rx->bna->bnad, rx); + bna_rxf_start(&rx->rxf); +} + +static void +bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_RXF_STARTED: + bna_rxf_stop(&rx->rxf); + break; + + case RX_E_RXF_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_stop_wait); + call_rx_stall_cbfn(rx); + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + break; + } + +} + +static void +bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_STOPPED: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + case RX_E_STARTED: + bna_rx_enet_stop(rx); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_rx_sm_started_entry(struct bna_rx *rx) +{ + struct bna_rxp *rxp; + int is_regular = (rx->type == BNA_RX_T_REGULAR); + + /* Start IB */ + list_for_each_entry(rxp, &rx->rxp_q, qe) + bna_ib_start(rx->bna, &rxp->cq.ib, is_regular); + + bna_ethport_cb_rx_started(&rx->bna->ethport); +} + +static void +bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); + bna_ethport_cb_rx_stopped(&rx->bna->ethport); + bna_rxf_stop(&rx->rxf); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + bna_ethport_cb_rx_stopped(&rx->bna->ethport); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx, + enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + bna_rxf_fail(&rx->rxf); + call_rx_stall_cbfn(rx); + rx->rx_cleanup_cbfn(rx->bna->bnad, rx); + break; + + case RX_E_RXF_STARTED: 
+ bfa_fsm_set_state(rx, bna_rx_sm_started); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_FAIL: + case RX_E_RXF_STOPPED: + /* No-op */ + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_rx_sm_failed_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_START: + bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait); + break; + + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + break; + + case RX_E_FAIL: + case RX_E_RXF_STARTED: + case RX_E_RXF_STOPPED: + /* No-op */ + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + break; +} } + +static void +bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx) +{ +} + +static void +bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event) +{ + switch (event) { + case RX_E_STOP: + bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait); + break; + + case RX_E_FAIL: + bfa_fsm_set_state(rx, bna_rx_sm_failed); + break; + + case RX_E_CLEANUP_DONE: + bfa_fsm_set_state(rx, bna_rx_sm_start_wait); + break; + + default: + bfa_sm_fault(event); + break; + } +} + +static void +bna_bfi_rx_enet_start(struct bna_rx *rx) +{ + struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req; + struct bna_rxp *rxp = NULL; + struct bna_rxq *q0 = NULL, *q1 = NULL; + int i; + + bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid); + cfg_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req))); + + cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet); + cfg_req->num_queue_sets = rx->num_paths; + for (i = 0; i < rx->num_paths; i++) { + rxp = rxp ? list_next_entry(rxp, qe) + : list_first_entry(&rx->rxp_q, struct bna_rxp, qe); + GET_RXQS(rxp, q0, q1); + switch (rxp->type) { + case BNA_RXP_SLR: + case BNA_RXP_HDS: + /* Small RxQ */ + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q, + &q1->qpt); + cfg_req->q_cfg[i].qs.rx_buffer_size = + htons((u16)q1->buffer_size); + fallthrough; + + case BNA_RXP_SINGLE: + /* Large/Single RxQ */ + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q, + &q0->qpt); + if (q0->multi_buffer) + /* multi-buffer is enabled by allocating + * a new rx with new set of resources. + * q0->buffer_size should be initialized to + * fragment size. + */ + cfg_req->rx_cfg.multi_buffer = + BNA_STATUS_T_ENABLED; + else + q0->buffer_size = + bna_enet_mtu_get(&rx->bna->enet); + cfg_req->q_cfg[i].ql.rx_buffer_size = + htons((u16)q0->buffer_size); + break; + + default: + BUG_ON(1); + } + + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q, + &rxp->cq.qpt); + + cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = + rxp->cq.ib.ib_seg_host_addr.lsb; + cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = + rxp->cq.ib.ib_seg_host_addr.msb; + cfg_req->q_cfg[i].ib.intr.msix_index = + htons((u16)rxp->cq.ib.intr_vector); + } + + cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX) + ? 
BNA_STATUS_T_ENABLED : + BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.coalescing_timeout = + htonl((u32)rxp->cq.ib.coalescing_timeo); + cfg_req->ib_cfg.inter_pkt_timeout = + htonl((u32)rxp->cq.ib.interpkt_timeo); + cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count; + + switch (rxp->type) { + case BNA_RXP_SLR: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL; + break; + + case BNA_RXP_HDS: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS; + cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type; + cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset; + cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset; + break; + + case BNA_RXP_SINGLE: + cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE; + break; + + default: + BUG_ON(1); + } + cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status; + + bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh); + bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); +} + +static void +bna_bfi_rx_enet_stop(struct bna_rx *rx) +{ + struct bfi_enet_req *req = &rx->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); + bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), + &req->mh); + bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd); +} + +static void +bna_rx_enet_stop(struct bna_rx *rx) +{ + struct bna_rxp *rxp; + + /* Stop IB */ + list_for_each_entry(rxp, &rx->rxp_q, qe) + bna_ib_stop(rx->bna, &rxp->cq.ib); + + bna_bfi_rx_enet_stop(rx); +} + +static int +bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg) +{ + if ((rx_mod->rx_free_count == 0) || + (rx_mod->rxp_free_count == 0) || + (rx_mod->rxq_free_count == 0)) + return 0; + + if (rx_cfg->rxp_type == BNA_RXP_SINGLE) { + if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || + (rx_mod->rxq_free_count < rx_cfg->num_paths)) + return 0; + } else { + if ((rx_mod->rxp_free_count < rx_cfg->num_paths) || + (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths))) + return 0; + } + + return 1; +} + +static struct bna_rxq * +bna_rxq_get(struct bna_rx_mod *rx_mod) +{ + struct bna_rxq *rxq = NULL; + + rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe); + list_del(&rxq->qe); + rx_mod->rxq_free_count--; + + return rxq; +} + +static void +bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq) +{ + list_add_tail(&rxq->qe, &rx_mod->rxq_free_q); + rx_mod->rxq_free_count++; +} + +static struct bna_rxp * +bna_rxp_get(struct bna_rx_mod *rx_mod) +{ + struct bna_rxp *rxp = NULL; + + rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe); + list_del(&rxp->qe); + rx_mod->rxp_free_count--; + + return rxp; +} + +static void +bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp) +{ + list_add_tail(&rxp->qe, &rx_mod->rxp_free_q); + rx_mod->rxp_free_count++; +} + +static struct bna_rx * +bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct bna_rx *rx = NULL; + + BUG_ON(list_empty(&rx_mod->rx_free_q)); + if (type == BNA_RX_T_REGULAR) + rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe); + else + rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe); + + rx_mod->rx_free_count--; + list_move_tail(&rx->qe, &rx_mod->rx_active_q); + rx->type = type; + + return rx; +} + +static void +bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx) +{ + struct list_head *qe; + + list_for_each_prev(qe, &rx_mod->rx_free_q) + if (((struct bna_rx *)qe)->rid < 
rx->rid) + break; + + list_add(&rx->qe, qe); + rx_mod->rx_free_count++; +} + +static void +bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0, + struct bna_rxq *q1) +{ + switch (rxp->type) { + case BNA_RXP_SINGLE: + rxp->rxq.single.only = q0; + rxp->rxq.single.reserved = NULL; + break; + case BNA_RXP_SLR: + rxp->rxq.slr.large = q0; + rxp->rxq.slr.small = q1; + break; + case BNA_RXP_HDS: + rxp->rxq.hds.data = q0; + rxp->rxq.hds.hdr = q1; + break; + default: + break; + } +} + +static void +bna_rxq_qpt_setup(struct bna_rxq *rxq, + struct bna_rxp *rxp, + u32 page_count, + u32 page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; + int i; + + rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + rxq->qpt.kv_qpt_ptr = qpt_mem->kva; + rxq->qpt.page_count = page_count; + rxq->qpt.page_size = page_size; + + rxq->rcb->sw_qpt = (void **) swqpt_mem->kva; + rxq->rcb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); + + for (i = 0; i < rxq->qpt.page_count; i++) { + rxq->rcb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + + BNA_SET_DMA_ADDR(dma, &bna_dma); + ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb = + bna_dma.lsb; + ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb = + bna_dma.msb; + dma += PAGE_SIZE; + } +} + +static void +bna_rxp_cqpt_setup(struct bna_rxp *rxp, + u32 page_count, + u32 page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; + int i; + + rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva; + rxp->cq.qpt.page_count = page_count; + rxp->cq.qpt.page_size = page_size; + + rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva; + rxp->cq.ccb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); + + for (i = 0; i < rxp->cq.qpt.page_count; i++) { + rxp->cq.ccb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + + BNA_SET_DMA_ADDR(dma, &bna_dma); + ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb = + bna_dma.lsb; + ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb = + bna_dma.msb; + dma += PAGE_SIZE; + } +} + +static void +bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx) +{ + struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; + + bfa_wc_down(&rx_mod->rx_stop_wc); +} + +static void +bna_rx_mod_cb_rx_stopped_all(void *arg) +{ + struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg; + + if (rx_mod->stop_cbfn) + rx_mod->stop_cbfn(&rx_mod->bna->enet); + rx_mod->stop_cbfn = NULL; +} + +static void +bna_rx_start(struct bna_rx *rx) +{ + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + if (rx->rx_flags & BNA_RX_F_ENABLED) + bfa_fsm_send_event(rx, RX_E_START); +} + +static void +bna_rx_stop(struct bna_rx *rx) +{ + rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; + if (rx->fsm == bna_rx_sm_stopped) + bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx); + else { + rx->stop_cbfn = bna_rx_mod_cb_rx_stopped; + rx->stop_cbarg = &rx->bna->rx_mod; + bfa_fsm_send_event(rx, RX_E_STOP); + } +} + +static void +bna_rx_fail(struct bna_rx *rx) +{ + /* Indicate Enet is not enabled, and failed */ + rx->rx_flags &= ~BNA_RX_F_ENET_STARTED; + bfa_fsm_send_event(rx, RX_E_FAIL); +} + +void +bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct bna_rx *rx; + + rx_mod->flags |= 
BNA_RX_MOD_F_ENET_STARTED; + if (type == BNA_RX_T_LOOPBACK) + rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK; + + list_for_each_entry(rx, &rx_mod->rx_active_q, qe) + if (rx->type == type) + bna_rx_start(rx); +} + +void +bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type) +{ + struct bna_rx *rx; + + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; + + rx_mod->stop_cbfn = bna_enet_cb_rx_stopped; + + bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod); + + list_for_each_entry(rx, &rx_mod->rx_active_q, qe) + if (rx->type == type) { + bfa_wc_up(&rx_mod->rx_stop_wc); + bna_rx_stop(rx); + } + + bfa_wc_wait(&rx_mod->rx_stop_wc); +} + +void +bna_rx_mod_fail(struct bna_rx_mod *rx_mod) +{ + struct bna_rx *rx; + + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED; + rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK; + + list_for_each_entry(rx, &rx_mod->rx_active_q, qe) + bna_rx_fail(rx); +} + +void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int index; + struct bna_rx *rx_ptr; + struct bna_rxp *rxp_ptr; + struct bna_rxq *rxq_ptr; + + rx_mod->bna = bna; + rx_mod->flags = 0; + + rx_mod->rx = (struct bna_rx *) + res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva; + rx_mod->rxp = (struct bna_rxp *) + res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva; + rx_mod->rxq = (struct bna_rxq *) + res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva; + + /* Initialize the queues */ + INIT_LIST_HEAD(&rx_mod->rx_free_q); + rx_mod->rx_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rxq_free_q); + rx_mod->rxq_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rxp_free_q); + rx_mod->rxp_free_count = 0; + INIT_LIST_HEAD(&rx_mod->rx_active_q); + + /* Build RX queues */ + for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { + rx_ptr = &rx_mod->rx[index]; + + INIT_LIST_HEAD(&rx_ptr->rxp_q); + rx_ptr->bna = NULL; + rx_ptr->rid = index; + rx_ptr->stop_cbfn = NULL; + rx_ptr->stop_cbarg = NULL; + + list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q); + rx_mod->rx_free_count++; + } + + /* build RX-path queue */ + for (index = 0; index < bna->ioceth.attr.num_rxp; index++) { + rxp_ptr = &rx_mod->rxp[index]; + list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q); + rx_mod->rxp_free_count++; + } + + /* build RXQ queue */ + for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) { + rxq_ptr = &rx_mod->rxq[index]; + list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q); + rx_mod->rxq_free_count++; + } +} + +void +bna_rx_mod_uninit(struct bna_rx_mod *rx_mod) +{ + rx_mod->bna = NULL; +} + +void +bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp; + struct bna_rxp *rxp = NULL; + struct bna_rxq *q0 = NULL, *q1 = NULL; + int i; + + bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp, + sizeof(struct bfi_enet_rx_cfg_rsp)); + + rx->hw_id = cfg_rsp->hw_id; + + for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); + i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) { + GET_RXQS(rxp, q0, q1); + + /* Setup doorbells */ + rxp->cq.ccb->i_dbell->doorbell_addr = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].i_dbell); + rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid; + q0->rcb->q_dbell = + rx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].ql_dbell); + q0->hw_id = cfg_rsp->q_handles[i].hw_lqid; + if (q1) { + q1->rcb->q_dbell = + rx->bna->pcidev.pci_bar_kva + + 
ntohl(cfg_rsp->q_handles[i].qs_dbell); + q1->hw_id = cfg_rsp->q_handles[i].hw_sqid; + } + + /* Initialize producer/consumer indexes */ + (*rxp->cq.ccb->hw_producer_index) = 0; + rxp->cq.ccb->producer_index = 0; + q0->rcb->producer_index = q0->rcb->consumer_index = 0; + if (q1) + q1->rcb->producer_index = q1->rcb->consumer_index = 0; + } + + bfa_fsm_send_event(rx, RX_E_STARTED); +} + +void +bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(rx, RX_E_STOPPED); +} + +void +bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info) +{ + u32 cq_size, hq_size, dq_size; + u32 cpage_count, hpage_count, dpage_count; + struct bna_mem_info *mem_info; + u32 cq_depth; + u32 hq_depth; + u32 dq_depth; + + dq_depth = q_cfg->q0_depth; + hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth); + cq_depth = roundup_pow_of_two(dq_depth + hq_depth); + + cq_size = cq_depth * BFI_CQ_WI_SIZE; + cq_size = ALIGN(cq_size, PAGE_SIZE); + cpage_count = SIZE_TO_PAGES(cq_size); + + dq_depth = roundup_pow_of_two(dq_depth); + dq_size = dq_depth * BFI_RXQ_WI_SIZE; + dq_size = ALIGN(dq_size, PAGE_SIZE); + dpage_count = SIZE_TO_PAGES(dq_size); + + if (BNA_RXP_SINGLE != q_cfg->rxp_type) { + hq_depth = roundup_pow_of_two(hq_depth); + hq_size = hq_depth * BFI_RXQ_WI_SIZE; + hq_size = ALIGN(hq_size, PAGE_SIZE); + hpage_count = SIZE_TO_PAGES(hq_size); + } else + hpage_count = 0; + + res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_ccb); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_rcb); + mem_info->num = BNA_GET_RXQS(q_cfg); + + res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = cpage_count * sizeof(struct bna_dma_addr); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = cpage_count * sizeof(void *); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * cpage_count; + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = dpage_count * sizeof(struct bna_dma_addr); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = dpage_count * sizeof(void *); + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * dpage_count; + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info; + 
mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = hpage_count * sizeof(struct bna_dma_addr); + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = hpage_count * sizeof(void *); + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * hpage_count; + mem_info->num = (hpage_count ? q_cfg->num_paths : 0); + + res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = BFI_IBIDX_SIZE; + mem_info->num = q_cfg->num_paths; + + res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = BFI_ENET_RSS_RIT_MAX; + mem_info->num = 1; + + res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR; + res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX; + res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths; +} + +struct bna_rx * +bna_rx_create(struct bna *bna, struct bnad *bnad, + struct bna_rx_config *rx_cfg, + const struct bna_rx_event_cbfn *rx_cbfn, + struct bna_res_info *res_info, + void *priv) +{ + struct bna_rx_mod *rx_mod = &bna->rx_mod; + struct bna_rx *rx; + struct bna_rxp *rxp; + struct bna_rxq *q0; + struct bna_rxq *q1; + struct bna_intr_info *intr_info; + struct bna_mem_descr *hqunmap_mem; + struct bna_mem_descr *dqunmap_mem; + struct bna_mem_descr *ccb_mem; + struct bna_mem_descr *rcb_mem; + struct bna_mem_descr *cqpt_mem; + struct bna_mem_descr *cswqpt_mem; + struct bna_mem_descr *cpage_mem; + struct bna_mem_descr *hqpt_mem; + struct bna_mem_descr *dqpt_mem; + struct bna_mem_descr *hsqpt_mem; + struct bna_mem_descr *dsqpt_mem; + struct bna_mem_descr *hpage_mem; + struct bna_mem_descr *dpage_mem; + u32 dpage_count, hpage_count; + u32 hq_idx, dq_idx, rcb_idx; + u32 cq_depth, i; + u32 page_count; + + if (!bna_rx_res_check(rx_mod, rx_cfg)) + return NULL; + + intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; + ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0]; + rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0]; + dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0]; + hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0]; + cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0]; + cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0]; + cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0]; + hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0]; + dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0]; + hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0]; + dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0]; + hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0]; + dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0]; + + page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len / + PAGE_SIZE; + + dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len / + PAGE_SIZE; + + hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len / + 
PAGE_SIZE; + + rx = bna_rx_get(rx_mod, rx_cfg->rx_type); + rx->bna = bna; + rx->rx_flags = 0; + INIT_LIST_HEAD(&rx->rxp_q); + rx->stop_cbfn = NULL; + rx->stop_cbarg = NULL; + rx->priv = priv; + + rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn; + rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn; + rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn; + rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn; + rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn; + /* Following callbacks are mandatory */ + rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn; + rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn; + + if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) { + switch (rx->type) { + case BNA_RX_T_REGULAR: + if (!(rx->bna->rx_mod.flags & + BNA_RX_MOD_F_ENET_LOOPBACK)) + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + break; + case BNA_RX_T_LOOPBACK: + if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK) + rx->rx_flags |= BNA_RX_F_ENET_STARTED; + break; + } + } + + rx->num_paths = rx_cfg->num_paths; + for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0; + i < rx->num_paths; i++) { + rxp = bna_rxp_get(rx_mod); + list_add_tail(&rxp->qe, &rx->rxp_q); + rxp->type = rx_cfg->rxp_type; + rxp->rx = rx; + rxp->cq.rx = rx; + + q0 = bna_rxq_get(rx_mod); + if (BNA_RXP_SINGLE == rx_cfg->rxp_type) + q1 = NULL; + else + q1 = bna_rxq_get(rx_mod); + + if (1 == intr_info->num) + rxp->vector = intr_info->idl[0].vector; + else + rxp->vector = intr_info->idl[i].vector; + + /* Setup IB */ + + rxp->cq.ib.ib_seg_host_addr.lsb = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; + rxp->cq.ib.ib_seg_host_addr.msb = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; + rxp->cq.ib.ib_seg_host_addr_kva = + res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; + rxp->cq.ib.intr_type = intr_info->intr_type; + if (intr_info->intr_type == BNA_INTR_T_MSIX) + rxp->cq.ib.intr_vector = rxp->vector; + else + rxp->cq.ib.intr_vector = BIT(rxp->vector); + rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo; + rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT; + rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO; + + bna_rxp_add_rxqs(rxp, q0, q1); + + /* Setup large Q */ + + q0->rx = rx; + q0->rxp = rxp; + + q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; + q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva; + rcb_idx++; dq_idx++; + q0->rcb->q_depth = rx_cfg->q0_depth; + q0->q_depth = rx_cfg->q0_depth; + q0->multi_buffer = rx_cfg->q0_multi_buf; + q0->buffer_size = rx_cfg->q0_buf_size; + q0->num_vecs = rx_cfg->q0_num_vecs; + q0->rcb->rxq = q0; + q0->rcb->bnad = bna->bnad; + q0->rcb->id = 0; + q0->rx_packets = q0->rx_bytes = 0; + q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0; + q0->rxbuf_map_failed = 0; + + bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE, + &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]); + + if (rx->rcb_setup_cbfn) + rx->rcb_setup_cbfn(bnad, q0->rcb); + + /* Setup small Q */ + + if (q1) { + q1->rx = rx; + q1->rxp = rxp; + + q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva; + q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva; + rcb_idx++; hq_idx++; + q1->rcb->q_depth = rx_cfg->q1_depth; + q1->q_depth = rx_cfg->q1_depth; + q1->multi_buffer = BNA_STATUS_T_DISABLED; + q1->num_vecs = 1; + q1->rcb->rxq = q1; + q1->rcb->bnad = bna->bnad; + q1->rcb->id = 1; + q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ? 
+ rx_cfg->hds_config.forced_offset + : rx_cfg->q1_buf_size; + q1->rx_packets = q1->rx_bytes = 0; + q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0; + q1->rxbuf_map_failed = 0; + + bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE, + &hqpt_mem[i], &hsqpt_mem[i], + &hpage_mem[i]); + + if (rx->rcb_setup_cbfn) + rx->rcb_setup_cbfn(bnad, q1->rcb); + } + + /* Setup CQ */ + + rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva; + cq_depth = rx_cfg->q0_depth + + ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ? + 0 : rx_cfg->q1_depth); + /* if multi-buffer is enabled sum of q0_depth + * and q1_depth need not be a power of 2 + */ + cq_depth = roundup_pow_of_two(cq_depth); + rxp->cq.ccb->q_depth = cq_depth; + rxp->cq.ccb->cq = &rxp->cq; + rxp->cq.ccb->rcb[0] = q0->rcb; + q0->rcb->ccb = rxp->cq.ccb; + if (q1) { + rxp->cq.ccb->rcb[1] = q1->rcb; + q1->rcb->ccb = rxp->cq.ccb; + } + rxp->cq.ccb->hw_producer_index = + (u32 *)rxp->cq.ib.ib_seg_host_addr_kva; + rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell; + rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type; + rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector; + rxp->cq.ccb->rx_coalescing_timeo = + rxp->cq.ib.coalescing_timeo; + rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0; + rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0; + rxp->cq.ccb->bnad = bna->bnad; + rxp->cq.ccb->id = i; + + bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE, + &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]); + + if (rx->ccb_setup_cbfn) + rx->ccb_setup_cbfn(bnad, rxp->cq.ccb); + } + + rx->hds_cfg = rx_cfg->hds_config; + + bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info); + + bfa_fsm_set_state(rx, bna_rx_sm_stopped); + + rx_mod->rid_mask |= BIT(rx->rid); + + return rx; +} + +void +bna_rx_destroy(struct bna_rx *rx) +{ + struct bna_rx_mod *rx_mod = &rx->bna->rx_mod; + struct bna_rxq *q0 = NULL; + struct bna_rxq *q1 = NULL; + struct bna_rxp *rxp; + struct list_head *qe; + + bna_rxf_uninit(&rx->rxf); + + while (!list_empty(&rx->rxp_q)) { + rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe); + list_del(&rxp->qe); + GET_RXQS(rxp, q0, q1); + if (rx->rcb_destroy_cbfn) + rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb); + q0->rcb = NULL; + q0->rxp = NULL; + q0->rx = NULL; + bna_rxq_put(rx_mod, q0); + + if (q1) { + if (rx->rcb_destroy_cbfn) + rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb); + q1->rcb = NULL; + q1->rxp = NULL; + q1->rx = NULL; + bna_rxq_put(rx_mod, q1); + } + rxp->rxq.slr.large = NULL; + rxp->rxq.slr.small = NULL; + + if (rx->ccb_destroy_cbfn) + rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb); + rxp->cq.ccb = NULL; + rxp->rx = NULL; + bna_rxp_put(rx_mod, rxp); + } + + list_for_each(qe, &rx_mod->rx_active_q) + if (qe == &rx->qe) { + list_del(&rx->qe); + break; + } + + rx_mod->rid_mask &= ~BIT(rx->rid); + + rx->bna = NULL; + rx->priv = NULL; + bna_rx_put(rx_mod, rx); +} + +void +bna_rx_enable(struct bna_rx *rx) +{ + if (rx->fsm != bna_rx_sm_stopped) + return; + + rx->rx_flags |= BNA_RX_F_ENABLED; + if (rx->rx_flags & BNA_RX_F_ENET_STARTED) + bfa_fsm_send_event(rx, RX_E_START); +} + +void +bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_rx *)) +{ + if (type == BNA_SOFT_CLEANUP) { + /* h/w should not be accessed. 
Treat we're stopped */ + (*cbfn)(rx->bna->bnad, rx); + } else { + rx->stop_cbfn = cbfn; + rx->stop_cbarg = rx->bna->bnad; + + rx->rx_flags &= ~BNA_RX_F_ENABLED; + + bfa_fsm_send_event(rx, RX_E_STOP); + } +} + +void +bna_rx_cleanup_complete(struct bna_rx *rx) +{ + bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE); +} + +void +bna_rx_vlan_strip_enable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) { + rxf->vlan_strip_status = BNA_STATUS_T_ENABLED; + rxf->vlan_strip_pending = true; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_vlan_strip_disable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) { + rxf->vlan_strip_status = BNA_STATUS_T_DISABLED; + rxf->vlan_strip_pending = true; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +enum bna_cb_status +bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode, + enum bna_rxmode bitmask) +{ + struct bna_rxf *rxf = &rx->rxf; + int need_hw_config = 0; + + /* Error checks */ + + if (is_promisc_enable(new_mode, bitmask)) { + /* If promisc mode is already enabled elsewhere in the system */ + if ((rx->bna->promisc_rid != BFI_INVALID_RID) && + (rx->bna->promisc_rid != rxf->rx->rid)) + goto err_return; + + /* If default mode is already enabled in the system */ + if (rx->bna->default_mode_rid != BFI_INVALID_RID) + goto err_return; + + /* Trying to enable promiscuous and default mode together */ + if (is_default_enable(new_mode, bitmask)) + goto err_return; + } + + if (is_default_enable(new_mode, bitmask)) { + /* If default mode is already enabled elsewhere in the system */ + if ((rx->bna->default_mode_rid != BFI_INVALID_RID) && + (rx->bna->default_mode_rid != rxf->rx->rid)) { + goto err_return; + } + + /* If promiscuous mode is already enabled in the system */ + if (rx->bna->promisc_rid != BFI_INVALID_RID) + goto err_return; + } + + /* Process the commands */ + + if (is_promisc_enable(new_mode, bitmask)) { + if (bna_rxf_promisc_enable(rxf)) + need_hw_config = 1; + } else if (is_promisc_disable(new_mode, bitmask)) { + if (bna_rxf_promisc_disable(rxf)) + need_hw_config = 1; + } + + if (is_allmulti_enable(new_mode, bitmask)) { + if (bna_rxf_allmulti_enable(rxf)) + need_hw_config = 1; + } else if (is_allmulti_disable(new_mode, bitmask)) { + if (bna_rxf_allmulti_disable(rxf)) + need_hw_config = 1; + } + + /* Trigger h/w if needed */ + + if (need_hw_config) { + rxf->cam_fltr_cbfn = NULL; + rxf->cam_fltr_cbarg = rx->bna->bnad; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } + + return BNA_CB_SUCCESS; + +err_return: + return BNA_CB_FAIL; +} + +void +bna_rx_vlanfilter_enable(struct bna_rx *rx) +{ + struct bna_rxf *rxf = &rx->rxf; + + if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) { + rxf->vlan_filter_status = BNA_STATUS_T_ENABLED; + rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL; + bfa_fsm_send_event(rxf, RXF_E_CONFIG); + } +} + +void +bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo) +{ + struct bna_rxp *rxp; + + list_for_each_entry(rxp, &rx->rxp_q, qe) { + rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo; + bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo); + } +} + +void +bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX]) +{ + int i, j; + + for (i = 0; i < BNA_LOAD_T_MAX; i++) + for (j = 0; j < BNA_BIAS_T_MAX; j++) + bna->rx_mod.dim_vector[i][j] = vector[i][j]; +} + +void +bna_rx_dim_update(struct bna_ccb *ccb) +{ + struct bna *bna = ccb->cq->rx->bna; + u32 load, 
bias; + u32 pkt_rt, small_rt, large_rt; + u8 coalescing_timeo; + + if ((ccb->pkt_rate.small_pkt_cnt == 0) && + (ccb->pkt_rate.large_pkt_cnt == 0)) + return; + + /* Arrive at preconfigured coalescing timeo value based on pkt rate */ + + small_rt = ccb->pkt_rate.small_pkt_cnt; + large_rt = ccb->pkt_rate.large_pkt_cnt; + + pkt_rt = small_rt + large_rt; + + if (pkt_rt < BNA_PKT_RATE_10K) + load = BNA_LOAD_T_LOW_4; + else if (pkt_rt < BNA_PKT_RATE_20K) + load = BNA_LOAD_T_LOW_3; + else if (pkt_rt < BNA_PKT_RATE_30K) + load = BNA_LOAD_T_LOW_2; + else if (pkt_rt < BNA_PKT_RATE_40K) + load = BNA_LOAD_T_LOW_1; + else if (pkt_rt < BNA_PKT_RATE_50K) + load = BNA_LOAD_T_HIGH_1; + else if (pkt_rt < BNA_PKT_RATE_60K) + load = BNA_LOAD_T_HIGH_2; + else if (pkt_rt < BNA_PKT_RATE_80K) + load = BNA_LOAD_T_HIGH_3; + else + load = BNA_LOAD_T_HIGH_4; + + if (small_rt > (large_rt << 1)) + bias = 0; + else + bias = 1; + + ccb->pkt_rate.small_pkt_cnt = 0; + ccb->pkt_rate.large_pkt_cnt = 0; + + coalescing_timeo = bna->rx_mod.dim_vector[load][bias]; + ccb->rx_coalescing_timeo = coalescing_timeo; + + /* Set it to IB */ + bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo); +} + +const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = { + {12, 12}, + {6, 10}, + {5, 10}, + {4, 8}, + {3, 6}, + {3, 6}, + {2, 4}, + {1, 2}, +}; + +/* TX */ + +#define call_tx_stop_cbfn(tx) \ +do { \ + if ((tx)->stop_cbfn) { \ + void (*cbfn)(void *, struct bna_tx *); \ + void *cbarg; \ + cbfn = (tx)->stop_cbfn; \ + cbarg = (tx)->stop_cbarg; \ + (tx)->stop_cbfn = NULL; \ + (tx)->stop_cbarg = NULL; \ + cbfn(cbarg, (tx)); \ + } \ +} while (0) + +static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); +static void bna_bfi_tx_enet_start(struct bna_tx *tx); +static void bna_tx_enet_stop(struct bna_tx *tx); + +enum bna_tx_event { + TX_E_START = 1, + TX_E_STOP = 2, + TX_E_FAIL = 3, + TX_E_STARTED = 4, + TX_E_STOPPED = 5, + TX_E_CLEANUP_DONE = 7, + TX_E_BW_UPDATE = 8, +}; + +bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx, + enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event); +bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx, + enum bna_tx_event); + +static void +bna_tx_sm_stopped_entry(struct bna_tx *tx) +{ + call_tx_stop_cbfn(tx); +} + +static void +bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_START: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + case TX_E_STOP: + call_tx_stop_cbfn(tx); + break; + + case TX_E_FAIL: + /* No-op */ + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_start_wait_entry(struct bna_tx *tx) +{ + bna_bfi_tx_enet_start(tx); +} + +static void +bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + tx->flags &= ~BNA_TX_F_BW_UPDATED; + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + break; + + case TX_E_FAIL: + tx->flags &= ~BNA_TX_F_BW_UPDATED; + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + case 
TX_E_STARTED: + if (tx->flags & BNA_TX_F_BW_UPDATED) { + tx->flags &= ~BNA_TX_F_BW_UPDATED; + bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); + } else + bfa_fsm_set_state(tx, bna_tx_sm_started); + break; + + case TX_E_BW_UPDATE: + tx->flags |= BNA_TX_F_BW_UPDATED; + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_started_entry(struct bna_tx *tx) +{ + struct bna_txq *txq; + int is_regular = (tx->type == BNA_TX_T_REGULAR); + + list_for_each_entry(txq, &tx->txq_q, qe) { + txq->tcb->priority = txq->priority; + /* Start IB */ + bna_ib_start(tx->bna, &txq->ib, is_regular); + } + tx->tx_resume_cbfn(tx->bna->bnad, tx); +} + +static void +bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + tx->tx_stall_cbfn(tx->bna->bnad, tx); + bna_tx_enet_stop(tx); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + tx->tx_stall_cbfn(tx->bna->bnad, tx); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_BW_UPDATE: + bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_stop_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_FAIL: + case TX_E_STOPPED: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_STARTED: + /** + * We are here due to start_wait -> stop_wait transition on + * TX_E_STOP event + */ + bna_tx_enet_stop(tx); + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_FAIL: + case TX_E_BW_UPDATE: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) +{ + tx->tx_stall_cbfn(tx->bna->bnad, tx); + bna_tx_enet_stop(tx); +} + +static void +bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); + break; + + case TX_E_STOPPED: + bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) +{ + tx->tx_cleanup_cbfn(tx->bna->bnad, tx); +} + +static void +bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_failed_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_START: + bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); + break; + + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; 
+ + case TX_E_FAIL: + /* No-op */ + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) +{ +} + +static void +bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) +{ + switch (event) { + case TX_E_STOP: + bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); + break; + + case TX_E_FAIL: + bfa_fsm_set_state(tx, bna_tx_sm_failed); + break; + + case TX_E_CLEANUP_DONE: + bfa_fsm_set_state(tx, bna_tx_sm_start_wait); + break; + + case TX_E_BW_UPDATE: + /* No-op */ + break; + + default: + bfa_sm_fault(event); + } +} + +static void +bna_bfi_tx_enet_start(struct bna_tx *tx) +{ + struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; + struct bna_txq *txq = NULL; + int i; + + bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET, + BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); + cfg_req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req))); + + cfg_req->num_queues = tx->num_txq; + for (i = 0; i < tx->num_txq; i++) { + txq = txq ? list_next_entry(txq, qe) + : list_first_entry(&tx->txq_q, struct bna_txq, qe); + bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt); + cfg_req->q_cfg[i].q.priority = txq->priority; + + cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo = + txq->ib.ib_seg_host_addr.lsb; + cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi = + txq->ib.ib_seg_host_addr.msb; + cfg_req->q_cfg[i].ib.intr.msix_index = + htons((u16)txq->ib.intr_vector); + } + + cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED; + cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX) + ? 
BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; + cfg_req->ib_cfg.coalescing_timeout = + htonl((u32)txq->ib.coalescing_timeo); + cfg_req->ib_cfg.inter_pkt_timeout = + htonl((u32)txq->ib.interpkt_timeo); + cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count; + + cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI; + cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); + cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED; + cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED; + + bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, + sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh); + bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); +} + +static void +bna_bfi_tx_enet_stop(struct bna_tx *tx) +{ + struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; + + bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, + BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); + req->mh.num_entries = htons( + bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req))); + bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), + &req->mh); + bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); +} + +static void +bna_tx_enet_stop(struct bna_tx *tx) +{ + struct bna_txq *txq; + + /* Stop IB */ + list_for_each_entry(txq, &tx->txq_q, qe) + bna_ib_stop(tx->bna, &txq->ib); + + bna_bfi_tx_enet_stop(tx); +} + +static void +bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size, + struct bna_mem_descr *qpt_mem, + struct bna_mem_descr *swqpt_mem, + struct bna_mem_descr *page_mem) +{ + u8 *kva; + u64 dma; + struct bna_dma_addr bna_dma; + int i; + + txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb; + txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb; + txq->qpt.kv_qpt_ptr = qpt_mem->kva; + txq->qpt.page_count = page_count; + txq->qpt.page_size = page_size; + + txq->tcb->sw_qpt = (void **) swqpt_mem->kva; + txq->tcb->sw_q = page_mem->kva; + + kva = page_mem->kva; + BNA_GET_DMA_ADDR(&page_mem->dma, dma); + + for (i = 0; i < page_count; i++) { + txq->tcb->sw_qpt[i] = kva; + kva += PAGE_SIZE; + + BNA_SET_DMA_ADDR(dma, &bna_dma); + ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb = + bna_dma.lsb; + ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb = + bna_dma.msb; + dma += PAGE_SIZE; + } +} + +static struct bna_tx * +bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct bna_tx *tx = NULL; + + if (list_empty(&tx_mod->tx_free_q)) + return NULL; + if (type == BNA_TX_T_REGULAR) + tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe); + else + tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe); + list_del(&tx->qe); + tx->type = type; + + return tx; +} + +static void +bna_tx_free(struct bna_tx *tx) +{ + struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; + struct bna_txq *txq; + struct list_head *qe; + + while (!list_empty(&tx->txq_q)) { + txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); + txq->tcb = NULL; + txq->tx = NULL; + list_move_tail(&txq->qe, &tx_mod->txq_free_q); + } + + list_for_each(qe, &tx_mod->tx_active_q) { + if (qe == &tx->qe) { + list_del(&tx->qe); + break; + } + } + + tx->bna = NULL; + tx->priv = NULL; + + list_for_each_prev(qe, &tx_mod->tx_free_q) + if (((struct bna_tx *)qe)->rid < tx->rid) + break; + + list_add(&tx->qe, qe); +} + +static void +bna_tx_start(struct bna_tx *tx) +{ + tx->flags |= BNA_TX_F_ENET_STARTED; + if (tx->flags & BNA_TX_F_ENABLED) + bfa_fsm_send_event(tx, TX_E_START); +} + +static void +bna_tx_stop(struct bna_tx *tx) +{ + tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; + tx->stop_cbarg = &tx->bna->tx_mod; + + tx->flags &= ~BNA_TX_F_ENET_STARTED; + 
bfa_fsm_send_event(tx, TX_E_STOP); +} + +static void +bna_tx_fail(struct bna_tx *tx) +{ + tx->flags &= ~BNA_TX_F_ENET_STARTED; + bfa_fsm_send_event(tx, TX_E_FAIL); +} + +void +bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) +{ + struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; + struct bna_txq *txq = NULL; + int i; + + bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, + sizeof(struct bfi_enet_tx_cfg_rsp)); + + tx->hw_id = cfg_rsp->hw_id; + + for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe); + i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) { + /* Setup doorbells */ + txq->tcb->i_dbell->doorbell_addr = + tx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].i_dbell); + txq->tcb->q_dbell = + tx->bna->pcidev.pci_bar_kva + + ntohl(cfg_rsp->q_handles[i].q_dbell); + txq->hw_id = cfg_rsp->q_handles[i].hw_qid; + + /* Initialize producer/consumer indexes */ + (*txq->tcb->hw_consumer_index) = 0; + txq->tcb->producer_index = txq->tcb->consumer_index = 0; + } + + bfa_fsm_send_event(tx, TX_E_STARTED); +} + +void +bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) +{ + bfa_fsm_send_event(tx, TX_E_STOPPED); +} + +void +bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod) +{ + struct bna_tx *tx; + + list_for_each_entry(tx, &tx_mod->tx_active_q, qe) + bfa_fsm_send_event(tx, TX_E_BW_UPDATE); +} + +void +bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info) +{ + u32 q_size; + u32 page_count; + struct bna_mem_info *mem_info; + + res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = sizeof(struct bna_tcb); + mem_info->num = num_txq; + + q_size = txq_depth * BFI_TXQ_WI_SIZE; + q_size = ALIGN(q_size, PAGE_SIZE); + page_count = q_size >> PAGE_SHIFT; + + res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = page_count * sizeof(struct bna_dma_addr); + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_KVA; + mem_info->len = page_count * sizeof(void *); + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = PAGE_SIZE * page_count; + mem_info->num = num_txq; + + res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM; + mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info; + mem_info->mem_type = BNA_MEM_T_DMA; + mem_info->len = BFI_IBIDX_SIZE; + mem_info->num = num_txq; + + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR; + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type = + BNA_INTR_T_MSIX; + res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq; +} + +struct bna_tx * +bna_tx_create(struct bna *bna, struct bnad *bnad, + struct bna_tx_config *tx_cfg, + const struct bna_tx_event_cbfn *tx_cbfn, + struct bna_res_info *res_info, void *priv) +{ + struct bna_intr_info *intr_info; + struct bna_tx_mod *tx_mod = &bna->tx_mod; + struct bna_tx *tx; + struct bna_txq *txq; + int page_count; + int i; + + intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; + page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) / + PAGE_SIZE; + + /** + * 
Get resources + */ + + if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq)) + return NULL; + + /* Tx */ + + tx = bna_tx_get(tx_mod, tx_cfg->tx_type); + if (!tx) + return NULL; + tx->bna = bna; + tx->priv = priv; + + /* TxQs */ + + INIT_LIST_HEAD(&tx->txq_q); + for (i = 0; i < tx_cfg->num_txq; i++) { + if (list_empty(&tx_mod->txq_free_q)) + goto err_return; + + txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe); + list_move_tail(&txq->qe, &tx->txq_q); + txq->tx = tx; + } + + /* + * Initialize + */ + + /* Tx */ + + tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; + tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; + /* Following callbacks are mandatory */ + tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; + tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; + tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; + + list_add_tail(&tx->qe, &tx_mod->tx_active_q); + + tx->num_txq = tx_cfg->num_txq; + + tx->flags = 0; + if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { + switch (tx->type) { + case BNA_TX_T_REGULAR: + if (!(tx->bna->tx_mod.flags & + BNA_TX_MOD_F_ENET_LOOPBACK)) + tx->flags |= BNA_TX_F_ENET_STARTED; + break; + case BNA_TX_T_LOOPBACK: + if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) + tx->flags |= BNA_TX_F_ENET_STARTED; + break; + } + } + + /* TxQ */ + + i = 0; + list_for_each_entry(txq, &tx->txq_q, qe) { + txq->tcb = (struct bna_tcb *) + res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva; + txq->tx_packets = 0; + txq->tx_bytes = 0; + + /* IB */ + txq->ib.ib_seg_host_addr.lsb = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb; + txq->ib.ib_seg_host_addr.msb = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb; + txq->ib.ib_seg_host_addr_kva = + res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva; + txq->ib.intr_type = intr_info->intr_type; + txq->ib.intr_vector = (intr_info->num == 1) ? + intr_info->idl[0].vector : + intr_info->idl[i].vector; + if (intr_info->intr_type == BNA_INTR_T_INTX) + txq->ib.intr_vector = BIT(txq->ib.intr_vector); + txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo; + txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO; + txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT; + + /* TCB */ + + txq->tcb->q_depth = tx_cfg->txq_depth; + txq->tcb->unmap_q = (void *) + res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva; + txq->tcb->hw_consumer_index = + (u32 *)txq->ib.ib_seg_host_addr_kva; + txq->tcb->i_dbell = &txq->ib.door_bell; + txq->tcb->intr_type = txq->ib.intr_type; + txq->tcb->intr_vector = txq->ib.intr_vector; + txq->tcb->txq = txq; + txq->tcb->bnad = bnad; + txq->tcb->id = i; + + /* QPT, SWQPT, Pages */ + bna_txq_qpt_setup(txq, page_count, PAGE_SIZE, + &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i], + &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i], + &res_info[BNA_TX_RES_MEM_T_PAGE]. 
+ res_u.mem_info.mdl[i]); + + /* Callback to bnad for setting up TCB */ + if (tx->tcb_setup_cbfn) + (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); + + if (tx_cfg->num_txq == BFI_TX_MAX_PRIO) + txq->priority = txq->tcb->id; + else + txq->priority = tx_mod->default_prio; + + i++; + } + + tx->txf_vlan_id = 0; + + bfa_fsm_set_state(tx, bna_tx_sm_stopped); + + tx_mod->rid_mask |= BIT(tx->rid); + + return tx; + +err_return: + bna_tx_free(tx); + return NULL; +} + +void +bna_tx_destroy(struct bna_tx *tx) +{ + struct bna_txq *txq; + + list_for_each_entry(txq, &tx->txq_q, qe) + if (tx->tcb_destroy_cbfn) + (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); + + tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid); + bna_tx_free(tx); +} + +void +bna_tx_enable(struct bna_tx *tx) +{ + if (tx->fsm != bna_tx_sm_stopped) + return; + + tx->flags |= BNA_TX_F_ENABLED; + + if (tx->flags & BNA_TX_F_ENET_STARTED) + bfa_fsm_send_event(tx, TX_E_START); +} + +void +bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, + void (*cbfn)(void *, struct bna_tx *)) +{ + if (type == BNA_SOFT_CLEANUP) { + (*cbfn)(tx->bna->bnad, tx); + return; + } + + tx->stop_cbfn = cbfn; + tx->stop_cbarg = tx->bna->bnad; + + tx->flags &= ~BNA_TX_F_ENABLED; + + bfa_fsm_send_event(tx, TX_E_STOP); +} + +void +bna_tx_cleanup_complete(struct bna_tx *tx) +{ + bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); +} + +static void +bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) +{ + struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; + + bfa_wc_down(&tx_mod->tx_stop_wc); +} + +static void +bna_tx_mod_cb_tx_stopped_all(void *arg) +{ + struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg; + + if (tx_mod->stop_cbfn) + tx_mod->stop_cbfn(&tx_mod->bna->enet); + tx_mod->stop_cbfn = NULL; +} + +void +bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna, + struct bna_res_info *res_info) +{ + int i; + + tx_mod->bna = bna; + tx_mod->flags = 0; + + tx_mod->tx = (struct bna_tx *) + res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva; + tx_mod->txq = (struct bna_txq *) + res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva; + + INIT_LIST_HEAD(&tx_mod->tx_free_q); + INIT_LIST_HEAD(&tx_mod->tx_active_q); + + INIT_LIST_HEAD(&tx_mod->txq_free_q); + + for (i = 0; i < bna->ioceth.attr.num_txq; i++) { + tx_mod->tx[i].rid = i; + list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); + list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q); + } + + tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL; + tx_mod->default_prio = 0; + tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED; + tx_mod->iscsi_prio = -1; +} + +void +bna_tx_mod_uninit(struct bna_tx_mod *tx_mod) +{ + tx_mod->bna = NULL; +} + +void +bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct bna_tx *tx; + + tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED; + if (type == BNA_TX_T_LOOPBACK) + tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK; + + list_for_each_entry(tx, &tx_mod->tx_active_q, qe) + if (tx->type == type) + bna_tx_start(tx); +} + +void +bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type) +{ + struct bna_tx *tx; + + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; + + tx_mod->stop_cbfn = bna_enet_cb_tx_stopped; + + bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod); + + list_for_each_entry(tx, &tx_mod->tx_active_q, qe) + if (tx->type == type) { + bfa_wc_up(&tx_mod->tx_stop_wc); + bna_tx_stop(tx); + } + + bfa_wc_wait(&tx_mod->tx_stop_wc); +} + +void +bna_tx_mod_fail(struct bna_tx_mod *tx_mod) +{ + struct 
bna_tx *tx; + + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED; + tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK; + + list_for_each_entry(tx, &tx_mod->tx_active_q, qe) + bna_tx_fail(tx); +} + +void +bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) +{ + struct bna_txq *txq; + + list_for_each_entry(txq, &tx->txq_q, qe) + bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo); +} diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h new file mode 100644 index 0000000000..a5ebd7110e --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -0,0 +1,946 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BNA_TYPES_H__ +#define __BNA_TYPES_H__ + +#include "cna.h" +#include "bna_hw_defs.h" +#include "bfa_cee.h" +#include "bfa_msgq.h" + +/* Forward declarations */ + +struct bna_mcam_handle; +struct bna_txq; +struct bna_tx; +struct bna_rxq; +struct bna_cq; +struct bna_rx; +struct bna_rxf; +struct bna_enet; +struct bna; +struct bnad; + +/* Enums, primitive data types */ + +enum bna_status { + BNA_STATUS_T_DISABLED = 0, + BNA_STATUS_T_ENABLED = 1 +}; + +enum bna_cleanup_type { + BNA_HARD_CLEANUP = 0, + BNA_SOFT_CLEANUP = 1 +}; + +enum bna_cb_status { + BNA_CB_SUCCESS = 0, + BNA_CB_FAIL = 1, + BNA_CB_INTERRUPT = 2, + BNA_CB_BUSY = 3, + BNA_CB_INVALID_MAC = 4, + BNA_CB_MCAST_LIST_FULL = 5, + BNA_CB_UCAST_CAM_FULL = 6, + BNA_CB_WAITING = 7, + BNA_CB_NOT_EXEC = 8 +}; + +enum bna_res_type { + BNA_RES_T_MEM = 1, + BNA_RES_T_INTR = 2 +}; + +enum bna_mem_type { + BNA_MEM_T_KVA = 1, + BNA_MEM_T_DMA = 2 +}; + +enum bna_intr_type { + BNA_INTR_T_INTX = 1, + BNA_INTR_T_MSIX = 2 +}; + +enum bna_res_req_type { + BNA_RES_MEM_T_COM = 0, + BNA_RES_MEM_T_ATTR = 1, + BNA_RES_MEM_T_FWTRC = 2, + BNA_RES_MEM_T_STATS = 3, + BNA_RES_T_MAX +}; + +enum bna_mod_res_req_type { + BNA_MOD_RES_MEM_T_TX_ARRAY = 0, + BNA_MOD_RES_MEM_T_TXQ_ARRAY = 1, + BNA_MOD_RES_MEM_T_RX_ARRAY = 2, + BNA_MOD_RES_MEM_T_RXP_ARRAY = 3, + BNA_MOD_RES_MEM_T_RXQ_ARRAY = 4, + BNA_MOD_RES_MEM_T_UCMAC_ARRAY = 5, + BNA_MOD_RES_MEM_T_MCMAC_ARRAY = 6, + BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY = 7, + BNA_MOD_RES_T_MAX +}; + +enum bna_tx_res_req_type { + BNA_TX_RES_MEM_T_TCB = 0, + BNA_TX_RES_MEM_T_UNMAPQ = 1, + BNA_TX_RES_MEM_T_QPT = 2, + BNA_TX_RES_MEM_T_SWQPT = 3, + BNA_TX_RES_MEM_T_PAGE = 4, + BNA_TX_RES_MEM_T_IBIDX = 5, + BNA_TX_RES_INTR_T_TXCMPL = 6, + BNA_TX_RES_T_MAX, +}; + +enum bna_rx_mem_type { + BNA_RX_RES_MEM_T_CCB = 0, /* CQ context */ + BNA_RX_RES_MEM_T_RCB = 1, /* CQ context */ + BNA_RX_RES_MEM_T_UNMAPHQ = 2, + BNA_RX_RES_MEM_T_UNMAPDQ = 3, + BNA_RX_RES_MEM_T_CQPT = 4, + BNA_RX_RES_MEM_T_CSWQPT = 5, + BNA_RX_RES_MEM_T_CQPT_PAGE = 6, + BNA_RX_RES_MEM_T_HQPT = 7, + BNA_RX_RES_MEM_T_DQPT = 8, + BNA_RX_RES_MEM_T_HSWQPT = 9, + BNA_RX_RES_MEM_T_DSWQPT = 10, + BNA_RX_RES_MEM_T_DPAGE = 11, + BNA_RX_RES_MEM_T_HPAGE = 12, + BNA_RX_RES_MEM_T_IBIDX = 13, + BNA_RX_RES_MEM_T_RIT = 14, + BNA_RX_RES_T_INTR = 15, + BNA_RX_RES_T_MAX = 16 +}; + +enum bna_tx_type { + BNA_TX_T_REGULAR = 0, + BNA_TX_T_LOOPBACK = 1, +}; + +enum bna_tx_flags { + BNA_TX_F_ENET_STARTED = 1, + BNA_TX_F_ENABLED = 2, + BNA_TX_F_BW_UPDATED = 8, +}; + +enum bna_tx_mod_flags { + BNA_TX_MOD_F_ENET_STARTED = 1, + BNA_TX_MOD_F_ENET_LOOPBACK = 2, +}; + +enum bna_rx_type { + 
BNA_RX_T_REGULAR = 0, + BNA_RX_T_LOOPBACK = 1, +}; + +enum bna_rxp_type { + BNA_RXP_SINGLE = 1, + BNA_RXP_SLR = 2, + BNA_RXP_HDS = 3 +}; + +enum bna_rxmode { + BNA_RXMODE_PROMISC = 1, + BNA_RXMODE_DEFAULT = 2, + BNA_RXMODE_ALLMULTI = 4 +}; + +enum bna_rx_event { + RX_E_START = 1, + RX_E_STOP = 2, + RX_E_FAIL = 3, + RX_E_STARTED = 4, + RX_E_STOPPED = 5, + RX_E_RXF_STARTED = 6, + RX_E_RXF_STOPPED = 7, + RX_E_CLEANUP_DONE = 8, +}; + +enum bna_rx_flags { + BNA_RX_F_ENET_STARTED = 1, + BNA_RX_F_ENABLED = 2, +}; + +enum bna_rx_mod_flags { + BNA_RX_MOD_F_ENET_STARTED = 1, + BNA_RX_MOD_F_ENET_LOOPBACK = 2, +}; + +enum bna_rxf_event { + RXF_E_START = 1, + RXF_E_STOP = 2, + RXF_E_FAIL = 3, + RXF_E_CONFIG = 4, + RXF_E_FW_RESP = 7, +}; + +enum bna_enet_type { + BNA_ENET_T_REGULAR = 0, + BNA_ENET_T_LOOPBACK_INTERNAL = 1, + BNA_ENET_T_LOOPBACK_EXTERNAL = 2, +}; + +enum bna_link_status { + BNA_LINK_DOWN = 0, + BNA_LINK_UP = 1, + BNA_CEE_UP = 2 +}; + +enum bna_ethport_flags { + BNA_ETHPORT_F_ADMIN_UP = 1, + BNA_ETHPORT_F_PORT_ENABLED = 2, + BNA_ETHPORT_F_RX_STARTED = 4, +}; + +enum bna_enet_flags { + BNA_ENET_F_IOCETH_READY = 1, + BNA_ENET_F_ENABLED = 2, + BNA_ENET_F_PAUSE_CHANGED = 4, + BNA_ENET_F_MTU_CHANGED = 8 +}; + +enum bna_rss_flags { + BNA_RSS_F_RIT_PENDING = 1, + BNA_RSS_F_CFG_PENDING = 2, + BNA_RSS_F_STATUS_PENDING = 4, +}; + +enum bna_mod_flags { + BNA_MOD_F_INIT_DONE = 1, +}; + +enum bna_pkt_rates { + BNA_PKT_RATE_10K = 10000, + BNA_PKT_RATE_20K = 20000, + BNA_PKT_RATE_30K = 30000, + BNA_PKT_RATE_40K = 40000, + BNA_PKT_RATE_50K = 50000, + BNA_PKT_RATE_60K = 60000, + BNA_PKT_RATE_70K = 70000, + BNA_PKT_RATE_80K = 80000, +}; + +enum bna_dim_load_types { + BNA_LOAD_T_HIGH_4 = 0, /* 80K <= r */ + BNA_LOAD_T_HIGH_3 = 1, /* 60K <= r < 80K */ + BNA_LOAD_T_HIGH_2 = 2, /* 50K <= r < 60K */ + BNA_LOAD_T_HIGH_1 = 3, /* 40K <= r < 50K */ + BNA_LOAD_T_LOW_1 = 4, /* 30K <= r < 40K */ + BNA_LOAD_T_LOW_2 = 5, /* 20K <= r < 30K */ + BNA_LOAD_T_LOW_3 = 6, /* 10K <= r < 20K */ + BNA_LOAD_T_LOW_4 = 7, /* r < 10K */ + BNA_LOAD_T_MAX = 8 +}; + +enum bna_dim_bias_types { + BNA_BIAS_T_SMALL = 0, /* small pkts > (large pkts * 2) */ + BNA_BIAS_T_LARGE = 1, /* Not BNA_BIAS_T_SMALL */ + BNA_BIAS_T_MAX = 2 +}; + +#define BNA_MAX_NAME_SIZE 64 +struct bna_ident { + int id; + char name[BNA_MAX_NAME_SIZE]; +}; + +struct bna_mac { + /* This should be the first one */ + struct list_head qe; + u8 addr[ETH_ALEN]; + struct bna_mcam_handle *handle; +}; + +struct bna_mem_descr { + u32 len; + void *kva; + struct bna_dma_addr dma; +}; + +struct bna_mem_info { + enum bna_mem_type mem_type; + u32 len; + u32 num; + u32 align_sz; /* 0/1 = no alignment */ + struct bna_mem_descr *mdl; + void *cookie; /* For bnad to unmap dma later */ +}; + +struct bna_intr_descr { + int vector; +}; + +struct bna_intr_info { + enum bna_intr_type intr_type; + int num; + struct bna_intr_descr *idl; +}; + +union bna_res_u { + struct bna_mem_info mem_info; + struct bna_intr_info intr_info; +}; + +struct bna_res_info { + enum bna_res_type res_type; + union bna_res_u res_u; +}; + +/* HW QPT */ +struct bna_qpt { + struct bna_dma_addr hw_qpt_ptr; + void *kv_qpt_ptr; + u32 page_count; + u32 page_size; +}; + +struct bna_attr { + bool fw_query_complete; + int num_txq; + int num_rxp; + int num_ucmac; + int num_mcmac; + int max_rit_size; +}; + +/* IOCEth */ + +enum bna_ioceth_event; + +struct bna_ioceth { + void (*fsm)(struct bna_ioceth *s, enum bna_ioceth_event e); + struct bfa_ioc ioc; + + struct bna_attr attr; + struct bfa_msgq_cmd_entry msgq_cmd; + struct 
bfi_enet_attr_req attr_req; + + void (*stop_cbfn)(struct bnad *bnad); + struct bnad *stop_cbarg; + + struct bna *bna; +}; + +/* Enet */ + +/* Pause configuration */ +struct bna_pause_config { + enum bna_status tx_pause; + enum bna_status rx_pause; +}; + +enum bna_enet_event; + +struct bna_enet { + void (*fsm)(struct bna_enet *s, enum bna_enet_event e); + enum bna_enet_flags flags; + + enum bna_enet_type type; + + struct bna_pause_config pause_config; + int mtu; + + /* Callback for bna_enet_disable(), enet_stop() */ + void (*stop_cbfn)(void *); + void *stop_cbarg; + + /* Callback for bna_enet_mtu_set() */ + void (*mtu_cbfn)(struct bnad *); + + struct bfa_wc chld_stop_wc; + + struct bfa_msgq_cmd_entry msgq_cmd; + struct bfi_enet_set_pause_req pause_req; + + struct bna *bna; +}; + +/* Ethport */ + +enum bna_ethport_event; + +struct bna_ethport { + void (*fsm)(struct bna_ethport *s, enum bna_ethport_event e); + enum bna_ethport_flags flags; + + enum bna_link_status link_status; + + int rx_started_count; + + void (*stop_cbfn)(struct bna_enet *); + + void (*adminup_cbfn)(struct bnad *, enum bna_cb_status); + + void (*link_cbfn)(struct bnad *, enum bna_link_status); + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_enable_req admin_req; + struct bfi_enet_diag_lb_req lpbk_req; + } bfi_enet_cmd; + + struct bna *bna; +}; + +/* Interrupt Block */ + +/* Doorbell structure */ +struct bna_ib_dbell { + void __iomem *doorbell_addr; + u32 doorbell_ack; +}; + +/* IB structure */ +struct bna_ib { + struct bna_dma_addr ib_seg_host_addr; + void *ib_seg_host_addr_kva; + + struct bna_ib_dbell door_bell; + + enum bna_intr_type intr_type; + int intr_vector; + + u8 coalescing_timeo; /* Unit is 5usec. */ + + int interpkt_count; + int interpkt_timeo; +}; + +/* Tx object */ + +/* Tx datapath control structure */ +#define BNA_Q_NAME_SIZE 16 +struct bna_tcb { + /* Fast path */ + void **sw_qpt; + void *sw_q; + void *unmap_q; + u32 producer_index; + u32 consumer_index; + volatile u32 *hw_consumer_index; + u32 q_depth; + void __iomem *q_dbell; + struct bna_ib_dbell *i_dbell; + /* Control path */ + struct bna_txq *txq; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + enum bna_intr_type intr_type; + int intr_vector; + u8 priority; /* Current priority */ + unsigned long flags; /* Used by bnad as required */ + int id; + char name[BNA_Q_NAME_SIZE]; +}; + +/* TxQ QPT and configuration */ +struct bna_txq { + /* This should be the first one */ + struct list_head qe; + + u8 priority; + + struct bna_qpt qpt; + struct bna_tcb *tcb; + struct bna_ib ib; + + struct bna_tx *tx; + + int hw_id; + + u64 tx_packets; + u64 tx_bytes; +}; + +/* Tx object */ + +enum bna_tx_event; + +struct bna_tx { + /* This should be the first one */ + struct list_head qe; + int rid; + int hw_id; + + void (*fsm)(struct bna_tx *s, enum bna_tx_event e); + enum bna_tx_flags flags; + + enum bna_tx_type type; + int num_txq; + + struct list_head txq_q; + u16 txf_vlan_id; + + /* Tx event handlers */ + void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *); + void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *); + void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *); + + /* callback for bna_tx_disable(), bna_tx_stop() */ + void (*stop_cbfn)(void *arg, struct bna_tx *tx); + void *stop_cbarg; + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_tx_cfg_req cfg_req; + struct bfi_enet_req req; + struct 
bfi_enet_tx_cfg_rsp cfg_rsp; + } bfi_enet_cmd; + + struct bna *bna; + void *priv; /* bnad's cookie */ +}; + +/* Tx object configuration used during creation */ +struct bna_tx_config { + int num_txq; + int txq_depth; + int coalescing_timeo; + enum bna_tx_type tx_type; +}; + +struct bna_tx_event_cbfn { + /* Optional */ + void (*tcb_setup_cbfn)(struct bnad *, struct bna_tcb *); + void (*tcb_destroy_cbfn)(struct bnad *, struct bna_tcb *); + /* Mandatory */ + void (*tx_stall_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_resume_cbfn)(struct bnad *, struct bna_tx *); + void (*tx_cleanup_cbfn)(struct bnad *, struct bna_tx *); +}; + +/* Tx module - keeps track of free, active tx objects */ +struct bna_tx_mod { + struct bna_tx *tx; /* BFI_MAX_TXQ entries */ + struct bna_txq *txq; /* BFI_MAX_TXQ entries */ + + struct list_head tx_free_q; + struct list_head tx_active_q; + + struct list_head txq_free_q; + + /* callback for bna_tx_mod_stop() */ + void (*stop_cbfn)(struct bna_enet *enet); + + struct bfa_wc tx_stop_wc; + + enum bna_tx_mod_flags flags; + + u8 prio_map; + int default_prio; + int iscsi_over_cee; + int iscsi_prio; + int prio_reconfigured; + + u32 rid_mask; + + struct bna *bna; +}; + +/* Rx object */ + +/* Rx datapath control structure */ +struct bna_rcb { + /* Fast path */ + void **sw_qpt; + void *sw_q; + void *unmap_q; + u32 producer_index; + u32 consumer_index; + u32 q_depth; + void __iomem *q_dbell; + /* Control path */ + struct bna_rxq *rxq; + struct bna_ccb *ccb; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + unsigned long flags; + int id; +}; + +/* RxQ structure - QPT, configuration */ +struct bna_rxq { + struct list_head qe; + + int buffer_size; + int q_depth; + u32 num_vecs; + enum bna_status multi_buffer; + + struct bna_qpt qpt; + struct bna_rcb *rcb; + + struct bna_rxp *rxp; + struct bna_rx *rx; + + int hw_id; + + u64 rx_packets; + u64 rx_bytes; + u64 rx_packets_with_error; + u64 rxbuf_alloc_failed; + u64 rxbuf_map_failed; +}; + +/* RxQ pair */ +union bna_rxq_u { + struct { + struct bna_rxq *hdr; + struct bna_rxq *data; + } hds; + struct { + struct bna_rxq *small; + struct bna_rxq *large; + } slr; + struct { + struct bna_rxq *only; + struct bna_rxq *reserved; + } single; +}; + +/* Packet rate for Dynamic Interrupt Moderation */ +struct bna_pkt_rate { + u32 small_pkt_cnt; + u32 large_pkt_cnt; +}; + +/* Completion control structure */ +struct bna_ccb { + /* Fast path */ + void **sw_qpt; + void *sw_q; + u32 producer_index; + volatile u32 *hw_producer_index; + u32 q_depth; + struct bna_ib_dbell *i_dbell; + struct bna_rcb *rcb[2]; + void *ctrl; /* For bnad */ + struct bna_pkt_rate pkt_rate; + u32 pkts_una; + u32 bytes_per_intr; + + /* Control path */ + struct bna_cq *cq; + struct bnad *bnad; + void *priv; /* BNAD's cookie */ + enum bna_intr_type intr_type; + int intr_vector; + u8 rx_coalescing_timeo; /* For NAPI */ + int id; + char name[BNA_Q_NAME_SIZE]; +}; + +/* CQ QPT, configuration */ +struct bna_cq { + struct bna_qpt qpt; + struct bna_ccb *ccb; + + struct bna_ib ib; + + struct bna_rx *rx; +}; + +struct bna_rss_config { + enum bfi_enet_rss_type hash_type; + u8 hash_mask; + u32 toeplitz_hash_key[BFI_ENET_RSS_KEY_LEN]; +}; + +struct bna_hds_config { + enum bfi_enet_hds_type hdr_type; + int forced_offset; +}; + +/* Rx object configuration used during creation */ +struct bna_rx_config { + enum bna_rx_type rx_type; + int num_paths; + enum bna_rxp_type rxp_type; + int coalescing_timeo; + /* + * Small/Large (or Header/Data) buffer size to be configured + * for SLR and HDS 
queue type. + */ + u32 frame_size; + + /* header or small queue */ + u32 q1_depth; + u32 q1_buf_size; + + /* data or large queue */ + u32 q0_depth; + u32 q0_buf_size; + u32 q0_num_vecs; + enum bna_status q0_multi_buf; + + enum bna_status rss_status; + struct bna_rss_config rss_config; + + struct bna_hds_config hds_config; + + enum bna_status vlan_strip_status; +}; + +/* Rx Path structure - one per MSIX vector/CPU */ +struct bna_rxp { + /* This should be the first one */ + struct list_head qe; + + enum bna_rxp_type type; + union bna_rxq_u rxq; + struct bna_cq cq; + + struct bna_rx *rx; + + /* MSI-x vector number for configuring RSS */ + int vector; + int hw_id; +}; + +/* RxF structure (hardware Rx Function) */ + +enum bna_rxf_event; + +struct bna_rxf { + void (*fsm)(struct bna_rxf *s, enum bna_rxf_event e); + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_enable_req req; + struct bfi_enet_rss_cfg_req rss_req; + struct bfi_enet_rit_req rit_req; + struct bfi_enet_rx_vlan_req vlan_req; + struct bfi_enet_mcast_add_req mcast_add_req; + struct bfi_enet_mcast_del_req mcast_del_req; + struct bfi_enet_ucast_req ucast_req; + } bfi_enet_cmd; + + /* callback for bna_rxf_start() */ + void (*start_cbfn) (struct bna_rx *rx); + struct bna_rx *start_cbarg; + + /* callback for bna_rxf_stop() */ + void (*stop_cbfn) (struct bna_rx *rx); + struct bna_rx *stop_cbarg; + + /** + * callback for: + * bna_rxf_ucast_set() + * bna_rxf_{ucast/mcast}_add(), + * bna_rxf_{ucast/mcast}_del(), + * bna_rxf_mode_set() + */ + void (*cam_fltr_cbfn)(struct bnad *bnad, struct bna_rx *rx); + struct bnad *cam_fltr_cbarg; + + /* List of unicast addresses yet to be applied to h/w */ + struct list_head ucast_pending_add_q; + struct list_head ucast_pending_del_q; + struct bna_mac *ucast_pending_mac; + int ucast_pending_set; + /* ucast addresses applied to the h/w */ + struct list_head ucast_active_q; + struct bna_mac ucast_active_mac; + int ucast_active_set; + + /* List of multicast addresses yet to be applied to h/w */ + struct list_head mcast_pending_add_q; + struct list_head mcast_pending_del_q; + /* multicast addresses applied to the h/w */ + struct list_head mcast_active_q; + struct list_head mcast_handle_q; + + /* Rx modes yet to be applied to h/w */ + enum bna_rxmode rxmode_pending; + enum bna_rxmode rxmode_pending_bitmask; + /* Rx modes applied to h/w */ + enum bna_rxmode rxmode_active; + + u8 vlan_pending_bitmask; + enum bna_status vlan_filter_status; + u32 vlan_filter_table[(BFI_ENET_VLAN_ID_MAX) / 32]; + bool vlan_strip_pending; + enum bna_status vlan_strip_status; + + enum bna_rss_flags rss_pending; + enum bna_status rss_status; + struct bna_rss_config rss_cfg; + u8 *rit; + int rit_size; + + struct bna_rx *rx; +}; + +/* Rx object */ + +enum bna_rx_event; + +struct bna_rx { + /* This should be the first one */ + struct list_head qe; + int rid; + int hw_id; + + void (*fsm)(struct bna_rx *s, enum bna_rx_event e); + + enum bna_rx_type type; + + int num_paths; + struct list_head rxp_q; + + struct bna_hds_config hds_cfg; + + struct bna_rxf rxf; + + enum bna_rx_flags rx_flags; + + struct bfa_msgq_cmd_entry msgq_cmd; + union { + struct bfi_enet_rx_cfg_req cfg_req; + struct bfi_enet_req req; + struct bfi_enet_rx_cfg_rsp cfg_rsp; + } bfi_enet_cmd; + + /* Rx event handlers */ + void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *); + void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); + void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); + void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); + 
void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); + + /* callback for bna_rx_disable(), bna_rx_stop() */ + void (*stop_cbfn)(void *arg, struct bna_rx *rx); + void *stop_cbarg; + + struct bna *bna; + void *priv; /* bnad's cookie */ +}; + +struct bna_rx_event_cbfn { + /* Optional */ + void (*rcb_setup_cbfn)(struct bnad *, struct bna_rcb *); + void (*rcb_destroy_cbfn)(struct bnad *, struct bna_rcb *); + void (*ccb_setup_cbfn)(struct bnad *, struct bna_ccb *); + void (*ccb_destroy_cbfn)(struct bnad *, struct bna_ccb *); + void (*rx_stall_cbfn)(struct bnad *, struct bna_rx *); + /* Mandatory */ + void (*rx_cleanup_cbfn)(struct bnad *, struct bna_rx *); + void (*rx_post_cbfn)(struct bnad *, struct bna_rx *); +}; + +/* Rx module - keeps track of free, active rx objects */ +struct bna_rx_mod { + struct bna *bna; /* back pointer to parent */ + struct bna_rx *rx; /* BFI_MAX_RXQ entries */ + struct bna_rxp *rxp; /* BFI_MAX_RXQ entries */ + struct bna_rxq *rxq; /* BFI_MAX_RXQ entries */ + + struct list_head rx_free_q; + struct list_head rx_active_q; + int rx_free_count; + + struct list_head rxp_free_q; + int rxp_free_count; + + struct list_head rxq_free_q; + int rxq_free_count; + + enum bna_rx_mod_flags flags; + + /* callback for bna_rx_mod_stop() */ + void (*stop_cbfn)(struct bna_enet *enet); + + struct bfa_wc rx_stop_wc; + u32 dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX]; + u32 rid_mask; +}; + +/* CAM */ + +struct bna_ucam_mod { + struct bna_mac *ucmac; /* num_ucmac * 2 entries */ + struct list_head free_q; + struct list_head del_q; + + struct bna *bna; +}; + +struct bna_mcam_handle { + /* This should be the first one */ + struct list_head qe; + int handle; + int refcnt; +}; + +struct bna_mcam_mod { + struct bna_mac *mcmac; /* num_mcmac * 2 entries */ + struct bna_mcam_handle *mchandle; /* num_mcmac entries */ + struct list_head free_q; + struct list_head del_q; + struct list_head free_handle_q; + + struct bna *bna; +}; + +/* Statistics */ + +struct bna_stats { + struct bna_dma_addr hw_stats_dma; + struct bfi_enet_stats *hw_stats_kva; + struct bfi_enet_stats hw_stats; +}; + +struct bna_stats_mod { + bool ioc_ready; + bool stats_get_busy; + bool stats_clr_busy; + struct bfa_msgq_cmd_entry stats_get_cmd; + struct bfa_msgq_cmd_entry stats_clr_cmd; + struct bfi_enet_stats_req stats_get; + struct bfi_enet_stats_req stats_clr; +}; + +/* BNA */ + +struct bna { + struct bna_ident ident; + struct bfa_pcidev pcidev; + + struct bna_reg regs; + struct bna_bit_defn bits; + + struct bna_stats stats; + + struct bna_ioceth ioceth; + struct bfa_cee cee; + struct bfa_flash flash; + struct bfa_msgq msgq; + + struct bna_ethport ethport; + struct bna_enet enet; + struct bna_stats_mod stats_mod; + + struct bna_tx_mod tx_mod; + struct bna_rx_mod rx_mod; + struct bna_ucam_mod ucam_mod; + struct bna_mcam_mod mcam_mod; + + enum bna_mod_flags mod_flags; + + int default_mode_rid; + int promisc_rid; + + struct bnad *bnad; +}; +#endif /* __BNA_TYPES_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c new file mode 100644 index 0000000000..31191b520b --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -0,0 +1,3848 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. 
+ * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include <linux/bitops.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/etherdevice.h> +#include <linux/in.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/if_ether.h> +#include <linux/ip.h> +#include <linux/prefetch.h> +#include <linux/module.h> + +#include "bnad.h" +#include "bna.h" +#include "cna.h" + +static DEFINE_MUTEX(bnad_fwimg_mutex); + +/* + * Module params + */ +static uint bnad_msix_disable; +module_param(bnad_msix_disable, uint, 0444); +MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode"); + +static uint bnad_ioc_auto_recover = 1; +module_param(bnad_ioc_auto_recover, uint, 0444); +MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery"); + +static uint bna_debugfs_enable = 1; +module_param(bna_debugfs_enable, uint, 0644); +MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1," + " Range[false:0|true:1]"); + +/* + * Global variables + */ +static u32 bnad_rxqs_per_cq = 2; +static atomic_t bna_id; +static const u8 bnad_bcast_addr[] __aligned(2) = + { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + +/* + * Local MACROS + */ +#define BNAD_GET_MBOX_IRQ(_bnad) \ + (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \ + ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \ + ((_bnad)->pcidev->irq)) + +#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \ +do { \ + (_res_info)->res_type = BNA_RES_T_MEM; \ + (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \ + (_res_info)->res_u.mem_info.num = (_num); \ + (_res_info)->res_u.mem_info.len = (_size); \ +} while (0) + +/* + * Reinitialize completions in CQ, once Rx is taken down + */ +static void +bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bna_cq_entry *cmpl; + int i; + + for (i = 0; i < ccb->q_depth; i++) { + cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i]; + cmpl->valid = 0; + } +} + +/* Tx Datapath functions */ + + +/* Caller should ensure that the entry at unmap_q[index] is valid */ +static u32 +bnad_tx_buff_unmap(struct bnad *bnad, + struct bnad_tx_unmap *unmap_q, + u32 q_depth, u32 index) +{ + struct bnad_tx_unmap *unmap; + struct sk_buff *skb; + int vector, nvecs; + + unmap = &unmap_q[index]; + nvecs = unmap->nvecs; + + skb = unmap->skb; + unmap->skb = NULL; + unmap->nvecs = 0; + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vectors[0], dma_addr), + skb_headlen(skb), DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0); + nvecs--; + + vector = 0; + while (nvecs) { + vector++; + if (vector == BFI_TX_MAX_VECTORS_PER_WI) { + vector = 0; + BNA_QE_INDX_INC(index, q_depth); + unmap = &unmap_q[index]; + } + + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vectors[vector], dma_addr), + dma_unmap_len(&unmap->vectors[vector], dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0); + nvecs--; + } + + BNA_QE_INDX_INC(index, q_depth); + + return index; +} + +/* + * Frees all pending Tx Bufs + * At this point no activity is expected on the Q, + * so DMA unmap & freeing is fine. 
+ */ +static void +bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_unmap *unmap_q = tcb->unmap_q; + struct sk_buff *skb; + int i; + + for (i = 0; i < tcb->q_depth; i++) { + skb = unmap_q[i].skb; + if (!skb) + continue; + bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i); + + dev_kfree_skb_any(skb); + } +} + +/* + * bnad_txcmpl_process : Frees the Tx bufs on Tx completion + * Can be called in a) Interrupt context + * b) Sending context + */ +static u32 +bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) +{ + u32 sent_packets = 0, sent_bytes = 0; + u32 wis, unmap_wis, hw_cons, cons, q_depth; + struct bnad_tx_unmap *unmap_q = tcb->unmap_q; + struct bnad_tx_unmap *unmap; + struct sk_buff *skb; + + /* Just return if TX is stopped */ + if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + return 0; + + hw_cons = *(tcb->hw_consumer_index); + rmb(); + cons = tcb->consumer_index; + q_depth = tcb->q_depth; + + wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth); + BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth))); + + while (wis) { + unmap = &unmap_q[cons]; + + skb = unmap->skb; + + sent_packets++; + sent_bytes += skb->len; + + unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs); + wis -= unmap_wis; + + cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons); + dev_kfree_skb_any(skb); + } + + /* Update consumer pointers. */ + tcb->consumer_index = hw_cons; + + tcb->txq->tx_packets += sent_packets; + tcb->txq->tx_bytes += sent_bytes; + + return sent_packets; +} + +static u32 +bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct net_device *netdev = bnad->netdev; + u32 sent = 0; + + if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) + return 0; + + sent = bnad_txcmpl_process(bnad, tcb); + if (sent) { + if (netif_queue_stopped(netdev) && + netif_carrier_ok(netdev) && + BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= + BNAD_NETIF_WAKE_THRESHOLD) { + if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) { + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + } + + if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + bna_ib_ack(tcb->i_dbell, sent); + + smp_mb__before_atomic(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + + return sent; +} + +/* MSIX Tx Completion Handler */ +static irqreturn_t +bnad_msix_tx(int irq, void *data) +{ + struct bna_tcb *tcb = (struct bna_tcb *)data; + struct bnad *bnad = tcb->bnad; + + bnad_tx_complete(bnad, tcb); + + return IRQ_HANDLED; +} + +static inline void +bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + + unmap_q->reuse_pi = -1; + unmap_q->alloc_order = -1; + unmap_q->map_size = 0; + unmap_q->type = BNAD_RXBUF_NONE; +} + +/* Default is page-based allocation. Multi-buffer support - TBD */ +static int +bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + int order; + + bnad_rxq_alloc_uninit(bnad, rcb); + + order = get_order(rcb->rxq->buffer_size); + + unmap_q->type = BNAD_RXBUF_PAGE; + + if (bna_is_small_rxq(rcb->id)) { + unmap_q->alloc_order = 0; + unmap_q->map_size = rcb->rxq->buffer_size; + } else { + if (rcb->rxq->multi_buffer) { + unmap_q->alloc_order = 0; + unmap_q->map_size = rcb->rxq->buffer_size; + unmap_q->type = BNAD_RXBUF_MULTI_BUFF; + } else { + unmap_q->alloc_order = order; + unmap_q->map_size = + (rcb->rxq->buffer_size > 2048) ? 
+ PAGE_SIZE << order : 2048; + } + } + + BUG_ON((PAGE_SIZE << order) % unmap_q->map_size); + + return 0; +} + +static inline void +bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap) +{ + if (!unmap->page) + return; + + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + put_page(unmap->page); + unmap->page = NULL; + dma_unmap_addr_set(&unmap->vector, dma_addr, 0); + unmap->vector.len = 0; +} + +static inline void +bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap) +{ + if (!unmap->skb) + return; + + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + dev_kfree_skb_any(unmap->skb); + unmap->skb = NULL; + dma_unmap_addr_set(&unmap->vector, dma_addr, 0); + unmap->vector.len = 0; +} + +static void +bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + int i; + + for (i = 0; i < rcb->q_depth; i++) { + struct bnad_rx_unmap *unmap = &unmap_q->unmap[i]; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_cleanup_skb(bnad, unmap); + else + bnad_rxq_cleanup_page(bnad, unmap); + } + bnad_rxq_alloc_uninit(bnad, rcb); +} + +static u32 +bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) +{ + u32 alloced, prod, q_depth; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_rx_unmap *unmap, *prev; + struct bna_rxq_entry *rxent; + struct page *page; + u32 page_offset, alloc_size; + dma_addr_t dma_addr; + + prod = rcb->producer_index; + q_depth = rcb->q_depth; + + alloc_size = PAGE_SIZE << unmap_q->alloc_order; + alloced = 0; + + while (nalloc--) { + unmap = &unmap_q->unmap[prod]; + + if (unmap_q->reuse_pi < 0) { + page = alloc_pages(GFP_ATOMIC | __GFP_COMP, + unmap_q->alloc_order); + page_offset = 0; + } else { + prev = &unmap_q->unmap[unmap_q->reuse_pi]; + page = prev->page; + page_offset = prev->page_offset + unmap_q->map_size; + get_page(page); + } + + if (unlikely(!page)) { + BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); + rcb->rxq->rxbuf_alloc_failed++; + goto finishing; + } + + dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset, + unmap_q->map_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + put_page(page); + BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); + rcb->rxq->rxbuf_map_failed++; + goto finishing; + } + + unmap->page = page; + unmap->page_offset = page_offset; + dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); + unmap->vector.len = unmap_q->map_size; + page_offset += unmap_q->map_size; + + if (page_offset < alloc_size) + unmap_q->reuse_pi = prod; + else + unmap_q->reuse_pi = -1; + + rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; + BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); + BNA_QE_INDX_INC(prod, q_depth); + alloced++; + } + +finishing: + if (likely(alloced)) { + rcb->producer_index = prod; + smp_mb(); + if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) + bna_rxq_prod_indx_doorbell(rcb); + } + + return alloced; +} + +static u32 +bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc) +{ + u32 alloced, prod, q_depth, buff_sz; + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + struct bnad_rx_unmap *unmap; + struct bna_rxq_entry *rxent; + struct sk_buff *skb; + dma_addr_t dma_addr; + + buff_sz = rcb->rxq->buffer_size; + prod = rcb->producer_index; + q_depth = rcb->q_depth; + + alloced = 0; + while (nalloc--) { + unmap = &unmap_q->unmap[prod]; + + skb = 
netdev_alloc_skb_ip_align(bnad->netdev, buff_sz); + + if (unlikely(!skb)) { + BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed); + rcb->rxq->rxbuf_alloc_failed++; + goto finishing; + } + + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + buff_sz, DMA_FROM_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, rxbuf_map_failed); + rcb->rxq->rxbuf_map_failed++; + goto finishing; + } + + unmap->skb = skb; + dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr); + unmap->vector.len = buff_sz; + + rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod]; + BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr); + BNA_QE_INDX_INC(prod, q_depth); + alloced++; + } + +finishing: + if (likely(alloced)) { + rcb->producer_index = prod; + smp_mb(); + if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags))) + bna_rxq_prod_indx_doorbell(rcb); + } + + return alloced; +} + +static inline void +bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb) +{ + struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q; + u32 to_alloc; + + to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth); + if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)) + return; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_refill_skb(bnad, rcb, to_alloc); + else + bnad_rxq_refill_page(bnad, rcb, to_alloc); +} + +#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \ + BNA_CQ_EF_L4_CKSUM_OK) + +#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_tcp6 (BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \ + BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) +#define flags_udp6 (BNA_CQ_EF_IPV6 | \ + BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK) + +static void +bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb, + u32 sop_ci, u32 nvecs) +{ + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap; + u32 ci, vec; + + unmap_q = rcb->unmap_q; + for (vec = 0, ci = sop_ci; vec < nvecs; vec++) { + unmap = &unmap_q->unmap[ci]; + BNA_QE_INDX_INC(ci, rcb->q_depth); + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_rxq_cleanup_skb(bnad, unmap); + else + bnad_rxq_cleanup_page(bnad, unmap); + } +} + +static void +bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs) +{ + struct bna_rcb *rcb; + struct bnad *bnad; + struct bnad_rx_unmap_q *unmap_q; + struct bna_cq_entry *cq, *cmpl; + u32 ci, pi, totlen = 0; + + cq = ccb->sw_q; + pi = ccb->producer_index; + cmpl = &cq[pi]; + + rcb = bna_is_small_rxq(cmpl->rxq_id) ? 
ccb->rcb[1] : ccb->rcb[0]; + unmap_q = rcb->unmap_q; + bnad = rcb->bnad; + ci = rcb->consumer_index; + + /* prefetch header */ + prefetch(page_address(unmap_q->unmap[ci].page) + + unmap_q->unmap[ci].page_offset); + + while (nvecs--) { + struct bnad_rx_unmap *unmap; + u32 len; + + unmap = &unmap_q->unmap[ci]; + BNA_QE_INDX_INC(ci, rcb->q_depth); + + dma_unmap_page(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + + len = ntohs(cmpl->length); + skb->truesize += unmap->vector.len; + totlen += len; + + skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, + unmap->page, unmap->page_offset, len); + + unmap->page = NULL; + unmap->vector.len = 0; + + BNA_QE_INDX_INC(pi, ccb->q_depth); + cmpl = &cq[pi]; + } + + skb->len += totlen; + skb->data_len += totlen; +} + +static inline void +bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb, + struct bnad_rx_unmap *unmap, u32 len) +{ + prefetch(skb->data); + + dma_unmap_single(&bnad->pcidev->dev, + dma_unmap_addr(&unmap->vector, dma_addr), + unmap->vector.len, DMA_FROM_DEVICE); + + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, bnad->netdev); + + unmap->skb = NULL; + unmap->vector.len = 0; +} + +static u32 +bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) +{ + struct bna_cq_entry *cq, *cmpl, *next_cmpl; + struct bna_rcb *rcb = NULL; + struct bnad_rx_unmap_q *unmap_q; + struct bnad_rx_unmap *unmap = NULL; + struct sk_buff *skb = NULL; + struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; + struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl; + u32 packets = 0, len = 0, totlen = 0; + u32 pi, vec, sop_ci = 0, nvecs = 0; + u32 flags, masked_flags; + + prefetch(bnad->netdev); + + cq = ccb->sw_q; + + while (packets < budget) { + cmpl = &cq[ccb->producer_index]; + if (!cmpl->valid) + break; + /* The 'valid' field is set by the adapter, only after writing + * the other fields of completion entry. Hence, do not load + * other fields of completion entry *before* the 'valid' is + * loaded. Adding the rmb() here prevents the compiler and/or + * CPU from reordering the reads which would potentially result + * in reading stale values in completion entry. + */ + rmb(); + + BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length)); + + if (bna_is_small_rxq(cmpl->rxq_id)) + rcb = ccb->rcb[1]; + else + rcb = ccb->rcb[0]; + + unmap_q = rcb->unmap_q; + + /* start of packet ci */ + sop_ci = rcb->consumer_index; + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) { + unmap = &unmap_q->unmap[sop_ci]; + skb = unmap->skb; + } else { + skb = napi_get_frags(&rx_ctrl->napi); + if (unlikely(!skb)) + break; + } + prefetch(skb); + + flags = ntohl(cmpl->flags); + len = ntohs(cmpl->length); + totlen = len; + nvecs = 1; + + /* Check all the completions for this frame. + * busy-wait doesn't help much, break here. + */ + if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) && + (flags & BNA_CQ_EF_EOP) == 0) { + pi = ccb->producer_index; + do { + BNA_QE_INDX_INC(pi, ccb->q_depth); + next_cmpl = &cq[pi]; + + if (!next_cmpl->valid) + break; + /* The 'valid' field is set by the adapter, only + * after writing the other fields of completion + * entry. Hence, do not load other fields of + * completion entry *before* the 'valid' is + * loaded. Adding the rmb() here prevents the + * compiler and/or CPU from reordering the reads + * which would potentially result in reading + * stale values in completion entry. 
+ */ + rmb(); + + len = ntohs(next_cmpl->length); + flags = ntohl(next_cmpl->flags); + + nvecs++; + totlen += len; + } while ((flags & BNA_CQ_EF_EOP) == 0); + + if (!next_cmpl->valid) + break; + } + packets++; + + /* TODO: BNA_CQ_EF_LOCAL ? */ + if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | + BNA_CQ_EF_FCS_ERROR | + BNA_CQ_EF_TOO_LONG))) { + bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs); + rcb->rxq->rx_packets_with_error++; + + goto next; + } + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + bnad_cq_setup_skb(bnad, skb, unmap, len); + else + bnad_cq_setup_skb_frags(ccb, skb, nvecs); + + rcb->rxq->rx_packets++; + rcb->rxq->rx_bytes += totlen; + ccb->bytes_per_intr += totlen; + + masked_flags = flags & flags_cksum_prot_mask; + + if (likely + ((bnad->netdev->features & NETIF_F_RXCSUM) && + ((masked_flags == flags_tcp4) || + (masked_flags == flags_udp4) || + (masked_flags == flags_tcp6) || + (masked_flags == flags_udp6)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + + if ((flags & BNA_CQ_EF_VLAN) && + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); + + if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) + netif_receive_skb(skb); + else + napi_gro_frags(&rx_ctrl->napi); + +next: + BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth); + for (vec = 0; vec < nvecs; vec++) { + cmpl = &cq[ccb->producer_index]; + cmpl->valid = 0; + BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth); + } + } + + napi_gro_flush(&rx_ctrl->napi, false); + if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) + bna_ib_ack_disable_irq(ccb->i_dbell, packets); + + bnad_rxq_post(bnad, ccb->rcb[0]); + if (ccb->rcb[1]) + bnad_rxq_post(bnad, ccb->rcb[1]); + + return packets; +} + +static void +bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); + struct napi_struct *napi = &rx_ctrl->napi; + + if (likely(napi_schedule_prep(napi))) { + __napi_schedule(napi); + rx_ctrl->rx_schedule++; + } +} + +/* MSIX Rx Path Handler */ +static irqreturn_t +bnad_msix_rx(int irq, void *data) +{ + struct bna_ccb *ccb = (struct bna_ccb *)data; + + if (ccb) { + ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++; + bnad_netif_rx_schedule_poll(ccb->bnad, ccb); + } + + return IRQ_HANDLED; +} + +/* Interrupt handlers */ + +/* Mbox Interrupt Handlers */ +static irqreturn_t +bnad_msix_mbox_handler(int irq, void *data) +{ + u32 intr_status; + unsigned long flags; + struct bnad *bnad = (struct bnad *)data; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_HANDLED; + } + + bna_intr_status_get(&bnad->bna, intr_status); + + if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) + bna_mbox_handler(&bnad->bna, intr_status); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t +bnad_isr(int irq, void *data) +{ + int i, j; + u32 intr_status; + unsigned long flags; + struct bnad *bnad = (struct bnad *)data; + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + struct bna_tcb *tcb = NULL; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_NONE; + } + + bna_intr_status_get(&bnad->bna, intr_status); + + if (unlikely(!intr_status)) { + 
spin_unlock_irqrestore(&bnad->bna_lock, flags); + return IRQ_NONE; + } + + if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status)) + bna_mbox_handler(&bnad->bna, intr_status); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (!BNA_IS_INTX_DATA_INTR(intr_status)) + return IRQ_HANDLED; + + /* Process data interrupts */ + /* Tx processing */ + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + tcb = bnad->tx_info[i].tcb[j]; + if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) + bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]); + } + } + /* Rx processing */ + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (rx_ctrl->ccb) + bnad_netif_rx_schedule_poll(bnad, + rx_ctrl->ccb); + } + } + return IRQ_HANDLED; +} + +/* + * Called in interrupt / callback context + * with bna_lock held, so cfg_flags access is OK + */ +static void +bnad_enable_mbox_irq(struct bnad *bnad) +{ + clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); +} + +/* + * Called with bnad->bna_lock held b'cos of + * bnad->cfg_flags access. + */ +static void +bnad_disable_mbox_irq(struct bnad *bnad) +{ + set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); +} + +static void +bnad_set_netdev_perm_addr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + + ether_addr_copy(netdev->perm_addr, bnad->perm_addr); + if (is_zero_ether_addr(netdev->dev_addr)) + eth_hw_addr_set(netdev, bnad->perm_addr); +} + +/* Control Path Handlers */ + +/* Callbacks */ +void +bnad_cb_mbox_intr_enable(struct bnad *bnad) +{ + bnad_enable_mbox_irq(bnad); +} + +void +bnad_cb_mbox_intr_disable(struct bnad *bnad) +{ + bnad_disable_mbox_irq(bnad); +} + +void +bnad_cb_ioceth_ready(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.ioc_comp); +} + +void +bnad_cb_ioceth_failed(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL; + complete(&bnad->bnad_completions.ioc_comp); +} + +void +bnad_cb_ioceth_disabled(struct bnad *bnad) +{ + bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.ioc_comp); +} + +static void +bnad_cb_enet_disabled(void *arg) +{ + struct bnad *bnad = (struct bnad *)arg; + + netif_carrier_off(bnad->netdev); + complete(&bnad->bnad_completions.enet_comp); +} + +void +bnad_cb_ethport_link_status(struct bnad *bnad, + enum bna_link_status link_status) +{ + bool link_up = false; + + link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP); + + if (link_status == BNA_CEE_UP) { + if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) + BNAD_UPDATE_CTR(bnad, cee_toggle); + set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); + } else { + if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) + BNAD_UPDATE_CTR(bnad, cee_toggle); + clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags); + } + + if (link_up) { + if (!netif_carrier_ok(bnad->netdev)) { + uint tx_id, tcb_id; + netdev_info(bnad->netdev, "link up\n"); + netif_carrier_on(bnad->netdev); + BNAD_UPDATE_CTR(bnad, link_toggle); + for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) { + for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx; + tcb_id++) { + struct bna_tcb *tcb = + bnad->tx_info[tx_id].tcb[tcb_id]; + u32 txq_id; + if (!tcb) + continue; + + txq_id = tcb->id; + + if 
(test_bit(BNAD_TXQ_TX_STARTED, + &tcb->flags)) { + /* + * Force an immediate + * Transmit Schedule */ + netif_wake_subqueue( + bnad->netdev, + txq_id); + BNAD_UPDATE_CTR(bnad, + netif_queue_wakeup); + } else { + netif_stop_subqueue( + bnad->netdev, + txq_id); + BNAD_UPDATE_CTR(bnad, + netif_queue_stop); + } + } + } + } + } else { + if (netif_carrier_ok(bnad->netdev)) { + netdev_info(bnad->netdev, "link down\n"); + netif_carrier_off(bnad->netdev); + BNAD_UPDATE_CTR(bnad, link_toggle); + } + } +} + +static void +bnad_cb_tx_disabled(void *arg, struct bna_tx *tx) +{ + struct bnad *bnad = (struct bnad *)arg; + + complete(&bnad->bnad_completions.tx_comp); +} + +static void +bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_info *tx_info = + (struct bnad_tx_info *)tcb->txq->tx->priv; + + tcb->priv = tcb; + tx_info->tcb[tcb->id] = tcb; +} + +static void +bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb) +{ + struct bnad_tx_info *tx_info = + (struct bnad_tx_info *)tcb->txq->tx->priv; + + tx_info->tcb[tcb->id] = NULL; + tcb->priv = NULL; +} + +static void +bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_info *rx_info = + (struct bnad_rx_info *)ccb->cq->rx->priv; + + rx_info->rx_ctrl[ccb->id].ccb = ccb; + ccb->ctrl = &rx_info->rx_ctrl[ccb->id]; +} + +static void +bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb) +{ + struct bnad_rx_info *rx_info = + (struct bnad_rx_info *)ccb->cq->rx->priv; + + rx_info->rx_ctrl[ccb->id].ccb = NULL; +} + +static void +bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = tx->priv; + struct bna_tcb *tcb; + u32 txq_id; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + txq_id = tcb->id; + clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + netif_stop_subqueue(bnad->netdev, txq_id); + } +} + +static void +bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = tx->priv; + struct bna_tcb *tcb; + u32 txq_id; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + txq_id = tcb->id; + + BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)); + set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); + BUG_ON(*(tcb->hw_consumer_index) != 0); + + if (netif_carrier_ok(bnad->netdev)) { + netif_wake_subqueue(bnad->netdev, txq_id); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + + /* + * Workaround for first ioceth enable failure & we + * get a 0 MAC address. We try to get the MAC address + * again here. + */ + if (is_zero_ether_addr(bnad->perm_addr)) { + bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr); + bnad_set_netdev_perm_addr(bnad); + } +} + +/* + * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm. 
+ */ +static void +bnad_tx_cleanup(struct delayed_work *work) +{ + struct bnad_tx_info *tx_info = + container_of(work, struct bnad_tx_info, tx_cleanup_work); + struct bnad *bnad = NULL; + struct bna_tcb *tcb; + unsigned long flags; + u32 i, pending = 0; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + + bnad = tcb->bnad; + + if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { + pending++; + continue; + } + + bnad_txq_cleanup(bnad, tcb); + + smp_mb__before_atomic(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } + + if (pending) { + queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, + msecs_to_jiffies(1)); + return; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_cleanup_complete(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) +{ + struct bnad_tx_info *tx_info = tx->priv; + struct bna_tcb *tcb; + int i; + + for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) { + tcb = tx_info->tcb[i]; + if (!tcb) + continue; + } + + queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0); +} + +static void +bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = rx->priv; + struct bna_ccb *ccb; + struct bnad_rx_ctrl *rx_ctrl; + int i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags); + + if (ccb->rcb[1]) + clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags); + } +} + +/* + * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm. + */ +static void +bnad_rx_cleanup(void *work) +{ + struct bnad_rx_info *rx_info = + container_of(work, struct bnad_rx_info, rx_cleanup_work); + struct bnad_rx_ctrl *rx_ctrl; + struct bnad *bnad = NULL; + unsigned long flags; + u32 i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + + if (!rx_ctrl->ccb) + continue; + + bnad = rx_ctrl->ccb->bnad; + + /* + * Wait till the poll handler has exited + * and nothing can be scheduled anymore + */ + napi_disable(&rx_ctrl->napi); + + bnad_cq_cleanup(bnad, rx_ctrl->ccb); + bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]); + if (rx_ctrl->ccb->rcb[1]) + bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]); + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_cleanup_complete(rx_info->rx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = rx->priv; + struct bna_ccb *ccb; + struct bnad_rx_ctrl *rx_ctrl; + int i; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); + + if (ccb->rcb[1]) + clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); + } + + queue_work(bnad->work_q, &rx_info->rx_cleanup_work); +} + +static void +bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx) +{ + struct bnad_rx_info *rx_info = rx->priv; + struct bna_ccb *ccb; + struct bna_rcb *rcb; + struct bnad_rx_ctrl *rx_ctrl; + int i, j; + + for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { + rx_ctrl = &rx_info->rx_ctrl[i]; + ccb = rx_ctrl->ccb; + if (!ccb) + continue; + + napi_enable(&rx_ctrl->napi); + + for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { + rcb = ccb->rcb[j]; + if (!rcb) + continue; + + bnad_rxq_alloc_init(bnad, rcb); + set_bit(BNAD_RXQ_STARTED, &rcb->flags); + 
set_bit(BNAD_RXQ_POST_OK, &rcb->flags); + bnad_rxq_post(bnad, rcb); + } + } +} + +static void +bnad_cb_rx_disabled(void *arg, struct bna_rx *rx) +{ + struct bnad *bnad = (struct bnad *)arg; + + complete(&bnad->bnad_completions.rx_comp); +} + +static void +bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx) +{ + bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.mcast_comp); +} + +void +bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status, + struct bna_stats *stats) +{ + if (status == BNA_CB_SUCCESS) + BNAD_UPDATE_CTR(bnad, hw_stats_updates); + + if (!netif_running(bnad->netdev) || + !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + return; + + mod_timer(&bnad->stats_timer, + jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); +} + +static void +bnad_cb_enet_mtu_set(struct bnad *bnad) +{ + bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS; + complete(&bnad->bnad_completions.mtu_comp); +} + +void +bnad_cb_completion(void *arg, enum bfa_status status) +{ + struct bnad_iocmd_comp *iocmd_comp = + (struct bnad_iocmd_comp *)arg; + + iocmd_comp->comp_status = (u32) status; + complete(&iocmd_comp->comp); +} + +/* Resource allocation, free functions */ + +static void +bnad_mem_free(struct bnad *bnad, + struct bna_mem_info *mem_info) +{ + int i; + dma_addr_t dma_pa; + + if (mem_info->mdl == NULL) + return; + + for (i = 0; i < mem_info->num; i++) { + if (mem_info->mdl[i].kva != NULL) { + if (mem_info->mem_type == BNA_MEM_T_DMA) { + BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma), + dma_pa); + dma_free_coherent(&bnad->pcidev->dev, + mem_info->mdl[i].len, + mem_info->mdl[i].kva, dma_pa); + } else + kfree(mem_info->mdl[i].kva); + } + } + kfree(mem_info->mdl); + mem_info->mdl = NULL; +} + +static int +bnad_mem_alloc(struct bnad *bnad, + struct bna_mem_info *mem_info) +{ + int i; + dma_addr_t dma_pa; + + if ((mem_info->num == 0) || (mem_info->len == 0)) { + mem_info->mdl = NULL; + return 0; + } + + mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr), + GFP_KERNEL); + if (mem_info->mdl == NULL) + return -ENOMEM; + + if (mem_info->mem_type == BNA_MEM_T_DMA) { + for (i = 0; i < mem_info->num; i++) { + mem_info->mdl[i].len = mem_info->len; + mem_info->mdl[i].kva = + dma_alloc_coherent(&bnad->pcidev->dev, + mem_info->len, &dma_pa, + GFP_KERNEL); + if (mem_info->mdl[i].kva == NULL) + goto err_return; + + BNA_SET_DMA_ADDR(dma_pa, + &(mem_info->mdl[i].dma)); + } + } else { + for (i = 0; i < mem_info->num; i++) { + mem_info->mdl[i].len = mem_info->len; + mem_info->mdl[i].kva = kzalloc(mem_info->len, + GFP_KERNEL); + if (mem_info->mdl[i].kva == NULL) + goto err_return; + } + } + + return 0; + +err_return: + bnad_mem_free(bnad, mem_info); + return -ENOMEM; +} + +/* Free IRQ for Mailbox */ +static void +bnad_mbox_irq_free(struct bnad *bnad) +{ + int irq; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_disable_mbox_irq(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + irq = BNAD_GET_MBOX_IRQ(bnad); + free_irq(irq, bnad); +} + +/* + * Allocates IRQ for Mailbox, but keep it disabled + * This will be enabled once we get the mbox enable callback + * from bna + */ +static int +bnad_mbox_irq_alloc(struct bnad *bnad) +{ + int err = 0; + unsigned long irq_flags, flags; + u32 irq; + irq_handler_t irq_handler; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_MSIX) { + irq_handler = (irq_handler_t)bnad_msix_mbox_handler; + irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; 
+ irq_flags = 0; + } else { + irq_handler = (irq_handler_t)bnad_isr; + irq = bnad->pcidev->irq; + irq_flags = IRQF_SHARED; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME); + + /* + * Set the Mbox IRQ disable flag, so that the IRQ handler + * called from request_irq() for SHARED IRQs do not execute + */ + set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); + + BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); + + err = request_irq(irq, irq_handler, irq_flags, + bnad->mbox_irq_name, bnad); + + return err; +} + +static void +bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info) +{ + kfree(intr_info->idl); + intr_info->idl = NULL; +} + +/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */ +static int +bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src, + u32 txrx_id, struct bna_intr_info *intr_info) +{ + int i, vector_start = 0; + u32 cfg_flags; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + cfg_flags = bnad->cfg_flags; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (cfg_flags & BNAD_CF_MSIX) { + intr_info->intr_type = BNA_INTR_T_MSIX; + intr_info->idl = kcalloc(intr_info->num, + sizeof(struct bna_intr_descr), + GFP_KERNEL); + if (!intr_info->idl) + return -ENOMEM; + + switch (src) { + case BNAD_INTR_TX: + vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id; + break; + + case BNAD_INTR_RX: + vector_start = BNAD_MAILBOX_MSIX_VECTORS + + (bnad->num_tx * bnad->num_txq_per_tx) + + txrx_id; + break; + + default: + BUG(); + } + + for (i = 0; i < intr_info->num; i++) + intr_info->idl[i].vector = vector_start + i; + } else { + intr_info->intr_type = BNA_INTR_T_INTX; + intr_info->num = 1; + intr_info->idl = kcalloc(intr_info->num, + sizeof(struct bna_intr_descr), + GFP_KERNEL); + if (!intr_info->idl) + return -ENOMEM; + + switch (src) { + case BNAD_INTR_TX: + intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK; + break; + + case BNAD_INTR_RX: + intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK; + break; + } + } + return 0; +} + +/* NOTE: Should be called for MSIX only + * Unregisters Tx MSIX vector(s) from the kernel + */ +static void +bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info, + int num_txqs) +{ + int i; + int vector_num; + + for (i = 0; i < num_txqs; i++) { + if (tx_info->tcb[i] == NULL) + continue; + + vector_num = tx_info->tcb[i]->intr_vector; + free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]); + } +} + +/* NOTE: Should be called for MSIX only + * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel + */ +static int +bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, + u32 tx_id, int num_txqs) +{ + int i; + int err; + int vector_num; + + for (i = 0; i < num_txqs; i++) { + vector_num = tx_info->tcb[i]->intr_vector; + sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); + err = request_irq(bnad->msix_table[vector_num].vector, + (irq_handler_t)bnad_msix_tx, 0, + tx_info->tcb[i]->name, + tx_info->tcb[i]); + if (err) + goto err_return; + } + + return 0; + +err_return: + if (i > 0) + bnad_tx_msix_unregister(bnad, tx_info, (i - 1)); + return -1; +} + +/* NOTE: Should be called for MSIX only + * Unregisters Rx MSIX vector(s) from the kernel + */ +static void +bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info, + int num_rxps) +{ + int i; + int vector_num; + + for (i = 0; i < num_rxps; i++) { + if (rx_info->rx_ctrl[i].ccb == NULL) + continue; + + 
vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; + free_irq(bnad->msix_table[vector_num].vector, + rx_info->rx_ctrl[i].ccb); + } +} + +/* NOTE: Should be called for MSIX only + * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel + */ +static int +bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, + u32 rx_id, int num_rxps) +{ + int i; + int err; + int vector_num; + + for (i = 0; i < num_rxps; i++) { + vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; + sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", + bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); + err = request_irq(bnad->msix_table[vector_num].vector, + (irq_handler_t)bnad_msix_rx, 0, + rx_info->rx_ctrl[i].ccb->name, + rx_info->rx_ctrl[i].ccb); + if (err) + goto err_return; + } + + return 0; + +err_return: + if (i > 0) + bnad_rx_msix_unregister(bnad, rx_info, (i - 1)); + return -1; +} + +/* Free Tx object Resources */ +static void +bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info) +{ + int i; + + for (i = 0; i < BNA_TX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); + } +} + +/* Allocates memory and interrupt resources for Tx object */ +static int +bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + u32 tx_id) +{ + int i, err = 0; + + for (i = 0; i < BNA_TX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + err = bnad_mem_alloc(bnad, + &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id, + &res_info[i].res_u.intr_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_tx_res_free(bnad, res_info); + return err; +} + +/* Free Rx object Resources */ +static void +bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info) +{ + int i; + + for (i = 0; i < BNA_RX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info); + } +} + +/* Allocates memory and interrupt resources for Rx object */ +static int +bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + uint rx_id) +{ + int i, err = 0; + + /* All memory needs to be allocated before setup_ccbs */ + for (i = 0; i < BNA_RX_RES_T_MAX; i++) { + if (res_info[i].res_type == BNA_RES_T_MEM) + err = bnad_mem_alloc(bnad, + &res_info[i].res_u.mem_info); + else if (res_info[i].res_type == BNA_RES_T_INTR) + err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id, + &res_info[i].res_u.intr_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_rx_res_free(bnad, res_info); + return err; +} + +/* Timer callbacks */ +/* a) IOC timer */ +static void +bnad_ioc_timeout(struct timer_list *t) +{ + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer); + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_ioc_hb_check(struct timer_list *t) +{ + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer); + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void 
+bnad_iocpf_timeout(struct timer_list *t) +{ + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer); + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_iocpf_sem_timeout(struct timer_list *t) +{ + struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer); + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * All timer routines use bnad->bna_lock to protect against + * the following race, which may occur in case of no locking: + * Time CPU m CPU n + * 0 1 = test_bit + * 1 clear_bit + * 2 del_timer_sync + * 3 mod_timer + */ + +/* b) Dynamic Interrupt Moderation Timer */ +static void +bnad_dim_timeout(struct timer_list *t) +{ + struct bnad *bnad = from_timer(bnad, t, dim_timer); + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + int i, j; + unsigned long flags; + + if (!netif_carrier_ok(bnad->netdev)) + return; + + spin_lock_irqsave(&bnad->bna_lock, flags); + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (!rx_ctrl->ccb) + continue; + bna_rx_dim_update(rx_ctrl->ccb); + } + } + + /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */ + if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) + mod_timer(&bnad->dim_timer, + jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* c) Statistics Timer */ +static void +bnad_stats_timeout(struct timer_list *t) +{ + struct bnad *bnad = from_timer(bnad, t, stats_timer); + unsigned long flags; + + if (!netif_running(bnad->netdev) || + !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + return; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_hw_stats_get(&bnad->bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * Set up timer for DIM + * Called with bnad->bna_lock held + */ +void +bnad_dim_timer_start(struct bnad *bnad) +{ + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { + timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0); + set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); + mod_timer(&bnad->dim_timer, + jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ)); + } +} + +/* + * Set up timer for statistics + * Called with mutex_lock(&bnad->conf_mutex) held + */ +static void +bnad_stats_timer_start(struct bnad *bnad) +{ + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) { + timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0); + mod_timer(&bnad->stats_timer, + jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ)); + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * Stops the stats timer + * Called with mutex_lock(&bnad->conf_mutex) held + */ +static void +bnad_stats_timer_stop(struct bnad *bnad) +{ + int to_del = 0; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) + to_del = 1; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->stats_timer); +} + +/* Utilities */ + +static void +bnad_netdev_mc_list_get(struct net_device *netdev, u8 
*mc_list) +{ + int i = 1; /* Index 0 has broadcast address */ + struct netdev_hw_addr *mc_addr; + + netdev_for_each_mc_addr(mc_addr, netdev) { + ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]); + i++; + } +} + +static int +bnad_napi_poll_rx(struct napi_struct *napi, int budget) +{ + struct bnad_rx_ctrl *rx_ctrl = + container_of(napi, struct bnad_rx_ctrl, napi); + struct bnad *bnad = rx_ctrl->bnad; + int rcvd = 0; + + rx_ctrl->rx_poll_ctr++; + + if (!netif_carrier_ok(bnad->netdev)) + goto poll_exit; + + rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget); + if (rcvd >= budget) + return rcvd; + +poll_exit: + napi_complete_done(napi, rcvd); + + rx_ctrl->rx_complete++; + + if (rx_ctrl->ccb) + bnad_enable_rx_irq_unsafe(rx_ctrl->ccb); + + return rcvd; +} + +static void +bnad_napi_add(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_ctrl *rx_ctrl; + int i; + + /* Initialize & enable NAPI */ + for (i = 0; i < bnad->num_rxp_per_rx; i++) { + rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; + netif_napi_add(bnad->netdev, &rx_ctrl->napi, + bnad_napi_poll_rx); + } +} + +static void +bnad_napi_delete(struct bnad *bnad, u32 rx_id) +{ + int i; + + /* First disable and then clean up */ + for (i = 0; i < bnad->num_rxp_per_rx; i++) + netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); +} + +/* Should be held with conf_lock held */ +void +bnad_destroy_tx(struct bnad *bnad, u32 tx_id) +{ + struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; + struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; + unsigned long flags; + + if (!tx_info->tx) + return; + + init_completion(&bnad->bnad_completions.tx_comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&bnad->bnad_completions.tx_comp); + + if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX) + bnad_tx_msix_unregister(bnad, tx_info, + bnad->num_txq_per_tx); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_destroy(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + tx_info->tx = NULL; + tx_info->tx_id = 0; + + bnad_tx_res_free(bnad, res_info); +} + +/* Should be held with conf_lock held */ +int +bnad_setup_tx(struct bnad *bnad, u32 tx_id) +{ + int err; + struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; + struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; + struct bna_intr_info *intr_info = + &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info; + struct bna_tx_config *tx_config = &bnad->tx_config[tx_id]; + static const struct bna_tx_event_cbfn tx_cbfn = { + .tcb_setup_cbfn = bnad_cb_tcb_setup, + .tcb_destroy_cbfn = bnad_cb_tcb_destroy, + .tx_stall_cbfn = bnad_cb_tx_stall, + .tx_resume_cbfn = bnad_cb_tx_resume, + .tx_cleanup_cbfn = bnad_cb_tx_cleanup, + }; + + struct bna_tx *tx; + unsigned long flags; + + tx_info->tx_id = tx_id; + + /* Initialize the Tx object configuration */ + tx_config->num_txq = bnad->num_txq_per_tx; + tx_config->txq_depth = bnad->txq_depth; + tx_config->tx_type = BNA_TX_T_REGULAR; + tx_config->coalescing_timeo = bnad->tx_coalescing_timeo; + + /* Get BNA's resource requirement for one tx object */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_res_req(bnad->num_txq_per_tx, + bnad->txq_depth, res_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Fill Unmap Q memory requirements */ + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ], + bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) * + bnad->txq_depth)); 
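+	/* Descriptive note (not in upstream): one bnad_tx_unmap entry is reserved per TxQ descriptor, for each of num_txq_per_tx queues; the Tx completion path uses these entries to DMA-unmap and free transmitted skbs. */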
+ + /* Allocate resources */ + err = bnad_tx_res_alloc(bnad, res_info, tx_id); + if (err) + return err; + + /* Ask BNA to create one Tx object, supplying required resources */ + spin_lock_irqsave(&bnad->bna_lock, flags); + tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info, + tx_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (!tx) { + err = -ENOMEM; + goto err_return; + } + tx_info->tx = tx; + + INIT_DELAYED_WORK(&tx_info->tx_cleanup_work, + (work_func_t)bnad_tx_cleanup); + + /* Register ISR for the Tx object */ + if (intr_info->intr_type == BNA_INTR_T_MSIX) { + err = bnad_tx_msix_register(bnad, tx_info, + tx_id, bnad->num_txq_per_tx); + if (err) + goto cleanup_tx; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_enable(tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return 0; + +cleanup_tx: + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_tx_destroy(tx_info->tx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + tx_info->tx = NULL; + tx_info->tx_id = 0; +err_return: + bnad_tx_res_free(bnad, res_info); + return err; +} + +/* Setup the rx config for bna_rx_create */ +/* bnad decides the configuration */ +static void +bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) +{ + memset(rx_config, 0, sizeof(*rx_config)); + rx_config->rx_type = BNA_RX_T_REGULAR; + rx_config->num_paths = bnad->num_rxp_per_rx; + rx_config->coalescing_timeo = bnad->rx_coalescing_timeo; + + if (bnad->num_rxp_per_rx > 1) { + rx_config->rss_status = BNA_STATUS_T_ENABLED; + rx_config->rss_config.hash_type = + (BFI_ENET_RSS_IPV6 | + BFI_ENET_RSS_IPV6_TCP | + BFI_ENET_RSS_IPV4 | + BFI_ENET_RSS_IPV4_TCP); + rx_config->rss_config.hash_mask = + bnad->num_rxp_per_rx - 1; + netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key, + sizeof(rx_config->rss_config.toeplitz_hash_key)); + } else { + rx_config->rss_status = BNA_STATUS_T_DISABLED; + memset(&rx_config->rss_config, 0, + sizeof(rx_config->rss_config)); + } + + rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu); + rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED; + + /* BNA_RXP_SINGLE - one data-buffer queue + * BNA_RXP_SLR - one small-buffer and one large-buffer queues + * BNA_RXP_HDS - one header-buffer and one data-buffer queues + */ + /* TODO: configurable param for queue type */ + rx_config->rxp_type = BNA_RXP_SLR; + + if (BNAD_PCI_DEV_IS_CAT2(bnad) && + rx_config->frame_size > 4096) { + /* though size_routing_enable is set in SLR, + * small packets may get routed to same rxq. + * set buf_size to 2048 instead of PAGE_SIZE. + */ + rx_config->q0_buf_size = 2048; + /* this should be in multiples of 2 */ + rx_config->q0_num_vecs = 4; + rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs; + rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED; + } else { + rx_config->q0_buf_size = rx_config->frame_size; + rx_config->q0_num_vecs = 1; + rx_config->q0_depth = bnad->rxq_depth; + } + + /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */ + if (rx_config->rxp_type == BNA_RXP_SLR) { + rx_config->q1_depth = bnad->rxq_depth; + rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; + } + + rx_config->vlan_strip_status = + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? 
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; +} + +static void +bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + int i; + + for (i = 0; i < bnad->num_rxp_per_rx; i++) + rx_info->rx_ctrl[i].bnad = bnad; +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +static u32 +bnad_reinit_rx(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + u32 err = 0, current_err = 0; + u32 rx_id = 0, count = 0; + unsigned long flags; + + /* destroy and create new rx objects */ + for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { + if (!bnad->rx_info[rx_id].rx) + continue; + bnad_destroy_rx(bnad, rx_id); + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, + BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) { + count++; + current_err = bnad_setup_rx(bnad, rx_id); + if (current_err && !err) { + err = current_err; + netdev_err(netdev, "RXQ:%u setup failed\n", rx_id); + } + } + + /* restore rx configuration */ + if (bnad->rx_info[0].rx && !err) { + bnad_restore_vlans(bnad, 0); + bnad_enable_default_bcast(bnad); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad_set_rx_mode(netdev); + } + + return count; +} + +/* Called with bnad_conf_lock() held */ +void +bnad_destroy_rx(struct bnad *bnad, u32 rx_id) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; + struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; + unsigned long flags; + int to_del = 0; + + if (!rx_info->rx) + return; + + if (0 == rx_id) { + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) { + clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags); + to_del = 1; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->dim_timer); + } + + init_completion(&bnad->bnad_completions.rx_comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&bnad->bnad_completions.rx_comp); + + if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) + bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); + + bnad_napi_delete(bnad, rx_id); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_destroy(rx_info->rx); + + rx_info->rx = NULL; + rx_info->rx_id = 0; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad_rx_res_free(bnad, res_info); +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +int +bnad_setup_rx(struct bnad *bnad, u32 rx_id) +{ + int err; + struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; + struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0]; + struct bna_intr_info *intr_info = + &res_info[BNA_RX_RES_T_INTR].res_u.intr_info; + struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; + static const struct bna_rx_event_cbfn rx_cbfn = { + .rcb_setup_cbfn = NULL, + .rcb_destroy_cbfn = NULL, + .ccb_setup_cbfn = bnad_cb_ccb_setup, + .ccb_destroy_cbfn = bnad_cb_ccb_destroy, + .rx_stall_cbfn = bnad_cb_rx_stall, + .rx_cleanup_cbfn = bnad_cb_rx_cleanup, + .rx_post_cbfn = bnad_cb_rx_post, + }; + struct bna_rx *rx; + unsigned long flags; + + rx_info->rx_id = rx_id; + + 
/* Initialize the Rx object configuration */ + bnad_init_rx_config(bnad, rx_config); + + /* Get BNA's resource requirement for one Rx object */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_res_req(rx_config, res_info); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Fill Unmap Q memory requirements */ + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ], + rx_config->num_paths, + (rx_config->q0_depth * + sizeof(struct bnad_rx_unmap)) + + sizeof(struct bnad_rx_unmap_q)); + + if (rx_config->rxp_type != BNA_RXP_SINGLE) { + BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ], + rx_config->num_paths, + (rx_config->q1_depth * + sizeof(struct bnad_rx_unmap) + + sizeof(struct bnad_rx_unmap_q))); + } + /* Allocate resource */ + err = bnad_rx_res_alloc(bnad, res_info, rx_id); + if (err) + return err; + + bnad_rx_ctrl_init(bnad, rx_id); + + /* Ask BNA to create one Rx object, supplying required resources */ + spin_lock_irqsave(&bnad->bna_lock, flags); + rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info, + rx_info); + if (!rx) { + err = -ENOMEM; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto err_return; + } + rx_info->rx = rx; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + INIT_WORK(&rx_info->rx_cleanup_work, + (work_func_t)(bnad_rx_cleanup)); + + /* + * Init NAPI, so that state is set to NAPI_STATE_SCHED, + * so that IRQ handler cannot schedule NAPI at this point. + */ + bnad_napi_add(bnad, rx_id); + + /* Register ISR for the Rx object */ + if (intr_info->intr_type == BNA_INTR_T_MSIX) { + err = bnad_rx_msix_register(bnad, rx_info, rx_id, + rx_config->num_paths); + if (err) + goto err_return; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (0 == rx_id) { + /* Set up Dynamic Interrupt Moderation Vector */ + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) + bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector); + + /* Enable VLAN filtering only on the default Rx */ + bna_rx_vlanfilter_enable(rx); + + /* Start the DIM timer */ + bnad_dim_timer_start(bnad); + } + + bna_rx_enable(rx); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return 0; + +err_return: + bnad_destroy_rx(bnad, rx_id); + return err; +} + +/* Called with conf_lock & bnad->bna_lock held */ +void +bnad_tx_coalescing_timeo_set(struct bnad *bnad) +{ + struct bnad_tx_info *tx_info; + + tx_info = &bnad->tx_info[0]; + if (!tx_info->tx) + return; + + bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo); +} + +/* Called with conf_lock & bnad->bna_lock held */ +void +bnad_rx_coalescing_timeo_set(struct bnad *bnad) +{ + struct bnad_rx_info *rx_info; + int i; + + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + bna_rx_coalescing_timeo_set(rx_info->rx, + bnad->rx_coalescing_timeo); + } +} + +/* + * Called with bnad->bna_lock held + */ +int +bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr) +{ + int ret; + + if (!is_valid_ether_addr(mac_addr)) + return -EADDRNOTAVAIL; + + /* If datapath is down, pretend everything went through */ + if (!bnad->rx_info[0].rx) + return 0; + + ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr); + if (ret != BNA_CB_SUCCESS) + return -EADDRNOTAVAIL; + + return 0; +} + +/* Should be called with conf_lock held */ +int +bnad_enable_default_bcast(struct bnad *bnad) +{ + struct bnad_rx_info *rx_info = &bnad->rx_info[0]; + int ret; + unsigned long flags; + + init_completion(&bnad->bnad_completions.mcast_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = 
bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr, + bnad_cb_rx_mcast_add); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (ret == BNA_CB_SUCCESS) + wait_for_completion(&bnad->bnad_completions.mcast_comp); + else + return -ENODEV; + + if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS) + return -ENODEV; + + return 0; +} + +/* Called with mutex_lock(&bnad->conf_mutex) held */ +void +bnad_restore_vlans(struct bnad *bnad, u32 rx_id) +{ + u16 vid; + unsigned long flags; + + for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) { + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } +} + +/* Statistics utilities */ +void +bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) +{ + int i, j; + + for (i = 0; i < bnad->num_rx; i++) { + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + if (bnad->rx_info[i].rx_ctrl[j].ccb) { + stats->rx_packets += bnad->rx_info[i]. + rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets; + stats->rx_bytes += bnad->rx_info[i]. + rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes; + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + stats->rx_packets += + bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]->rxq->rx_packets; + stats->rx_bytes += + bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]->rxq->rx_bytes; + } + } + } + } + for (i = 0; i < bnad->num_tx; i++) { + for (j = 0; j < bnad->num_txq_per_tx; j++) { + if (bnad->tx_info[i].tcb[j]) { + stats->tx_packets += + bnad->tx_info[i].tcb[j]->txq->tx_packets; + stats->tx_bytes += + bnad->tx_info[i].tcb[j]->txq->tx_bytes; + } + } + } +} + +/* + * Must be called with the bna_lock held. + */ +void +bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) +{ + struct bfi_enet_stats_mac *mac_stats; + u32 bmap; + int i; + + mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats; + stats->rx_errors = + mac_stats->rx_fcs_error + mac_stats->rx_alignment_error + + mac_stats->rx_frame_length_error + mac_stats->rx_code_error + + mac_stats->rx_undersize; + stats->tx_errors = mac_stats->tx_fcs_error + + mac_stats->tx_undersize; + stats->rx_dropped = mac_stats->rx_drop; + stats->tx_dropped = mac_stats->tx_drop; + stats->multicast = mac_stats->rx_multicast; + stats->collisions = mac_stats->tx_total_collision; + + stats->rx_length_errors = mac_stats->rx_frame_length_error; + + /* receive ring buffer overflow ?? */ + + stats->rx_crc_errors = mac_stats->rx_fcs_error; + stats->rx_frame_errors = mac_stats->rx_alignment_error; + /* recv'r fifo overrun */ + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats->rx_fifo_errors += + bnad->stats.bna_stats-> + hw_stats.rxf_stats[i].frame_drops; + break; + } + bmap >>= 1; + } +} + +static void +bnad_mbox_irq_sync(struct bnad *bnad) +{ + u32 irq; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bnad->cfg_flags & BNAD_CF_MSIX) + irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector; + else + irq = bnad->pcidev->irq; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + synchronize_irq(irq); +} + +/* Utility used by bnad_start_xmit, for doing TSO */ +static int +bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) +{ + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) { + BNAD_UPDATE_CTR(bnad, tso_err); + return err; + } + + /* + * For TSO, the TCP checksum field is seeded with pseudo-header sum + * excluding the length field. 
+ */ + if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + + /* Do we really need these? */ + iph->tot_len = 0; + iph->check = 0; + + tcp_hdr(skb)->check = + ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, + IPPROTO_TCP, 0); + BNAD_UPDATE_CTR(bnad, tso4); + } else { + tcp_v6_gso_csum_prep(skb); + BNAD_UPDATE_CTR(bnad, tso6); + } + + return 0; +} + +/* + * Initialize Q numbers depending on Rx Paths + * Called with bnad->bna_lock held, because of cfg_flags + * access. + */ +static void +bnad_q_num_init(struct bnad *bnad) +{ + int rxps; + + rxps = min((uint)num_online_cpus(), + (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX)); + + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) + rxps = 1; /* INTx */ + + bnad->num_rx = 1; + bnad->num_tx = 1; + bnad->num_rxp_per_rx = rxps; + bnad->num_txq_per_tx = BNAD_TXQ_NUM; +} + +/* + * Adjusts the Q numbers, given a number of msix vectors + * Give preference to RSS as opposed to Tx priority Queues, + * in such a case, just use 1 Tx Q + * Called with bnad->bna_lock held b'cos of cfg_flags access + */ +static void +bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp) +{ + bnad->num_txq_per_tx = 1; + if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) + + bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) && + (bnad->cfg_flags & BNAD_CF_MSIX)) { + bnad->num_rxp_per_rx = msix_vectors - + (bnad->num_tx * bnad->num_txq_per_tx) - + BNAD_MAILBOX_MSIX_VECTORS; + } else + bnad->num_rxp_per_rx = 1; +} + +/* Enable / disable ioceth */ +static int +bnad_ioceth_disable(struct bnad *bnad) +{ + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + init_completion(&bnad->bnad_completions.ioc_comp); + bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, + msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); + + err = bnad->bnad_completions.ioc_comp_status; + return err; +} + +static int +bnad_ioceth_enable(struct bnad *bnad) +{ + int err = 0; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + init_completion(&bnad->bnad_completions.ioc_comp); + bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING; + bna_ioceth_enable(&bnad->bna.ioceth); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp, + msecs_to_jiffies(BNAD_IOCETH_TIMEOUT)); + + err = bnad->bnad_completions.ioc_comp_status; + + return err; +} + +/* Free BNA resources */ +static void +bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info, + u32 res_val_max) +{ + int i; + + for (i = 0; i < res_val_max; i++) + bnad_mem_free(bnad, &res_info[i].res_u.mem_info); +} + +/* Allocates memory and interrupt resources for BNA */ +static int +bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info, + u32 res_val_max) +{ + int i, err; + + for (i = 0; i < res_val_max; i++) { + err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info); + if (err) + goto err_return; + } + return 0; + +err_return: + bnad_res_free(bnad, res_info, res_val_max); + return err; +} + +/* Interrupt enable / disable */ +static void +bnad_enable_msix(struct bnad *bnad) +{ + int i, ret; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (bnad->msix_table) + return; + + bnad->msix_table = + kcalloc(bnad->msix_num, 
sizeof(struct msix_entry), GFP_KERNEL); + + if (!bnad->msix_table) + goto intx_mode; + + for (i = 0; i < bnad->msix_num; i++) + bnad->msix_table[i].entry = i; + + ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table, + 1, bnad->msix_num); + if (ret < 0) { + goto intx_mode; + } else if (ret < bnad->msix_num) { + dev_warn(&bnad->pcidev->dev, + "%d MSI-X vectors allocated < %d requested\n", + ret, bnad->msix_num); + + spin_lock_irqsave(&bnad->bna_lock, flags); + /* ret = #of vectors that we got */ + bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2, + (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP + + BNAD_MAILBOX_MSIX_VECTORS; + + if (bnad->msix_num > ret) { + pci_disable_msix(bnad->pcidev); + goto intx_mode; + } + } + + pci_intx(bnad->pcidev, 0); + + return; + +intx_mode: + dev_warn(&bnad->pcidev->dev, + "MSI-X enable failed - operating in INTx mode\n"); + + kfree(bnad->msix_table); + bnad->msix_table = NULL; + bnad->msix_num = 0; + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad->cfg_flags &= ~BNAD_CF_MSIX; + bnad_q_num_init(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_disable_msix(struct bnad *bnad) +{ + u32 cfg_flags; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + cfg_flags = bnad->cfg_flags; + if (bnad->cfg_flags & BNAD_CF_MSIX) + bnad->cfg_flags &= ~BNAD_CF_MSIX; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + if (cfg_flags & BNAD_CF_MSIX) { + pci_disable_msix(bnad->pcidev); + kfree(bnad->msix_table); + bnad->msix_table = NULL; + } +} + +/* Netdev entry points */ +static int +bnad_open(struct net_device *netdev) +{ + int err; + struct bnad *bnad = netdev_priv(netdev); + struct bna_pause_config pause_config; + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + + /* Tx */ + err = bnad_setup_tx(bnad, 0); + if (err) + goto err_return; + + /* Rx */ + err = bnad_setup_rx(bnad, 0); + if (err) + goto cleanup_tx; + + /* Port */ + pause_config.tx_pause = 0; + pause_config.rx_pause = 0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, + BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL); + bna_enet_pause_config(&bnad->bna.enet, &pause_config); + bna_enet_enable(&bnad->bna.enet); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Enable broadcast */ + bnad_enable_default_bcast(bnad); + + /* Restore VLANs, if any */ + bnad_restore_vlans(bnad, 0); + + /* Set the UCAST address */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Start the stats timer */ + bnad_stats_timer_start(bnad); + + mutex_unlock(&bnad->conf_mutex); + + return 0; + +cleanup_tx: + bnad_destroy_tx(bnad, 0); + +err_return: + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static int +bnad_stop(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + + /* Stop the stats timer */ + bnad_stats_timer_stop(bnad); + + init_completion(&bnad->bnad_completions.enet_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP, + bnad_cb_enet_disabled); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion(&bnad->bnad_completions.enet_comp); + + bnad_destroy_tx(bnad, 0); + bnad_destroy_rx(bnad, 0); + + /* Synchronize mailbox IRQ */ + bnad_mbox_irq_sync(bnad); + + 
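+	/* Descriptive note (not in upstream): by this point the Tx/Rx objects are destroyed and the mailbox IRQ has been synchronized, so the device is fully quiesced before conf_mutex is released. */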
mutex_unlock(&bnad->conf_mutex); + + return 0; +} + +/* TX */ +/* Returns 0 for success */ +static int +bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, + struct sk_buff *skb, struct bna_txq_entry *txqent) +{ + u16 flags = 0; + u32 gso_size; + u16 vlan_tag = 0; + + if (skb_vlan_tag_present(skb)) { + vlan_tag = (u16)skb_vlan_tag_get(skb); + flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); + } + if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) { + vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT) + | (vlan_tag & 0x1fff); + flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN); + } + txqent->hdr.wi.vlan_tag = htons(vlan_tag); + + if (skb_is_gso(skb)) { + gso_size = skb_shinfo(skb)->gso_size; + if (unlikely(gso_size > bnad->netdev->mtu)) { + BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long); + return -EINVAL; + } + if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) { + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.lso_mss = 0; + BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short); + } else { + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO); + txqent->hdr.wi.lso_mss = htons(gso_size); + } + + if (bnad_tso_prepare(bnad, skb)) { + BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare); + return -EINVAL; + } + + flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM); + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET( + tcp_hdrlen(skb) >> 2, skb_transport_offset(skb))); + } else { + txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND); + txqent->hdr.wi.lso_mss = 0; + + if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) { + BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long); + return -EINVAL; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + __be16 net_proto = vlan_get_protocol(skb); + u8 proto = 0; + + if (net_proto == htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; +#ifdef NETIF_F_IPV6_CSUM + else if (net_proto == htons(ETH_P_IPV6)) { + /* nexthdr may not be TCP immediately. 
*/ + proto = ipv6_hdr(skb)->nexthdr; + } +#endif + if (proto == IPPROTO_TCP) { + flags |= BNA_TXQ_WI_CF_TCP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, tcpcsum_offload); + + if (unlikely(skb_headlen(skb) < + skb_tcp_all_headers(skb))) { + BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr); + return -EINVAL; + } + } else if (proto == IPPROTO_UDP) { + flags |= BNA_TXQ_WI_CF_UDP_CKSUM; + txqent->hdr.wi.l4_hdr_size_n_offset = + htons(BNA_TXQ_WI_L4_HDR_N_OFFSET + (0, skb_transport_offset(skb))); + + BNAD_UPDATE_CTR(bnad, udpcsum_offload); + if (unlikely(skb_headlen(skb) < + skb_transport_offset(skb) + + sizeof(struct udphdr))) { + BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr); + return -EINVAL; + } + } else { + + BNAD_UPDATE_CTR(bnad, tx_skb_csum_err); + return -EINVAL; + } + } else + txqent->hdr.wi.l4_hdr_size_n_offset = 0; + } + + txqent->hdr.wi.flags = htons(flags); + txqent->hdr.wi.frame_length = htonl(skb->len); + + return 0; +} + +/* + * bnad_start_xmit : Netdev entry point for Transmit + * Called under lock held by net_device + */ +static netdev_tx_t +bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + u32 txq_id = 0; + struct bna_tcb *tcb = NULL; + struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap; + u32 prod, q_depth, vect_id; + u32 wis, vectors, len; + int i; + dma_addr_t dma_addr; + struct bna_txq_entry *txqent; + + len = skb_headlen(skb); + + /* Sanity checks for the skb */ + + if (unlikely(skb->len <= ETH_HLEN)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_too_short); + return NETDEV_TX_OK; + } + if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); + return NETDEV_TX_OK; + } + if (unlikely(len == 0)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero); + return NETDEV_TX_OK; + } + + tcb = bnad->tx_info[0].tcb[txq_id]; + + /* + * Takes care of the Tx that is scheduled between clearing the flag + * and the netif_tx_stop_all_queues() call. + */ + if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_stopping); + return NETDEV_TX_OK; + } + + q_depth = tcb->q_depth; + prod = tcb->producer_index; + unmap_q = tcb->unmap_q; + + vectors = 1 + skb_shinfo(skb)->nr_frags; + wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */ + + if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors); + return NETDEV_TX_OK; + } + + /* Check for available TxQ resources */ + if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { + if ((*tcb->hw_consumer_index != tcb->consumer_index) && + !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { + u32 sent; + sent = bnad_txcmpl_process(bnad, tcb); + if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + bna_ib_ack(tcb->i_dbell, sent); + smp_mb__before_atomic(); + clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); + } else { + netif_stop_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + } + + smp_mb(); + /* + * Check again to deal with race condition between + * netif_stop_queue here, and netif_wake_queue in + * interrupt handler which is not inside netif tx lock. 
+ */ + if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) { + BNAD_UPDATE_CTR(bnad, netif_queue_stop); + return NETDEV_TX_BUSY; + } else { + netif_wake_queue(netdev); + BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); + } + } + + txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; + head_unmap = &unmap_q[prod]; + + /* Program the opcode, flags, frame_len, num_vectors in WI */ + if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + txqent->hdr.wi.reserved = 0; + txqent->hdr.wi.num_vectors = vectors; + + head_unmap->skb = skb; + head_unmap->nvecs = 0; + + /* Program the vectors */ + unmap = head_unmap; + dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data, + len, DMA_TO_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); + return NETDEV_TX_OK; + } + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr); + txqent->vector[0].length = htons(len); + dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr); + head_unmap->nvecs++; + + for (i = 0, vect_id = 0; i < vectors - 1; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + u32 size = skb_frag_size(frag); + + if (unlikely(size == 0)) { + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, + tcb->producer_index); + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero); + return NETDEV_TX_OK; + } + + len += size; + + vect_id++; + if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) { + vect_id = 0; + BNA_QE_INDX_INC(prod, q_depth); + txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod]; + txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION); + unmap = &unmap_q[prod]; + } + + dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag, + 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) { + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, + tcb->producer_index); + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_map_failed); + return NETDEV_TX_OK; + } + + dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size); + BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr); + txqent->vector[vect_id].length = htons(size); + dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr, + dma_addr); + head_unmap->nvecs++; + } + + if (unlikely(len != skb->len)) { + /* Undo the changes starting at tcb->producer_index */ + bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index); + dev_kfree_skb_any(skb); + BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch); + return NETDEV_TX_OK; + } + + BNA_QE_INDX_INC(prod, q_depth); + tcb->producer_index = prod; + + wmb(); + + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + return NETDEV_TX_OK; + + skb_tx_timestamp(skb); + + bna_txq_prod_indx_doorbell(tcb); + + return NETDEV_TX_OK; +} + +/* + * Used spin_lock to synchronize reading of stats structures, which + * is written by BNA under the same lock. 
+ */ +static void +bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + bnad_netdev_qstats_fill(bnad, stats); + bnad_netdev_hwstats_fill(bnad, stats); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +static void +bnad_set_rx_ucast_fltr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + int uc_count = netdev_uc_count(netdev); + enum bna_cb_status ret; + u8 *mac_list; + struct netdev_hw_addr *ha; + int entry; + + if (netdev_uc_empty(bnad->netdev)) { + bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); + return; + } + + if (uc_count > bna_attr(&bnad->bna)->num_ucmac) + goto mode_default; + + mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC); + if (mac_list == NULL) + goto mode_default; + + entry = 0; + netdev_for_each_uc_addr(ha, netdev) { + ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]); + entry++; + } + + ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list); + kfree(mac_list); + + if (ret != BNA_CB_SUCCESS) + goto mode_default; + + return; + + /* ucast packets not in UCAM are routed to default function */ +mode_default: + bnad->cfg_flags |= BNAD_CF_DEFAULT; + bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL); +} + +static void +bnad_set_rx_mcast_fltr(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + int mc_count = netdev_mc_count(netdev); + enum bna_cb_status ret; + u8 *mac_list; + + if (netdev->flags & IFF_ALLMULTI) + goto mode_allmulti; + + if (netdev_mc_empty(netdev)) + return; + + if (mc_count > bna_attr(&bnad->bna)->num_mcmac) + goto mode_allmulti; + + mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC); + + if (mac_list == NULL) + goto mode_allmulti; + + ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]); + + /* copy rest of the MCAST addresses */ + bnad_netdev_mc_list_get(netdev, mac_list); + ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list); + kfree(mac_list); + + if (ret != BNA_CB_SUCCESS) + goto mode_allmulti; + + return; + +mode_allmulti: + bnad->cfg_flags |= BNAD_CF_ALLMULTI; + bna_rx_mcast_delall(bnad->rx_info[0].rx); +} + +void +bnad_set_rx_mode(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + enum bna_rxmode new_mode, mode_mask; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + if (bnad->rx_info[0].rx == NULL) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + return; + } + + /* clear bnad flags to update it with new settings */ + bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT | + BNAD_CF_ALLMULTI); + + new_mode = 0; + if (netdev->flags & IFF_PROMISC) { + new_mode |= BNAD_RXMODE_PROMISC_DEFAULT; + bnad->cfg_flags |= BNAD_CF_PROMISC; + } else { + bnad_set_rx_mcast_fltr(bnad); + + if (bnad->cfg_flags & BNAD_CF_ALLMULTI) + new_mode |= BNA_RXMODE_ALLMULTI; + + bnad_set_rx_ucast_fltr(bnad); + + if (bnad->cfg_flags & BNAD_CF_DEFAULT) + new_mode |= BNA_RXMODE_DEFAULT; + } + + mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT | + BNA_RXMODE_ALLMULTI; + bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); +} + +/* + * bna_lock is used to sync writes to netdev->addr + * conf_lock cannot be used since this call may be made + * in a non-blocking context. 
+ */ +static int +bnad_set_mac_address(struct net_device *netdev, void *addr) +{ + int err; + struct bnad *bnad = netdev_priv(netdev); + struct sockaddr *sa = (struct sockaddr *)addr; + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + err = bnad_mac_addr_set_locked(bnad, sa->sa_data); + if (!err) + eth_hw_addr_set(netdev, sa->sa_data); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return err; +} + +static int +bnad_mtu_set(struct bnad *bnad, int frame_size) +{ + unsigned long flags; + + init_completion(&bnad->bnad_completions.mtu_comp); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + wait_for_completion(&bnad->bnad_completions.mtu_comp); + + return bnad->bnad_completions.mtu_comp_status; +} + +static int +bnad_change_mtu(struct net_device *netdev, int new_mtu) +{ + int err, mtu; + struct bnad *bnad = netdev_priv(netdev); + u32 frame, new_frame; + + mutex_lock(&bnad->conf_mutex); + + mtu = netdev->mtu; + netdev->mtu = new_mtu; + + frame = BNAD_FRAME_SIZE(mtu); + new_frame = BNAD_FRAME_SIZE(new_mtu); + + /* check if multi-buffer needs to be enabled */ + if (BNAD_PCI_DEV_IS_CAT2(bnad) && + netif_running(bnad->netdev)) { + /* only when transition is over 4K */ + if ((frame <= 4096 && new_frame > 4096) || + (frame > 4096 && new_frame <= 4096)) + bnad_reinit_rx(bnad); + } + + err = bnad_mtu_set(bnad, new_frame); + if (err) + err = -EBUSY; + + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static int +bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + if (!bnad->rx_info[0].rx) + return 0; + + mutex_lock(&bnad->conf_mutex); + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_rx_vlan_add(bnad->rx_info[0].rx, vid); + set_bit(vid, bnad->active_vlans); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + + return 0; +} + +static int +bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + if (!bnad->rx_info[0].rx) + return 0; + + mutex_lock(&bnad->conf_mutex); + + spin_lock_irqsave(&bnad->bna_lock, flags); + clear_bit(vid, bnad->active_vlans); + bna_rx_vlan_del(bnad->rx_info[0].rx, vid); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + + return 0; +} + +static int bnad_set_features(struct net_device *dev, netdev_features_t features) +{ + struct bnad *bnad = netdev_priv(dev); + netdev_features_t changed = features ^ dev->features; + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); + else + bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +bnad_netpoll(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_rx_info *rx_info; + struct bnad_rx_ctrl *rx_ctrl; + u32 curr_mask; + int i, j; + + if (!(bnad->cfg_flags & BNAD_CF_MSIX)) { + bna_intx_disable(&bnad->bna, curr_mask); + bnad_isr(bnad->pcidev->irq, netdev); + bna_intx_enable(&bnad->bna, curr_mask); + } else { + /* + * Tx processing may happen in sending context, so no need + * to explicitly process completions 
here + */ + + /* Rx processing */ + for (i = 0; i < bnad->num_rx; i++) { + rx_info = &bnad->rx_info[i]; + if (!rx_info->rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + rx_ctrl = &rx_info->rx_ctrl[j]; + if (rx_ctrl->ccb) + bnad_netif_rx_schedule_poll(bnad, + rx_ctrl->ccb); + } + } + } +} +#endif + +static const struct net_device_ops bnad_netdev_ops = { + .ndo_open = bnad_open, + .ndo_stop = bnad_stop, + .ndo_start_xmit = bnad_start_xmit, + .ndo_get_stats64 = bnad_get_stats64, + .ndo_set_rx_mode = bnad_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = bnad_set_mac_address, + .ndo_change_mtu = bnad_change_mtu, + .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, + .ndo_set_features = bnad_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = bnad_netpoll +#endif +}; + +static void +bnad_netdev_init(struct bnad *bnad) +{ + struct net_device *netdev = bnad->netdev; + + netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + + netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | + NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | + NETIF_F_TSO | NETIF_F_TSO6; + + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HIGHDMA; + + netdev->mem_start = bnad->mmio_start; + netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1; + + /* MTU range: 46 - 9000 */ + netdev->min_mtu = ETH_ZLEN - ETH_HLEN; + netdev->max_mtu = BNAD_JUMBO_MTU; + + netdev->netdev_ops = &bnad_netdev_ops; + bnad_set_ethtool_ops(netdev); +} + +/* + * 1. Initialize the bnad structure + * 2. Setup netdev pointer in pci_dev + * 3. Initialize no. of TxQ & CQs & MSIX vectors + * 4. Initialize work queue. + */ +static int +bnad_init(struct bnad *bnad, + struct pci_dev *pdev, struct net_device *netdev) +{ + unsigned long flags; + + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + bnad->netdev = netdev; + bnad->pcidev = pdev; + bnad->mmio_start = pci_resource_start(pdev, 0); + bnad->mmio_len = pci_resource_len(pdev, 0); + bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len); + if (!bnad->bar0) { + dev_err(&pdev->dev, "ioremap for bar0 failed\n"); + return -ENOMEM; + } + dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0, + (unsigned long long) bnad->mmio_len); + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (!bnad_msix_disable) + bnad->cfg_flags = BNAD_CF_MSIX; + + bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; + + bnad_q_num_init(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) + + (bnad->num_rx * bnad->num_rxp_per_rx) + + BNAD_MAILBOX_MSIX_VECTORS; + + bnad->txq_depth = BNAD_TXQ_DEPTH; + bnad->rxq_depth = BNAD_RXQ_DEPTH; + + bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; + bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; + + sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); + bnad->work_q = create_singlethread_workqueue(bnad->wq_name); + if (!bnad->work_q) { + iounmap(bnad->bar0); + return -ENOMEM; + } + + return 0; +} + +/* + * Must be called after bnad_pci_uninit() + * so that iounmap() and pci_set_drvdata(NULL) + * happens only after PCI uninitialization. 
+ */ +static void +bnad_uninit(struct bnad *bnad) +{ + if (bnad->work_q) { + destroy_workqueue(bnad->work_q); + bnad->work_q = NULL; + } + + if (bnad->bar0) + iounmap(bnad->bar0); +} + +/* + * Initialize locks + a) Per ioceth mutes used for serializing configuration + changes from OS interface + b) spin lock used to protect bna state machine + */ +static void +bnad_lock_init(struct bnad *bnad) +{ + spin_lock_init(&bnad->bna_lock); + mutex_init(&bnad->conf_mutex); +} + +static void +bnad_lock_uninit(struct bnad *bnad) +{ + mutex_destroy(&bnad->conf_mutex); +} + +/* PCI Initialization */ +static int +bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev) +{ + int err; + + err = pci_enable_device(pdev); + if (err) + return err; + err = pci_request_regions(pdev, BNAD_NAME); + if (err) + goto disable_device; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + goto release_regions; + pci_set_master(pdev); + return 0; + +release_regions: + pci_release_regions(pdev); +disable_device: + pci_disable_device(pdev); + + return err; +} + +static void +bnad_pci_uninit(struct pci_dev *pdev) +{ + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static int +bnad_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *pcidev_id) +{ + int err; + struct bnad *bnad; + struct bna *bna; + struct net_device *netdev; + struct bfa_pcidev pcidev_info; + unsigned long flags; + + mutex_lock(&bnad_fwimg_mutex); + if (!cna_get_firmware_buf(pdev)) { + mutex_unlock(&bnad_fwimg_mutex); + dev_err(&pdev->dev, "failed to load firmware image!\n"); + return -ENODEV; + } + mutex_unlock(&bnad_fwimg_mutex); + + /* + * Allocates sizeof(struct net_device + struct bnad) + * bnad = netdev->priv + */ + netdev = alloc_etherdev(sizeof(struct bnad)); + if (!netdev) { + err = -ENOMEM; + return err; + } + bnad = netdev_priv(netdev); + bnad_lock_init(bnad); + bnad->id = atomic_inc_return(&bna_id) - 1; + + mutex_lock(&bnad->conf_mutex); + /* PCI initialization */ + err = bnad_pci_init(bnad, pdev); + if (err) + goto unlock_mutex; + + /* + * Initialize bnad structure + * Setup relation between pci_dev & netdev + */ + err = bnad_init(bnad, pdev, netdev); + if (err) + goto pci_uninit; + + /* Initialize netdev structure, set up ethtool ops */ + bnad_netdev_init(bnad); + + /* Set link to down state */ + netif_carrier_off(netdev); + + /* Setup the debugfs node for this bfad */ + if (bna_debugfs_enable) + bnad_debugfs_init(bnad); + + /* Get resource requirement form bna */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_res_req(&bnad->res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Allocate resources from bna */ + err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX); + if (err) + goto drv_uninit; + + bna = &bnad->bna; + + /* Setup pcidev_info for bna_init() */ + pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn); + pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn); + pcidev_info.device_id = bnad->pcidev->device; + pcidev_info.pci_bar_kva = bnad->bar0; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad->stats.bna_stats = &bna->stats; + + bnad_enable_msix(bnad); + err = bnad_mbox_irq_alloc(bnad); + if (err) + goto res_free; + + /* Set up timers */ + timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0); + timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0); + timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0); + 
timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout, + 0); + + /* + * Start the chip + * If the call back comes with error, we bail out. + * This is a catastrophic error. + */ + err = bnad_ioceth_enable(bnad); + if (err) { + dev_err(&pdev->dev, "initialization failed err=%d\n", err); + goto probe_success; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || + bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) { + bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1, + bna_attr(bna)->num_rxp - 1); + if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) || + bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) + err = -EIO; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (err) + goto disable_ioceth; + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); + if (err) { + err = -EIO; + goto disable_ioceth; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Get the burnt-in mac */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr); + bnad_set_netdev_perm_addr(bnad); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + + /* Finally, reguister with net_device layer */ + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "registering net device failed\n"); + goto probe_uninit; + } + set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags); + + return 0; + +probe_success: + mutex_unlock(&bnad->conf_mutex); + return 0; + +probe_uninit: + mutex_lock(&bnad->conf_mutex); + bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); +disable_ioceth: + bnad_ioceth_disable(bnad); + del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_uninit(bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad_mbox_irq_free(bnad); + bnad_disable_msix(bnad); +res_free: + bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); +drv_uninit: + /* Remove the debugfs node for this bnad */ + kfree(bnad->regdata); + bnad_debugfs_uninit(bnad); + bnad_uninit(bnad); +pci_uninit: + bnad_pci_uninit(pdev); +unlock_mutex: + mutex_unlock(&bnad->conf_mutex); + bnad_lock_uninit(bnad); + free_netdev(netdev); + return err; +} + +static void +bnad_pci_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnad *bnad; + struct bna *bna; + unsigned long flags; + + if (!netdev) + return; + + bnad = netdev_priv(netdev); + bna = &bnad->bna; + + if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags)) + unregister_netdev(netdev); + + mutex_lock(&bnad->conf_mutex); + bnad_ioceth_disable(bnad); + del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer); + del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_uninit(bna); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX); + bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX); + bnad_mbox_irq_free(bnad); + bnad_disable_msix(bnad); + bnad_pci_uninit(pdev); + mutex_unlock(&bnad->conf_mutex); + bnad_lock_uninit(bnad); + /* Remove the debugfs 
node for this bnad */ + kfree(bnad->regdata); + bnad_debugfs_uninit(bnad); + bnad_uninit(bnad); + free_netdev(netdev); +} + +static const struct pci_device_id bnad_pci_id_table[] = { + { + PCI_DEVICE(PCI_VENDOR_ID_BROCADE, + PCI_DEVICE_ID_BROCADE_CT), + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00 + }, + { + PCI_DEVICE(PCI_VENDOR_ID_BROCADE, + BFA_PCI_DEVICE_ID_CT2), + .class = PCI_CLASS_NETWORK_ETHERNET << 8, + .class_mask = 0xffff00 + }, + {0, }, +}; + +MODULE_DEVICE_TABLE(pci, bnad_pci_id_table); + +static struct pci_driver bnad_pci_driver = { + .name = BNAD_NAME, + .id_table = bnad_pci_id_table, + .probe = bnad_pci_probe, + .remove = bnad_pci_remove, +}; + +static int __init +bnad_module_init(void) +{ + int err; + + bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover); + + err = pci_register_driver(&bnad_pci_driver); + if (err < 0) { + pr_err("bna: PCI driver registration failed err=%d\n", err); + return err; + } + + return 0; +} + +static void __exit +bnad_module_exit(void) +{ + pci_unregister_driver(&bnad_pci_driver); + release_firmware(bfi_fw); +} + +module_init(bnad_module_init); +module_exit(bnad_module_exit); + +MODULE_AUTHOR("Brocade"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver"); +MODULE_FIRMWARE(CNA_FW_FILE_CT); +MODULE_FIRMWARE(CNA_FW_FILE_CT2); diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h new file mode 100644 index 0000000000..627a93ce38 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad.h @@ -0,0 +1,423 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#ifndef __BNAD_H__ +#define __BNAD_H__ + +#include <linux/rtnetlink.h> +#include <linux/workqueue.h> +#include <linux/ipv6.h> +#include <linux/etherdevice.h> +#include <linux/mutex.h> +#include <linux/firmware.h> +#include <linux/if_vlan.h> + +/* Fix for IA64 */ +#include <asm/checksum.h> +#include <net/ip6_checksum.h> + +#include <net/ip.h> +#include <net/tcp.h> + +#include "bna.h" + +#define BNAD_TXQ_DEPTH 2048 +#define BNAD_RXQ_DEPTH 2048 + +#define BNAD_MAX_TX 1 +#define BNAD_MAX_TXQ_PER_TX 8 /* 8 priority queues */ +#define BNAD_TXQ_NUM 1 + +#define BNAD_MAX_RX 1 +#define BNAD_MAX_RXP_PER_RX 16 +#define BNAD_MAX_RXQ_PER_RXP 2 + +/* + * Control structure pointed to ccb->ctrl, which + * determines the NAPI / LRO behavior CCB + * There is 1:1 corres. 
between ccb & ctrl + */ +struct bnad_rx_ctrl { + struct bna_ccb *ccb; + struct bnad *bnad; + unsigned long flags; + struct napi_struct napi; + u64 rx_intr_ctr; + u64 rx_poll_ctr; + u64 rx_schedule; + u64 rx_keep_poll; + u64 rx_complete; +}; + +#define BNAD_RXMODE_PROMISC_DEFAULT BNA_RXMODE_PROMISC + +/* + * GLOBAL #defines (CONSTANTS) + */ +#define BNAD_NAME "bna" +#define BNAD_NAME_LEN 64 + +#define BNAD_MAILBOX_MSIX_INDEX 0 +#define BNAD_MAILBOX_MSIX_VECTORS 1 +#define BNAD_INTX_TX_IB_BITMASK 0x1 +#define BNAD_INTX_RX_IB_BITMASK 0x2 + +#define BNAD_STATS_TIMER_FREQ 1000 /* in msecs */ +#define BNAD_DIM_TIMER_FREQ 1000 /* in msecs */ + +#define BNAD_IOCETH_TIMEOUT 10000 + +#define BNAD_MIN_Q_DEPTH 512 +#define BNAD_MAX_RXQ_DEPTH 16384 +#define BNAD_MAX_TXQ_DEPTH 2048 + +#define BNAD_JUMBO_MTU 9000 + +#define BNAD_NETIF_WAKE_THRESHOLD 8 + +#define BNAD_RXQ_REFILL_THRESHOLD_SHIFT 3 + +/* Bit positions for tcb->flags */ +#define BNAD_TXQ_FREE_SENT 0 +#define BNAD_TXQ_TX_STARTED 1 + +/* Bit positions for rcb->flags */ +#define BNAD_RXQ_STARTED 0 +#define BNAD_RXQ_POST_OK 1 + +/* Resource limits */ +#define BNAD_NUM_TXQ (bnad->num_tx * bnad->num_txq_per_tx) +#define BNAD_NUM_RXP (bnad->num_rx * bnad->num_rxp_per_rx) + +#define BNAD_FRAME_SIZE(_mtu) \ + (ETH_HLEN + VLAN_HLEN + (_mtu) + ETH_FCS_LEN) + +/* + * DATA STRUCTURES + */ + +/* enums */ +enum bnad_intr_source { + BNAD_INTR_TX = 1, + BNAD_INTR_RX = 2 +}; + +enum bnad_link_state { + BNAD_LS_DOWN = 0, + BNAD_LS_UP = 1 +}; + +struct bnad_iocmd_comp { + struct bnad *bnad; + struct completion comp; + int comp_status; +}; + +struct bnad_completion { + struct completion ioc_comp; + struct completion ucast_comp; + struct completion mcast_comp; + struct completion tx_comp; + struct completion rx_comp; + struct completion stats_comp; + struct completion enet_comp; + struct completion mtu_comp; + + u8 ioc_comp_status; + u8 ucast_comp_status; + u8 mcast_comp_status; + u8 tx_comp_status; + u8 rx_comp_status; + u8 stats_comp_status; + u8 port_comp_status; + u8 mtu_comp_status; +}; + +/* Tx Rx Control Stats */ +struct bnad_drv_stats { + u64 netif_queue_stop; + u64 netif_queue_wakeup; + u64 netif_queue_stopped; + u64 tso4; + u64 tso6; + u64 tso_err; + u64 tcpcsum_offload; + u64 udpcsum_offload; + u64 csum_help; + u64 tx_skb_too_short; + u64 tx_skb_stopping; + u64 tx_skb_max_vectors; + u64 tx_skb_mss_too_long; + u64 tx_skb_tso_too_short; + u64 tx_skb_tso_prepare; + u64 tx_skb_non_tso_too_long; + u64 tx_skb_tcp_hdr; + u64 tx_skb_udp_hdr; + u64 tx_skb_csum_err; + u64 tx_skb_headlen_too_long; + u64 tx_skb_headlen_zero; + u64 tx_skb_frag_zero; + u64 tx_skb_len_mismatch; + u64 tx_skb_map_failed; + + u64 hw_stats_updates; + u64 netif_rx_dropped; + + u64 link_toggle; + u64 cee_toggle; + + u64 rxp_info_alloc_failed; + u64 mbox_intr_disabled; + u64 mbox_intr_enabled; + u64 tx_unmap_q_alloc_failed; + u64 rx_unmap_q_alloc_failed; + + u64 rxbuf_alloc_failed; + u64 rxbuf_map_failed; +}; + +/* Complete driver stats */ +struct bnad_stats { + struct bnad_drv_stats drv_stats; + struct bna_stats *bna_stats; +}; + +/* Tx / Rx Resources */ +struct bnad_tx_res_info { + struct bna_res_info res_info[BNA_TX_RES_T_MAX]; +}; + +struct bnad_rx_res_info { + struct bna_res_info res_info[BNA_RX_RES_T_MAX]; +}; + +struct bnad_tx_info { + struct bna_tx *tx; /* 1:1 between tx_info & tx */ + struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; + u32 tx_id; + struct delayed_work tx_cleanup_work; +} ____cacheline_aligned; + +struct bnad_rx_info { + struct bna_rx *rx; /* 1:1 between rx_info & rx */ + + 
struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; + u32 rx_id; + struct work_struct rx_cleanup_work; +} ____cacheline_aligned; + +struct bnad_tx_vector { + DEFINE_DMA_UNMAP_ADDR(dma_addr); + DEFINE_DMA_UNMAP_LEN(dma_len); +}; + +struct bnad_tx_unmap { + struct sk_buff *skb; + u32 nvecs; + struct bnad_tx_vector vectors[BFI_TX_MAX_VECTORS_PER_WI]; +}; + +struct bnad_rx_vector { + DEFINE_DMA_UNMAP_ADDR(dma_addr); + u32 len; +}; + +struct bnad_rx_unmap { + struct page *page; + struct sk_buff *skb; + struct bnad_rx_vector vector; + u32 page_offset; +}; + +enum bnad_rxbuf_type { + BNAD_RXBUF_NONE = 0, + BNAD_RXBUF_SK_BUFF = 1, + BNAD_RXBUF_PAGE = 2, + BNAD_RXBUF_MULTI_BUFF = 3 +}; + +#define BNAD_RXBUF_IS_SK_BUFF(_type) ((_type) == BNAD_RXBUF_SK_BUFF) +#define BNAD_RXBUF_IS_MULTI_BUFF(_type) ((_type) == BNAD_RXBUF_MULTI_BUFF) + +struct bnad_rx_unmap_q { + int reuse_pi; + int alloc_order; + u32 map_size; + enum bnad_rxbuf_type type; + struct bnad_rx_unmap unmap[] ____cacheline_aligned; +}; + +#define BNAD_PCI_DEV_IS_CAT2(_bnad) \ + ((_bnad)->pcidev->device == BFA_PCI_DEVICE_ID_CT2) + +/* Bit mask values for bnad->cfg_flags */ +#define BNAD_CF_DIM_ENABLED 0x01 /* DIM */ +#define BNAD_CF_PROMISC 0x02 +#define BNAD_CF_ALLMULTI 0x04 +#define BNAD_CF_DEFAULT 0x08 +#define BNAD_CF_MSIX 0x10 /* If in MSIx mode */ + +/* Defines for run_flags bit-mask */ +/* Set, tested & cleared using xxx_bit() functions */ +/* Values indicated bit positions */ +#define BNAD_RF_CEE_RUNNING 0 +#define BNAD_RF_MTU_SET 1 +#define BNAD_RF_MBOX_IRQ_DISABLED 2 +#define BNAD_RF_NETDEV_REGISTERED 3 +#define BNAD_RF_DIM_TIMER_RUNNING 4 +#define BNAD_RF_STATS_TIMER_RUNNING 5 +#define BNAD_RF_TX_PRIO_SET 6 + +struct bnad { + struct net_device *netdev; + u32 id; + + /* Data path */ + struct bnad_tx_info tx_info[BNAD_MAX_TX]; + struct bnad_rx_info rx_info[BNAD_MAX_RX]; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + /* + * These q numbers are global only because + * they are used to calculate MSIx vectors. + * Actually the exact # of queues are per Tx/Rx + * object. 
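+ * For example, with one Tx object carrying one TxQ and one Rx object
+ * carrying N RxPs, bnad_init() computes
+ * msix_num = (1 * 1) + (1 * N) + BNAD_MAILBOX_MSIX_VECTORS.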
+ */ + u32 num_tx; + u32 num_rx; + u32 num_txq_per_tx; + u32 num_rxp_per_rx; + + u32 txq_depth; + u32 rxq_depth; + + u8 tx_coalescing_timeo; + u8 rx_coalescing_timeo; + + struct bna_rx_config rx_config[BNAD_MAX_RX] ____cacheline_aligned; + struct bna_tx_config tx_config[BNAD_MAX_TX] ____cacheline_aligned; + + void __iomem *bar0; /* BAR0 address */ + + struct bna bna; + + u32 cfg_flags; + unsigned long run_flags; + + struct pci_dev *pcidev; + u64 mmio_start; + u64 mmio_len; + + u32 msix_num; + struct msix_entry *msix_table; + + struct mutex conf_mutex; + spinlock_t bna_lock ____cacheline_aligned; + + /* Timers */ + struct timer_list ioc_timer; + struct timer_list dim_timer; + struct timer_list stats_timer; + + /* Control path resources, memory & irq */ + struct bna_res_info res_info[BNA_RES_T_MAX]; + struct bna_res_info mod_res_info[BNA_MOD_RES_T_MAX]; + struct bnad_tx_res_info tx_res_info[BNAD_MAX_TX]; + struct bnad_rx_res_info rx_res_info[BNAD_MAX_RX]; + + struct bnad_completion bnad_completions; + + /* Burnt in MAC address */ + u8 perm_addr[ETH_ALEN]; + + struct workqueue_struct *work_q; + + /* Statistics */ + struct bnad_stats stats; + + struct bnad_diag *diag; + + char adapter_name[BNAD_NAME_LEN]; + char port_name[BNAD_NAME_LEN]; + char mbox_irq_name[BNAD_NAME_LEN]; + char wq_name[BNAD_NAME_LEN]; + + /* debugfs specific data */ + char *regdata; + u32 reglen; + struct dentry *bnad_dentry_files[5]; + struct dentry *port_debugfs_root; +}; + +struct bnad_drvinfo { + struct bfa_ioc_attr ioc_attr; + struct bfa_cee_attr cee_attr; + struct bfa_flash_attr flash_attr; + u32 cee_status; + u32 flash_status; +}; + +/* + * EXTERN VARIABLES + */ +extern const struct firmware *bfi_fw; + +/* + * EXTERN PROTOTYPES + */ +u32 *cna_get_firmware_buf(struct pci_dev *pdev); +/* Netdev entry point prototypes */ +void bnad_set_rx_mode(struct net_device *netdev); +struct net_device_stats *bnad_get_netdev_stats(struct net_device *netdev); +int bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr); +int bnad_enable_default_bcast(struct bnad *bnad); +void bnad_restore_vlans(struct bnad *bnad, u32 rx_id); +void bnad_set_ethtool_ops(struct net_device *netdev); +void bnad_cb_completion(void *arg, enum bfa_status status); + +/* Configuration & setup */ +void bnad_tx_coalescing_timeo_set(struct bnad *bnad); +void bnad_rx_coalescing_timeo_set(struct bnad *bnad); + +int bnad_setup_rx(struct bnad *bnad, u32 rx_id); +int bnad_setup_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_tx(struct bnad *bnad, u32 tx_id); +void bnad_destroy_rx(struct bnad *bnad, u32 rx_id); + +/* Timer start/stop protos */ +void bnad_dim_timer_start(struct bnad *bnad); + +/* Statistics */ +void bnad_netdev_qstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); +void bnad_netdev_hwstats_fill(struct bnad *bnad, + struct rtnl_link_stats64 *stats); + +/* Debugfs */ +void bnad_debugfs_init(struct bnad *bnad); +void bnad_debugfs_uninit(struct bnad *bnad); + +/* MACROS */ +/* To set & get the stats counters */ +#define BNAD_UPDATE_CTR(_bnad, _ctr) \ + (((_bnad)->stats.drv_stats._ctr)++) + +#define BNAD_GET_CTR(_bnad, _ctr) ((_bnad)->stats.drv_stats._ctr) + +#define bnad_enable_rx_irq_unsafe(_ccb) \ +{ \ + if (likely(test_bit(BNAD_RXQ_STARTED, &(_ccb)->rcb[0]->flags))) {\ + bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ + (_ccb)->rx_coalescing_timeo); \ + bna_ib_ack((_ccb)->i_dbell, 0); \ + } \ +} + +#endif /* __BNAD_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c 
b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c new file mode 100644 index 0000000000..7246e13dd5 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c @@ -0,0 +1,561 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include <linux/debugfs.h> +#include <linux/module.h> +#include "bnad.h" + +/* + * BNA debufs interface + * + * To access the interface, debugfs file system should be mounted + * if not already mounted using: + * mount -t debugfs none /sys/kernel/debug + * + * BNA Hierarchy: + * - bna/pci_dev:<pci_name> + * where the pci_name corresponds to the one under /sys/bus/pci/drivers/bna + * + * Debugging service available per pci_dev: + * fwtrc: To collect current firmware trace. + * fwsave: To collect last saved fw trace as a result of firmware crash. + * regwr: To write one word to chip register + * regrd: To read one or more words from chip register. + */ + +struct bnad_debug_info { + char *debug_buffer; + void *i_private; + int buffer_len; +}; + +static int +bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; + + fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + fw_debug = NULL; + return -ENOMEM; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (rc != BFA_STATUS_OK) { + kfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + fw_debug = NULL; + netdev_warn(bnad->netdev, "failed to collect fwtrc\n"); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int +bnad_debugfs_open_fwsave(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *fw_debug; + unsigned long flags; + int rc; + + fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!fw_debug) + return -ENOMEM; + + fw_debug->buffer_len = BNA_DBG_FWTRC_LEN; + + fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL); + if (!fw_debug->debug_buffer) { + kfree(fw_debug); + fw_debug = NULL; + return -ENOMEM; + } + + spin_lock_irqsave(&bnad->bna_lock, flags); + rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc, + fw_debug->debug_buffer, + &fw_debug->buffer_len); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) { + kfree(fw_debug->debug_buffer); + fw_debug->debug_buffer = NULL; + kfree(fw_debug); + fw_debug = NULL; + netdev_warn(bnad->netdev, "failed to collect fwsave\n"); + return -ENOMEM; + } + + file->private_data = fw_debug; + + return 0; +} + +static int +bnad_debugfs_open_reg(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *reg_debug; + + reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!reg_debug) + return -ENOMEM; + + reg_debug->i_private = inode->i_private; + + file->private_data = reg_debug; + + return 0; +} + +static int 
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len) +{ + struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer; + struct bnad_iocmd_comp fcomp; + unsigned long flags = 0; + int ret = BFA_STATUS_FAILED; + + /* Get IOC info */ + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + /* Retrieve CEE related info */ + fcomp.bnad = bnad; + fcomp.comp_status = 0; + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto out; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + drvinfo->cee_status = fcomp.comp_status; + + /* Retrieve flash partition info */ + fcomp.comp_status = 0; + reinit_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto out; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + drvinfo->flash_status = fcomp.comp_status; +out: + return ret; +} + +static int +bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file) +{ + struct bnad *bnad = inode->i_private; + struct bnad_debug_info *drv_info; + int rc; + + drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL); + if (!drv_info) + return -ENOMEM; + + drv_info->buffer_len = sizeof(struct bnad_drvinfo); + + drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL); + if (!drv_info->debug_buffer) { + kfree(drv_info); + drv_info = NULL; + return -ENOMEM; + } + + mutex_lock(&bnad->conf_mutex); + rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer, + drv_info->buffer_len); + mutex_unlock(&bnad->conf_mutex); + if (rc != BFA_STATUS_OK) { + kfree(drv_info->debug_buffer); + drv_info->debug_buffer = NULL; + kfree(drv_info); + drv_info = NULL; + netdev_warn(bnad->netdev, "failed to collect drvinfo\n"); + return -ENOMEM; + } + + file->private_data = drv_info; + + return 0; +} + +/* Changes the current file position */ +static loff_t +bnad_debugfs_lseek(struct file *file, loff_t offset, int orig) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return -EINVAL; + + return fixed_size_llseek(file, offset, orig, debug->buffer_len); +} + +static ssize_t +bnad_debugfs_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug || !debug->debug_buffer) + return 0; + + return simple_read_from_buffer(buf, nbytes, pos, + debug->debug_buffer, debug->buffer_len); +} + +#define BFA_REG_CT_ADDRSZ (0x40000) +#define BFA_REG_CB_ADDRSZ (0x20000) +#define BFA_REG_ADDRSZ(__ioc) \ + ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \ + BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)) +#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1) + +/* + * Function to check if the register offset passed is valid. 
+ */ +static int +bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len) +{ + u8 area; + + /* check [16:15] */ + area = (offset >> 15) & 0x7; + if (area == 0) { + /* PCIe core register */ + if (offset + (len << 2) > 0x8000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else if (area == 0x1) { + /* CB 32 KB memory page */ + if (offset + (len << 2) > 0x10000) /* 8k dwords or 32KB */ + return BFA_STATUS_EINVAL; + } else { + /* CB register space 64KB */ + if (offset + (len << 2) > BFA_REG_ADDRMSK(ioc)) + return BFA_STATUS_EINVAL; + } + return BFA_STATUS_OK; +} + +static ssize_t +bnad_debugfs_read_regrd(struct file *file, char __user *buf, + size_t nbytes, loff_t *pos) +{ + struct bnad_debug_info *regrd_debug = file->private_data; + struct bnad *bnad = (struct bnad *)regrd_debug->i_private; + ssize_t rc; + + if (!bnad->regdata) + return 0; + + rc = simple_read_from_buffer(buf, nbytes, pos, + bnad->regdata, bnad->reglen); + + if ((*pos + nbytes) >= bnad->reglen) { + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + } + + return rc; +} + +static ssize_t +bnad_debugfs_write_regrd(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bnad_debug_info *regrd_debug = file->private_data; + struct bnad *bnad = (struct bnad *)regrd_debug->i_private; + struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; + int rc, i; + u32 addr, len; + u32 *regbuf; + void __iomem *rb, *reg_addr; + unsigned long flags; + void *kern_buf; + + /* Copy the user space buf */ + kern_buf = memdup_user(buf, nbytes); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + rc = sscanf(kern_buf, "%x:%x", &addr, &len); + if (rc < 2 || len > UINT_MAX >> 2) { + netdev_warn(bnad->netdev, "failed to read user buffer\n"); + kfree(kern_buf); + return -EINVAL; + } + + kfree(kern_buf); + kfree(bnad->regdata); + bnad->reglen = 0; + + bnad->regdata = kzalloc(len << 2, GFP_KERNEL); + if (!bnad->regdata) + return -ENOMEM; + + bnad->reglen = len << 2; + rb = bfa_ioc_bar0(ioc); + addr &= BFA_REG_ADDRMSK(ioc); + + /* offset and len sanity check */ + rc = bna_reg_offset_check(ioc, addr, len); + if (rc) { + netdev_warn(bnad->netdev, "failed reg offset check\n"); + kfree(bnad->regdata); + bnad->regdata = NULL; + bnad->reglen = 0; + return -EINVAL; + } + + reg_addr = rb + addr; + regbuf = (u32 *)bnad->regdata; + spin_lock_irqsave(&bnad->bna_lock, flags); + for (i = 0; i < len; i++) { + *regbuf = readl(reg_addr); + regbuf++; + reg_addr += sizeof(u32); + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return nbytes; +} + +static ssize_t +bnad_debugfs_write_regwr(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct bnad_debug_info *debug = file->private_data; + struct bnad *bnad = (struct bnad *)debug->i_private; + struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc; + int rc; + u32 addr, val; + void __iomem *reg_addr; + unsigned long flags; + void *kern_buf; + + /* Copy the user space buf */ + kern_buf = memdup_user(buf, nbytes); + if (IS_ERR(kern_buf)) + return PTR_ERR(kern_buf); + + rc = sscanf(kern_buf, "%x:%x", &addr, &val); + if (rc < 2) { + netdev_warn(bnad->netdev, "failed to read user buffer\n"); + kfree(kern_buf); + return -EINVAL; + } + kfree(kern_buf); + + addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */ + + /* offset and len sanity check */ + rc = bna_reg_offset_check(ioc, addr, 1); + if (rc) { + netdev_warn(bnad->netdev, "failed reg offset check\n"); + return -EINVAL; + } + + reg_addr = (bfa_ioc_bar0(ioc)) + addr; + 
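/*
+ * Illustrative usage (with debugfs mounted as described in the header
+ * comment of this file, and <pci-id> standing for the pci_name() of the
+ * port), under /sys/kernel/debug/bna/pci_dev:<pci-id>/ :
+ *
+ *   echo "<offset>:<value>" > regwr   - write one word, both fields hex
+ *   echo "<offset>:<nwords>" > regrd  - capture <nwords> registers
+ *   cat regrd                         - read the captured words back
+ */ +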
spin_lock_irqsave(&bnad->bna_lock, flags); + writel(val, reg_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + return nbytes; +} + +static int +bnad_debugfs_release(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return 0; + + file->private_data = NULL; + kfree(debug); + return 0; +} + +static int +bnad_debugfs_buffer_release(struct inode *inode, struct file *file) +{ + struct bnad_debug_info *debug = file->private_data; + + if (!debug) + return 0; + + kfree(debug->debug_buffer); + + file->private_data = NULL; + kfree(debug); + debug = NULL; + return 0; +} + +static const struct file_operations bnad_debugfs_op_fwtrc = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_fwtrc, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +static const struct file_operations bnad_debugfs_op_fwsave = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_fwsave, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +static const struct file_operations bnad_debugfs_op_regrd = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_reg, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read_regrd, + .write = bnad_debugfs_write_regrd, + .release = bnad_debugfs_release, +}; + +static const struct file_operations bnad_debugfs_op_regwr = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_reg, + .llseek = bnad_debugfs_lseek, + .write = bnad_debugfs_write_regwr, + .release = bnad_debugfs_release, +}; + +static const struct file_operations bnad_debugfs_op_drvinfo = { + .owner = THIS_MODULE, + .open = bnad_debugfs_open_drvinfo, + .llseek = bnad_debugfs_lseek, + .read = bnad_debugfs_read, + .release = bnad_debugfs_buffer_release, +}; + +struct bnad_debugfs_entry { + const char *name; + umode_t mode; + const struct file_operations *fops; +}; + +static const struct bnad_debugfs_entry bnad_debugfs_files[] = { + { "fwtrc", S_IFREG | 0444, &bnad_debugfs_op_fwtrc, }, + { "fwsave", S_IFREG | 0444, &bnad_debugfs_op_fwsave, }, + { "regrd", S_IFREG | 0644, &bnad_debugfs_op_regrd, }, + { "regwr", S_IFREG | 0200, &bnad_debugfs_op_regwr, }, + { "drvinfo", S_IFREG | 0444, &bnad_debugfs_op_drvinfo, }, +}; + +static struct dentry *bna_debugfs_root; +static atomic_t bna_debugfs_port_count; + +/* Initialize debugfs interface for BNA */ +void +bnad_debugfs_init(struct bnad *bnad) +{ + const struct bnad_debugfs_entry *file; + char name[64]; + int i; + + /* Setup the BNA debugfs root directory*/ + if (!bna_debugfs_root) { + bna_debugfs_root = debugfs_create_dir("bna", NULL); + atomic_set(&bna_debugfs_port_count, 0); + if (!bna_debugfs_root) { + netdev_warn(bnad->netdev, + "debugfs root dir creation failed\n"); + return; + } + } + + /* Setup the pci_dev debugfs directory for the port */ + snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev)); + if (!bnad->port_debugfs_root) { + bnad->port_debugfs_root = + debugfs_create_dir(name, bna_debugfs_root); + + atomic_inc(&bna_debugfs_port_count); + + for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { + file = &bnad_debugfs_files[i]; + bnad->bnad_dentry_files[i] = + debugfs_create_file(file->name, + file->mode, + bnad->port_debugfs_root, + bnad, + file->fops); + if (!bnad->bnad_dentry_files[i]) { + netdev_warn(bnad->netdev, + "create %s entry failed\n", + file->name); + return; + } + } + } +} + +/* Uninitialize debugfs interface for BNA */ +void +bnad_debugfs_uninit(struct bnad 
*bnad) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) { + if (bnad->bnad_dentry_files[i]) { + debugfs_remove(bnad->bnad_dentry_files[i]); + bnad->bnad_dentry_files[i] = NULL; + } + } + + /* Remove the pci_dev debugfs directory for the port */ + if (bnad->port_debugfs_root) { + debugfs_remove(bnad->port_debugfs_root); + bnad->port_debugfs_root = NULL; + atomic_dec(&bna_debugfs_port_count); + } + + /* Remove the BNA debugfs root directory */ + if (atomic_read(&bna_debugfs_port_count) == 0) { + debugfs_remove(bna_debugfs_root); + bna_debugfs_root = NULL; + } +} diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c new file mode 100644 index 0000000000..df10edff56 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -0,0 +1,1098 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#include "cna.h" + +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/ethtool.h> +#include <linux/rtnetlink.h> + +#include "bna.h" + +#include "bnad.h" + +#define BNAD_NUM_TXF_COUNTERS 12 +#define BNAD_NUM_RXF_COUNTERS 10 +#define BNAD_NUM_CQ_COUNTERS (3 + 5) +#define BNAD_NUM_RXQ_COUNTERS 7 +#define BNAD_NUM_TXQ_COUNTERS 5 + +static const char *bnad_net_stats_strings[] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", + "tx_errors", + "rx_dropped", + "tx_dropped", + "multicast", + "collisions", + "rx_length_errors", + "rx_crc_errors", + "rx_frame_errors", + "tx_fifo_errors", + + "netif_queue_stop", + "netif_queue_wakeup", + "netif_queue_stopped", + "tso4", + "tso6", + "tso_err", + "tcpcsum_offload", + "udpcsum_offload", + "csum_help", + "tx_skb_too_short", + "tx_skb_stopping", + "tx_skb_max_vectors", + "tx_skb_mss_too_long", + "tx_skb_tso_too_short", + "tx_skb_tso_prepare", + "tx_skb_non_tso_too_long", + "tx_skb_tcp_hdr", + "tx_skb_udp_hdr", + "tx_skb_csum_err", + "tx_skb_headlen_too_long", + "tx_skb_headlen_zero", + "tx_skb_frag_zero", + "tx_skb_len_mismatch", + "tx_skb_map_failed", + "hw_stats_updates", + "netif_rx_dropped", + + "link_toggle", + "cee_toggle", + + "rxp_info_alloc_failed", + "mbox_intr_disabled", + "mbox_intr_enabled", + "tx_unmap_q_alloc_failed", + "rx_unmap_q_alloc_failed", + "rxbuf_alloc_failed", + "rxbuf_map_failed", + + "mac_stats_clr_cnt", + "mac_frame_64", + "mac_frame_65_127", + "mac_frame_128_255", + "mac_frame_256_511", + "mac_frame_512_1023", + "mac_frame_1024_1518", + "mac_frame_1518_1522", + "mac_rx_bytes", + "mac_rx_packets", + "mac_rx_fcs_error", + "mac_rx_multicast", + "mac_rx_broadcast", + "mac_rx_control_frames", + "mac_rx_pause", + "mac_rx_unknown_opcode", + "mac_rx_alignment_error", + "mac_rx_frame_length_error", + "mac_rx_code_error", + "mac_rx_carrier_sense_error", + "mac_rx_undersize", + "mac_rx_oversize", + "mac_rx_fragments", + "mac_rx_jabber", + "mac_rx_drop", + + "mac_tx_bytes", + "mac_tx_packets", + "mac_tx_multicast", + "mac_tx_broadcast", + "mac_tx_pause", + "mac_tx_deferral", + "mac_tx_excessive_deferral", + "mac_tx_single_collision", + "mac_tx_multiple_collision", + "mac_tx_late_collision", + "mac_tx_excessive_collision", + "mac_tx_total_collision", + "mac_tx_pause_honored", + "mac_tx_drop", + "mac_tx_jabber", + "mac_tx_fcs_error", + "mac_tx_control_frame", + "mac_tx_oversize", + 
"mac_tx_undersize", + "mac_tx_fragments", + + "bpc_tx_pause_0", + "bpc_tx_pause_1", + "bpc_tx_pause_2", + "bpc_tx_pause_3", + "bpc_tx_pause_4", + "bpc_tx_pause_5", + "bpc_tx_pause_6", + "bpc_tx_pause_7", + "bpc_tx_zero_pause_0", + "bpc_tx_zero_pause_1", + "bpc_tx_zero_pause_2", + "bpc_tx_zero_pause_3", + "bpc_tx_zero_pause_4", + "bpc_tx_zero_pause_5", + "bpc_tx_zero_pause_6", + "bpc_tx_zero_pause_7", + "bpc_tx_first_pause_0", + "bpc_tx_first_pause_1", + "bpc_tx_first_pause_2", + "bpc_tx_first_pause_3", + "bpc_tx_first_pause_4", + "bpc_tx_first_pause_5", + "bpc_tx_first_pause_6", + "bpc_tx_first_pause_7", + + "bpc_rx_pause_0", + "bpc_rx_pause_1", + "bpc_rx_pause_2", + "bpc_rx_pause_3", + "bpc_rx_pause_4", + "bpc_rx_pause_5", + "bpc_rx_pause_6", + "bpc_rx_pause_7", + "bpc_rx_zero_pause_0", + "bpc_rx_zero_pause_1", + "bpc_rx_zero_pause_2", + "bpc_rx_zero_pause_3", + "bpc_rx_zero_pause_4", + "bpc_rx_zero_pause_5", + "bpc_rx_zero_pause_6", + "bpc_rx_zero_pause_7", + "bpc_rx_first_pause_0", + "bpc_rx_first_pause_1", + "bpc_rx_first_pause_2", + "bpc_rx_first_pause_3", + "bpc_rx_first_pause_4", + "bpc_rx_first_pause_5", + "bpc_rx_first_pause_6", + "bpc_rx_first_pause_7", + + "rad_rx_frames", + "rad_rx_octets", + "rad_rx_vlan_frames", + "rad_rx_ucast", + "rad_rx_ucast_octets", + "rad_rx_ucast_vlan", + "rad_rx_mcast", + "rad_rx_mcast_octets", + "rad_rx_mcast_vlan", + "rad_rx_bcast", + "rad_rx_bcast_octets", + "rad_rx_bcast_vlan", + "rad_rx_drops", + + "rlb_rad_rx_frames", + "rlb_rad_rx_octets", + "rlb_rad_rx_vlan_frames", + "rlb_rad_rx_ucast", + "rlb_rad_rx_ucast_octets", + "rlb_rad_rx_ucast_vlan", + "rlb_rad_rx_mcast", + "rlb_rad_rx_mcast_octets", + "rlb_rad_rx_mcast_vlan", + "rlb_rad_rx_bcast", + "rlb_rad_rx_bcast_octets", + "rlb_rad_rx_bcast_vlan", + "rlb_rad_rx_drops", + + "fc_rx_ucast_octets", + "fc_rx_ucast", + "fc_rx_ucast_vlan", + "fc_rx_mcast_octets", + "fc_rx_mcast", + "fc_rx_mcast_vlan", + "fc_rx_bcast_octets", + "fc_rx_bcast", + "fc_rx_bcast_vlan", + + "fc_tx_ucast_octets", + "fc_tx_ucast", + "fc_tx_ucast_vlan", + "fc_tx_mcast_octets", + "fc_tx_mcast", + "fc_tx_mcast_vlan", + "fc_tx_bcast_octets", + "fc_tx_bcast", + "fc_tx_bcast_vlan", + "fc_tx_parity_errors", + "fc_tx_timeout", + "fc_tx_fid_parity_errors", +}; + +#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings) + +static int +bnad_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseCR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseLR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseCR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10000baseLR_Full); + cmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + cmd->base.port = PORT_FIBRE; + cmd->base.phy_address = 0; + + if (netif_carrier_ok(netdev)) { + cmd->base.speed = SPEED_10000; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int +bnad_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + /* 10G 
full duplex setting supported only */ + if (cmd->base.autoneg == AUTONEG_ENABLE) + return -EOPNOTSUPP; + + if ((cmd->base.speed == SPEED_10000) && + (cmd->base.duplex == DUPLEX_FULL)) + return 0; + + return -EOPNOTSUPP; +} + +static void +bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bfa_ioc_attr *ioc_attr; + unsigned long flags; + + strscpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver)); + + ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL); + if (ioc_attr) { + spin_lock_irqsave(&bnad->bna_lock, flags); + bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver, + sizeof(drvinfo->fw_version)); + kfree(ioc_attr); + } + + strscpy(drvinfo->bus_info, pci_name(bnad->pcidev), + sizeof(drvinfo->bus_info)); +} + +static void +bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) +{ + wolinfo->supported = 0; + wolinfo->wolopts = 0; +} + +static int bnad_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + /* Lock rqd. to access bnad->bna_lock */ + spin_lock_irqsave(&bnad->bna_lock, flags); + coalesce->use_adaptive_rx_coalesce = + (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false; + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo * + BFI_COALESCING_TIMER_UNIT; + coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo * + BFI_COALESCING_TIMER_UNIT; + coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT; + + return 0; +} + +static int bnad_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + int to_del = 0; + + if (coalesce->rx_coalesce_usecs == 0 || + coalesce->rx_coalesce_usecs > + BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) + return -EINVAL; + + if (coalesce->tx_coalesce_usecs == 0 || + coalesce->tx_coalesce_usecs > + BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + /* + * Do not need to store rx_coalesce_usecs here + * Every time DIM is disabled, we can get it from the + * stack. 
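+ * (The usecs values supplied by ethtool are converted below into units
+ * of BFI_COALESCING_TIMER_UNIT before being cached in
+ * bnad->{rx,tx}_coalescing_timeo.)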
+ */ + spin_lock_irqsave(&bnad->bna_lock, flags); + if (coalesce->use_adaptive_rx_coalesce) { + if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) { + bnad->cfg_flags |= BNAD_CF_DIM_ENABLED; + bnad_dim_timer_start(bnad); + } + } else { + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) { + bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED; + if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED && + test_bit(BNAD_RF_DIM_TIMER_RUNNING, + &bnad->run_flags)) { + clear_bit(BNAD_RF_DIM_TIMER_RUNNING, + &bnad->run_flags); + to_del = 1; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + if (to_del) + del_timer_sync(&bnad->dim_timer); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_rx_coalescing_timeo_set(bnad); + } + } + if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT) { + bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT; + bnad_tx_coalescing_timeo_set(bnad); + } + + if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT) { + bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs / + BFI_COALESCING_TIMER_UNIT; + + if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) + bnad_rx_coalescing_timeo_set(bnad); + + } + + /* Add Tx Inter-pkt DMA count? */ + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); + return 0; +} + +static void +bnad_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *kernel_ringparam, + struct netlink_ext_ack *extack) +{ + struct bnad *bnad = netdev_priv(netdev); + + ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH; + ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH; + + ringparam->rx_pending = bnad->rxq_depth; + ringparam->tx_pending = bnad->txq_depth; +} + +static int +bnad_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ringparam, + struct kernel_ethtool_ringparam *kernel_ringparam, + struct netlink_ext_ack *extack) +{ + int i, current_err, err = 0; + struct bnad *bnad = netdev_priv(netdev); + unsigned long flags; + + mutex_lock(&bnad->conf_mutex); + if (ringparam->rx_pending == bnad->rxq_depth && + ringparam->tx_pending == bnad->txq_depth) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH || + ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH || + !is_power_of_2(ringparam->rx_pending)) { + mutex_unlock(&bnad->conf_mutex); + return -EINVAL; + } + if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH || + ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH || + !is_power_of_2(ringparam->tx_pending)) { + mutex_unlock(&bnad->conf_mutex); + return -EINVAL; + } + + if (ringparam->rx_pending != bnad->rxq_depth) { + bnad->rxq_depth = ringparam->rx_pending; + if (!netif_running(netdev)) { + mutex_unlock(&bnad->conf_mutex); + return 0; + } + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + bnad_destroy_rx(bnad, i); + current_err = bnad_setup_rx(bnad, i); + if (current_err && !err) + err = current_err; + } + + if (!err && bnad->rx_info[0].rx) { + /* restore rx configuration */ + bnad_restore_vlans(bnad, 0); + bnad_enable_default_bcast(bnad); + spin_lock_irqsave(&bnad->bna_lock, flags); + bnad_mac_addr_set_locked(bnad, netdev->dev_addr); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI | + BNAD_CF_PROMISC); + bnad_set_rx_mode(netdev); + } + } + if (ringparam->tx_pending != bnad->txq_depth) { + bnad->txq_depth = ringparam->tx_pending; + if (!netif_running(netdev)) { + 
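/* Interface is down: the new depth is only cached here and is
+ * picked up when the Tx queues are next set up (e.g. at the
+ * next open).
+ */ +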
mutex_unlock(&bnad->conf_mutex); + return 0; + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + bnad_destroy_tx(bnad, i); + current_err = bnad_setup_tx(bnad, i); + if (current_err && !err) + err = current_err; + } + } + + mutex_unlock(&bnad->conf_mutex); + return err; +} + +static void +bnad_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct bnad *bnad = netdev_priv(netdev); + + pauseparam->autoneg = 0; + pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause; + pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause; +} + +static int +bnad_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bna_pause_config pause_config; + unsigned long flags; + + if (pauseparam->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + mutex_lock(&bnad->conf_mutex); + if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause || + pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) { + pause_config.rx_pause = pauseparam->rx_pause; + pause_config.tx_pause = pauseparam->tx_pause; + spin_lock_irqsave(&bnad->bna_lock, flags); + bna_enet_pause_config(&bnad->bna.enet, &pause_config); + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } + mutex_unlock(&bnad->conf_mutex); + return 0; +} + +static void bnad_get_txf_strings(u8 **string, int f_num) +{ + ethtool_sprintf(string, "txf%d_ucast_octets", f_num); + ethtool_sprintf(string, "txf%d_ucast", f_num); + ethtool_sprintf(string, "txf%d_ucast_vlan", f_num); + ethtool_sprintf(string, "txf%d_mcast_octets", f_num); + ethtool_sprintf(string, "txf%d_mcast", f_num); + ethtool_sprintf(string, "txf%d_mcast_vlan", f_num); + ethtool_sprintf(string, "txf%d_bcast_octets", f_num); + ethtool_sprintf(string, "txf%d_bcast", f_num); + ethtool_sprintf(string, "txf%d_bcast_vlan", f_num); + ethtool_sprintf(string, "txf%d_errors", f_num); + ethtool_sprintf(string, "txf%d_filter_vlan", f_num); + ethtool_sprintf(string, "txf%d_filter_mac_sa", f_num); +} + +static void bnad_get_rxf_strings(u8 **string, int f_num) +{ + ethtool_sprintf(string, "rxf%d_ucast_octets", f_num); + ethtool_sprintf(string, "rxf%d_ucast", f_num); + ethtool_sprintf(string, "rxf%d_ucast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_mcast_octets", f_num); + ethtool_sprintf(string, "rxf%d_mcast", f_num); + ethtool_sprintf(string, "rxf%d_mcast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_bcast_octets", f_num); + ethtool_sprintf(string, "rxf%d_bcast", f_num); + ethtool_sprintf(string, "rxf%d_bcast_vlan", f_num); + ethtool_sprintf(string, "rxf%d_frame_drops", f_num); +} + +static void bnad_get_cq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "cq%d_producer_index", q_num); + ethtool_sprintf(string, "cq%d_consumer_index", q_num); + ethtool_sprintf(string, "cq%d_hw_producer_index", q_num); + ethtool_sprintf(string, "cq%d_intr", q_num); + ethtool_sprintf(string, "cq%d_poll", q_num); + ethtool_sprintf(string, "cq%d_schedule", q_num); + ethtool_sprintf(string, "cq%d_keep_poll", q_num); + ethtool_sprintf(string, "cq%d_complete", q_num); +} + +static void bnad_get_rxq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "rxq%d_packets", q_num); + ethtool_sprintf(string, "rxq%d_bytes", q_num); + ethtool_sprintf(string, "rxq%d_packets_with_error", q_num); + ethtool_sprintf(string, "rxq%d_allocbuf_failed", q_num); + ethtool_sprintf(string, "rxq%d_mapbuf_failed", q_num); + ethtool_sprintf(string, "rxq%d_producer_index", q_num); 
+ ethtool_sprintf(string, "rxq%d_consumer_index", q_num); +} + +static void bnad_get_txq_strings(u8 **string, int q_num) +{ + ethtool_sprintf(string, "txq%d_packets", q_num); + ethtool_sprintf(string, "txq%d_bytes", q_num); + ethtool_sprintf(string, "txq%d_producer_index", q_num); + ethtool_sprintf(string, "txq%d_consumer_index", q_num); + ethtool_sprintf(string, "txq%d_hw_consumer_index", q_num); +} + +static void +bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, q_num; + u32 bmap; + + if (stringset != ETH_SS_STATS) + return; + + mutex_lock(&bnad->conf_mutex); + + for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) { + BUG_ON(!(strlen(bnad_net_stats_strings[i]) < ETH_GSTRING_LEN)); + ethtool_sprintf(&string, bnad_net_stats_strings[i]); + } + + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + bnad_get_txf_strings(&string, i); + bmap >>= 1; + } + + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++, bmap >>= 1) { + if (bmap & 1) + bnad_get_rxf_strings(&string, i); + bmap >>= 1; + } + + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + bnad_get_cq_strings(&string, q_num++); + } + + q_num = 0; + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) { + bnad_get_rxq_strings(&string, q_num++); + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) + bnad_get_rxq_strings(&string, q_num++); + } + } + + q_num = 0; + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) + bnad_get_txq_strings(&string, q_num++); + } + + mutex_unlock(&bnad->conf_mutex); +} + +static int +bnad_get_stats_count_locked(struct net_device *netdev) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0; + u32 bmap; + + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + txf_active_num++; + bmap >>= 1; + } + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) + rxf_active_num++; + bmap >>= 1; + } + count = BNAD_ETHTOOL_STATS_NUM + + txf_active_num * BNAD_NUM_TXF_COUNTERS + + rxf_active_num * BNAD_NUM_RXF_COUNTERS; + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS; + count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq) + count += BNAD_NUM_RXQ_COUNTERS; + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS; + } + return count; +} + +static int +bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi) +{ + int i, j; + struct bna_rcb *rcb = NULL; + struct bna_tcb *tcb = NULL; + + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && + bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) { + buf[bi++] = bnad->rx_info[i].rx_ctrl[j]. 
+ ccb->producer_index; + buf[bi++] = 0; /* ccb->consumer_index */ + buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j]. + ccb->hw_producer_index); + + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_intr_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_poll_ctr; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_schedule; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_keep_poll; + buf[bi++] = bnad->rx_info[i]. + rx_ctrl[j].rx_complete; + } + } + for (i = 0; i < bnad->num_rx; i++) { + if (!bnad->rx_info[i].rx) + continue; + for (j = 0; j < bnad->num_rxp_per_rx; j++) + if (bnad->rx_info[i].rx_ctrl[j].ccb) { + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[0]->rxq) { + rcb = bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[0]; + buf[bi++] = rcb->rxq->rx_packets; + buf[bi++] = rcb->rxq->rx_bytes; + buf[bi++] = rcb->rxq-> + rx_packets_with_error; + buf[bi++] = rcb->rxq-> + rxbuf_alloc_failed; + buf[bi++] = rcb->rxq->rxbuf_map_failed; + buf[bi++] = rcb->producer_index; + buf[bi++] = rcb->consumer_index; + } + if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] && + bnad->rx_info[i].rx_ctrl[j].ccb-> + rcb[1]->rxq) { + rcb = bnad->rx_info[i].rx_ctrl[j]. + ccb->rcb[1]; + buf[bi++] = rcb->rxq->rx_packets; + buf[bi++] = rcb->rxq->rx_bytes; + buf[bi++] = rcb->rxq-> + rx_packets_with_error; + buf[bi++] = rcb->rxq-> + rxbuf_alloc_failed; + buf[bi++] = rcb->rxq->rxbuf_map_failed; + buf[bi++] = rcb->producer_index; + buf[bi++] = rcb->consumer_index; + } + } + } + + for (i = 0; i < bnad->num_tx; i++) { + if (!bnad->tx_info[i].tx) + continue; + for (j = 0; j < bnad->num_txq_per_tx; j++) + if (bnad->tx_info[i].tcb[j] && + bnad->tx_info[i].tcb[j]->txq) { + tcb = bnad->tx_info[i].tcb[j]; + buf[bi++] = tcb->txq->tx_packets; + buf[bi++] = tcb->txq->tx_bytes; + buf[bi++] = tcb->producer_index; + buf[bi++] = tcb->consumer_index; + buf[bi++] = *(tcb->hw_consumer_index); + } + } + + return bi; +} + +static void +bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, + u64 *buf) +{ + struct bnad *bnad = netdev_priv(netdev); + int i, j, bi = 0; + unsigned long flags; + struct rtnl_link_stats64 net_stats64; + u64 *stats64; + u32 bmap; + + mutex_lock(&bnad->conf_mutex); + if (bnad_get_stats_count_locked(netdev) != stats->n_stats) { + mutex_unlock(&bnad->conf_mutex); + return; + } + + /* + * Used bna_lock to sync reads from bna_stats, which is written + * under the same lock + */ + spin_lock_irqsave(&bnad->bna_lock, flags); + + memset(&net_stats64, 0, sizeof(net_stats64)); + bnad_netdev_qstats_fill(bnad, &net_stats64); + bnad_netdev_hwstats_fill(bnad, &net_stats64); + + buf[bi++] = net_stats64.rx_packets; + buf[bi++] = net_stats64.tx_packets; + buf[bi++] = net_stats64.rx_bytes; + buf[bi++] = net_stats64.tx_bytes; + buf[bi++] = net_stats64.rx_errors; + buf[bi++] = net_stats64.tx_errors; + buf[bi++] = net_stats64.rx_dropped; + buf[bi++] = net_stats64.tx_dropped; + buf[bi++] = net_stats64.multicast; + buf[bi++] = net_stats64.collisions; + buf[bi++] = net_stats64.rx_length_errors; + buf[bi++] = net_stats64.rx_crc_errors; + buf[bi++] = net_stats64.rx_frame_errors; + buf[bi++] = net_stats64.tx_fifo_errors; + + /* Get netif_queue_stopped from stack */ + bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev); + + /* Fill driver stats into ethtool buffers */ + stats64 = (u64 *)&bnad->stats.drv_stats; + for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) + buf[bi++] = stats64[i]; + + /* Fill hardware stats excluding the rxf/txf into ethtool bufs */ + stats64 = 
(u64 *) &bnad->stats.bna_stats->hw_stats; + for (i = 0; + i < offsetof(struct bfi_enet_stats, rxf_stats[0]) / + sizeof(u64); + i++) + buf[bi++] = stats64[i]; + + /* Fill txf stats into ethtool buffers */ + bmap = bna_tx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats64 = (u64 *)&bnad->stats.bna_stats-> + hw_stats.txf_stats[i]; + for (j = 0; j < sizeof(struct bfi_enet_stats_txf) / + sizeof(u64); j++) + buf[bi++] = stats64[j]; + } + bmap >>= 1; + } + + /* Fill rxf stats into ethtool buffers */ + bmap = bna_rx_rid_mask(&bnad->bna); + for (i = 0; bmap; i++) { + if (bmap & 1) { + stats64 = (u64 *)&bnad->stats.bna_stats-> + hw_stats.rxf_stats[i]; + for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) / + sizeof(u64); j++) + buf[bi++] = stats64[j]; + } + bmap >>= 1; + } + + /* Fill per Q stats into ethtool buffers */ + bi = bnad_per_q_stats_fill(bnad, buf, bi); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + + mutex_unlock(&bnad->conf_mutex); +} + +static int +bnad_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return bnad_get_stats_count_locked(netdev); + default: + return -EOPNOTSUPP; + } +} + +static u32 +bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset, + u32 *base_offset) +{ + struct bfa_flash_attr *flash_attr; + struct bnad_iocmd_comp fcomp; + u32 i, flash_part = 0, ret; + unsigned long flags = 0; + + flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL); + if (!flash_attr) + return 0; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + kfree(flash_attr); + return 0; + } + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; + + /* Check for the flash type & base offset value */ + if (ret == BFA_STATUS_OK) { + for (i = 0; i < flash_attr->npart; i++) { + if (offset >= flash_attr->part[i].part_off && + offset < (flash_attr->part[i].part_off + + flash_attr->part[i].part_size)) { + flash_part = flash_attr->part[i].part_type; + *base_offset = flash_attr->part[i].part_off; + break; + } + } + } + kfree(flash_attr); + return flash_part; +} + +static int +bnad_get_eeprom_len(struct net_device *netdev) +{ + return BFA_TOTAL_FLASH_SIZE; +} + +static int +bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + u32 flash_part = 0, base_offset = 0; + unsigned long flags = 0; + int ret = 0; + + /* Fill the magic value */ + eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16); + + /* Query the flash partition based on the offset */ + flash_part = bnad_get_flash_partition_by_offset(bnad, + eeprom->offset, &base_offset); + if (flash_part == 0) + return -EFAULT; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part, + bnad->id, bytes, eeprom->len, + eeprom->offset - base_offset, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto done; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; +done: + return ret; +} + +static 
int +bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + u32 flash_part = 0, base_offset = 0; + unsigned long flags = 0; + int ret = 0; + + /* Check if the flash update request is valid */ + if (eeprom->magic != (bnad->pcidev->vendor | + (bnad->pcidev->device << 16))) + return -EINVAL; + + /* Query the flash partition based on the offset */ + flash_part = bnad_get_flash_partition_by_offset(bnad, + eeprom->offset, &base_offset); + if (flash_part == 0) + return -EFAULT; + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irqsave(&bnad->bna_lock, flags); + ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part, + bnad->id, bytes, eeprom->len, + eeprom->offset - base_offset, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + spin_unlock_irqrestore(&bnad->bna_lock, flags); + goto done; + } + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + wait_for_completion(&fcomp.comp); + ret = fcomp.comp_status; +done: + return ret; +} + +static int +bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash) +{ + struct bnad *bnad = netdev_priv(netdev); + struct bnad_iocmd_comp fcomp; + const struct firmware *fw; + int ret = 0; + + ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev); + if (ret) { + netdev_err(netdev, "can't load firmware %s\n", eflash->data); + goto out; + } + + fcomp.bnad = bnad; + fcomp.comp_status = 0; + + init_completion(&fcomp.comp); + spin_lock_irq(&bnad->bna_lock); + ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG, + bnad->id, (u8 *)fw->data, fw->size, 0, + bnad_cb_completion, &fcomp); + if (ret != BFA_STATUS_OK) { + netdev_warn(netdev, "flash update failed with err=%d\n", ret); + ret = -EIO; + spin_unlock_irq(&bnad->bna_lock); + goto out; + } + + spin_unlock_irq(&bnad->bna_lock); + wait_for_completion(&fcomp.comp); + if (fcomp.comp_status != BFA_STATUS_OK) { + ret = -EIO; + netdev_warn(netdev, + "firmware image update failed with err=%d\n", + fcomp.comp_status); + } +out: + release_firmware(fw); + return ret; +} + +static const struct ethtool_ops bnad_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE_RX, + .get_drvinfo = bnad_get_drvinfo, + .get_wol = bnad_get_wol, + .get_link = ethtool_op_get_link, + .get_coalesce = bnad_get_coalesce, + .set_coalesce = bnad_set_coalesce, + .get_ringparam = bnad_get_ringparam, + .set_ringparam = bnad_set_ringparam, + .get_pauseparam = bnad_get_pauseparam, + .set_pauseparam = bnad_set_pauseparam, + .get_strings = bnad_get_strings, + .get_ethtool_stats = bnad_get_ethtool_stats, + .get_sset_count = bnad_get_sset_count, + .get_eeprom_len = bnad_get_eeprom_len, + .get_eeprom = bnad_get_eeprom, + .set_eeprom = bnad_set_eeprom, + .flash_device = bnad_flash_device, + .get_ts_info = ethtool_op_get_ts_info, + .get_link_ksettings = bnad_get_link_ksettings, + .set_link_ksettings = bnad_set_link_ksettings, +}; + +void +bnad_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &bnad_ethtool_ops; +} diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h new file mode 100644 index 0000000000..28d89d0c22 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/cna.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. 
+ */ +/* + * Copyright (c) 2006-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ + +#ifndef __CNA_H__ +#define __CNA_H__ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/bitops.h> +#include <linux/timer.h> +#include <linux/interrupt.h> +#include <linux/if_vlan.h> +#include <linux/if_ether.h> + +#define bfa_sm_fault(__event) do { \ + pr_err("SM Assertion failure: %s: %d: event = %d\n", \ + __FILE__, __LINE__, __event); \ +} while (0) + +extern char bfa_version[]; + +#define CNA_FW_FILE_CT "ctfw-3.2.5.1.bin" +#define CNA_FW_FILE_CT2 "ct2fw-3.2.5.1.bin" +#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */ + +#endif /* __CNA_H__ */ diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c new file mode 100644 index 0000000000..824eaef307 --- /dev/null +++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Linux network driver for QLogic BR-series Converged Network Adapter. + */ +/* + * Copyright (c) 2005-2014 Brocade Communications Systems, Inc. + * Copyright (c) 2014-2015 QLogic Corporation + * All rights reserved + * www.qlogic.com + */ +#include <linux/firmware.h> +#include "bnad.h" +#include "bfi.h" +#include "cna.h" + +const struct firmware *bfi_fw; +static u32 *bfi_image_ct_cna, *bfi_image_ct2_cna; +static u32 bfi_image_ct_cna_size, bfi_image_ct2_cna_size; + +static u32 * +cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image, + u32 *bfi_image_size, char *fw_name) +{ + const struct firmware *fw; + u32 n; + + if (request_firmware(&fw, fw_name, &pdev->dev)) { + dev_alert(&pdev->dev, "can't load firmware %s\n", fw_name); + goto error; + } + + *bfi_image = (u32 *)fw->data; + *bfi_image_size = fw->size/sizeof(u32); + bfi_fw = fw; + + /* Convert loaded firmware to host order as it is stored in file + * as sequence of LE32 integers. + */ + for (n = 0; n < *bfi_image_size; n++) + le32_to_cpus(*bfi_image + n); + + return *bfi_image; +error: + return NULL; +} + +u32 * +cna_get_firmware_buf(struct pci_dev *pdev) +{ + if (pdev->device == BFA_PCI_DEVICE_ID_CT2) { + if (bfi_image_ct2_cna_size == 0) + cna_read_firmware(pdev, &bfi_image_ct2_cna, + &bfi_image_ct2_cna_size, CNA_FW_FILE_CT2); + return bfi_image_ct2_cna; + } else if (bfa_asic_id_ct(pdev->device)) { + if (bfi_image_ct_cna_size == 0) + cna_read_firmware(pdev, &bfi_image_ct_cna, + &bfi_image_ct_cna_size, CNA_FW_FILE_CT); + return bfi_image_ct_cna; + } + + return NULL; +} + +u32 * +bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off) +{ + switch (asic_gen) { + case BFI_ASIC_GEN_CT: + return (bfi_image_ct_cna + off); + case BFI_ASIC_GEN_CT2: + return (bfi_image_ct2_cna + off); + default: + return NULL; + } +} + +u32 +bfa_cb_image_get_size(enum bfi_asic_gen asic_gen) +{ + switch (asic_gen) { + case BFI_ASIC_GEN_CT: + return bfi_image_ct_cna_size; + case BFI_ASIC_GEN_CT2: + return bfi_image_ct2_cna_size; + default: + return 0; + } +} |