Diffstat (limited to 'drivers/net/ethernet/marvell/octeon_ep')
17 files changed, 5998 insertions, 0 deletions
diff --git a/drivers/net/ethernet/marvell/octeon_ep/Kconfig b/drivers/net/ethernet/marvell/octeon_ep/Kconfig new file mode 100644 index 0000000000..0d7db81534 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/Kconfig @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Marvell's Octeon PCI Endpoint NIC Driver Configuration +# + +config OCTEON_EP + tristate "Marvell Octeon PCI Endpoint NIC Driver" + depends on 64BIT + depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL + help + This driver supports networking functionality of Marvell's + Octeon PCI Endpoint NIC. + + For the list of devices supported by this driver, refer + to the documentation in + <file:Documentation/networking/device_drivers/ethernet/marvell/octeon_ep.rst>. + + To compile this driver as a module, choose M here. The name of the + module is octeon_ep. diff --git a/drivers/net/ethernet/marvell/octeon_ep/Makefile b/drivers/net/ethernet/marvell/octeon_ep/Makefile new file mode 100644 index 0000000000..2026c81181 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Network driver for Marvell's Octeon PCI Endpoint NIC +# + +obj-$(CONFIG_OCTEON_EP) += octeon_ep.o + +octeon_ep-y := octep_main.o octep_cn9k_pf.o octep_tx.o octep_rx.o \ + octep_ethtool.o octep_ctrl_mbox.o octep_ctrl_net.o diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c new file mode 100644 index 0000000000..90c3a41993 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cn9k_pf.c @@ -0,0 +1,755 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_regs_cn9k_pf.h" + +#define CTRL_MBOX_MAX_PF 128 +#define CTRL_MBOX_SZ ((size_t)(0x400000 / CTRL_MBOX_MAX_PF)) + +#define FW_HB_INTERVAL_IN_SECS 1 +#define FW_HB_MISS_COUNT 10 + +/* Names of Hardware non-queue generic interrupts */ +static char *cn93_non_ioq_msix_names[] = { + "epf_ire_rint", + "epf_ore_rint", + "epf_vfire_rint0", + "epf_vfire_rint1", + "epf_vfore_rint0", + "epf_vfore_rint1", + "epf_mbox_rint0", + "epf_mbox_rint1", + "epf_oei_rint", + "epf_dma_rint", + "epf_dma_vf_rint0", + "epf_dma_vf_rint1", + "epf_pp_vf_rint0", + "epf_pp_vf_rint1", + "epf_misc_rint", + "epf_rsvd", +}; + +/* Dump useful hardware CSRs for debug purposes */ +static void cn93_dump_regs(struct octep_device *oct, int qno) +{ + struct device *dev = &oct->pdev->dev; + + dev_info(dev, "IQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_DBELL(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(qno))); + dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_CONTROL(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(qno))); + dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_ENABLE(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(qno))); + dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_BADDR(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(qno))); + dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INSTR_RSIZE(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(qno))); + dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n", + qno,
CN93_SDP_R_IN_CNTS(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_CNTS(qno))); + dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_INT_LEVELS(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_PKT_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_PKT_CNT(qno))); + dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_IN_BYTE_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(qno))); + + dev_info(dev, "OQ-%d register dump\n", qno); + dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_DBELL(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(qno))); + dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_CONTROL(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(qno))); + dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_ENABLE(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_BADDR(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(qno))); + dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_SLIST_RSIZE(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(qno))); + dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_CNTS(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_CNTS(qno))); + dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_INT_LEVELS(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(qno))); + dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_PKT_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(qno))); + dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_OUT_BYTE_CNT(qno), + octep_read_csr64(oct, CN93_SDP_R_OUT_BYTE_CNT(qno))); + dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n", + qno, CN93_SDP_R_ERR_TYPE(qno), + octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(qno))); +} + +/* Reset Hardware Tx queue */ +static int cn93_reset_iq(struct octep_device *oct, int q_no) +{ + struct octep_config *conf = oct->conf; + u64 val = 0ULL; + + dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no); + + /* Get absolute queue number */ + q_no += conf->pf_ring_cfg.srn; + + /* Disable the Tx/Instruction Ring */ + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val); + + /* clear the Instruction Ring packet/byte counts and doorbell CSRs */ + octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), val); + + val = 0xFFFFFFFF; + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(q_no), val); + + return 0; +} + +/* Reset Hardware Rx queue */ +static void cn93_reset_oq(struct octep_device *oct, int q_no) +{ + u64 val = 0ULL; + + q_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + /* Disable Output (Rx) Ring */ + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(q_no), val); + + /* Clear count CSRs */ + val = octep_read_csr(oct, CN93_SDP_R_OUT_CNTS(q_no)); + octep_write_csr(oct, CN93_SDP_R_OUT_CNTS(q_no), val); + + octep_write_csr64(oct, CN93_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(q_no), 
0xFFFFFFFF); +} + +/* Reset all hardware Tx/Rx queues */ +static void octep_reset_io_queues_cn93_pf(struct octep_device *oct) +{ + struct pci_dev *pdev = oct->pdev; + int q; + + dev_dbg(&pdev->dev, "Reset OCTEP_CN93 PF IO Queues\n"); + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + cn93_reset_iq(oct, q); + cn93_reset_oq(oct, q); + } +} + +/* Initialize windowed addresses to access some hardware registers */ +static void octep_setup_pci_window_regs_cn93_pf(struct octep_device *oct) +{ + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; + + oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_ADDR64); + oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_ADDR64); + oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_WR_DATA64); + oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CN93_SDP_WIN_RD_DATA64); +} + +/* Configure Hardware mapping: inform hardware which rings belong to PF. */ +static void octep_configure_ring_mapping_cn93_pf(struct octep_device *oct) +{ + struct octep_config *conf = oct->conf; + struct pci_dev *pdev = oct->pdev; + u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf); + int q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) { + u64 regval = 0; + + if (oct->pcie_port) + regval = 8 << CN93_SDP_FUNC_SEL_EPF_BIT_POS; + + octep_write_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q), regval); + + regval = octep_read_csr64(oct, CN93_SDP_EPVF_RING(pf_srn + q)); + dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n", + CN93_SDP_EPVF_RING(pf_srn + q), regval); + } +} + +/* Initialize configuration limits and initial active config 93xx PF. */ +static void octep_init_config_cn93_pf(struct octep_device *oct) +{ + struct octep_config *conf = oct->conf; + struct pci_dev *pdev = oct->pdev; + u8 link = 0; + u64 val; + int pos; + + /* Read ring configuration: + * PF ring count, number of VFs and rings per VF supported + */ + val = octep_read_csr64(oct, CN93_SDP_EPF_RINFO); + conf->sriov_cfg.max_rings_per_vf = CN93_SDP_EPF_RINFO_RPVF(val); + conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf; + conf->sriov_cfg.max_vfs = CN93_SDP_EPF_RINFO_NVFS(val); + conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs; + conf->sriov_cfg.vf_srn = CN93_SDP_EPF_RINFO_SRN(val); + + val = octep_read_csr64(oct, CN93_SDP_MAC_PF_RING_CTL(oct->pcie_port)); + conf->pf_ring_cfg.srn = CN93_SDP_MAC_PF_RING_CTL_SRN(val); + conf->pf_ring_cfg.max_io_rings = CN93_SDP_MAC_PF_RING_CTL_RPPF(val); + conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings; + dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n", + conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf, + conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings); + + conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS; + conf->iq.instr_type = OCTEP_64BYTE_INSTR; + conf->iq.pkind = 0; + conf->iq.db_min = OCTEP_DB_MIN; + conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD; + + conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS; + conf->oq.buf_size = OCTEP_OQ_BUF_SIZE; + conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD; + conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD; + conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD; + + conf->msix_cfg.non_ioq_msix = CN93_NUM_NON_IOQ_INTR; + conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings; + conf->msix_cfg.non_ioq_msix_names = cn93_non_ioq_msix_names; + + pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV); + if (pos) { + 
pci_read_config_byte(oct->pdev, + pos + PCI_SRIOV_FUNC_LINK, + &link); + link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link); + } + conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr + + (0x400000ull * 7) + + (link * CTRL_MBOX_SZ); + + conf->hb_interval = FW_HB_INTERVAL_IN_SECS; + conf->max_hb_miss_cnt = FW_HB_MISS_COUNT; + +} + +/* Setup registers for a hardware Tx Queue */ +static void octep_setup_iq_regs_cn93_pf(struct octep_device *oct, int iq_no) +{ + struct octep_iq *iq = oct->iq[iq_no]; + u32 reset_instr_cnt; + u64 reg_val; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_R_IN_CTL_IDLE)) { + do { + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no)); + } while (!(reg_val & CN93_R_IN_CTL_IDLE)); + } + + reg_val |= CN93_R_IN_CTL_RDSIZE; + reg_val |= CN93_R_IN_CTL_IS_64B; + reg_val |= CN93_R_IN_CTL_ESR; + octep_write_csr64(oct, CN93_SDP_R_IN_CONTROL(iq_no), reg_val); + + /* Write the start of the input queue's ring and its size */ + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(iq_no), + iq->desc_ring_dma); + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(iq_no), + iq->max_count); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_INSTR_DBELL(iq_no); + iq->inst_cnt_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_CNTS(iq_no); + iq->intr_lvl_reg = oct->mmio[0].hw_addr + + CN93_SDP_R_IN_INT_LEVELS(iq_no); + + /* Store the current instruction counter (used in flush_iq calculation) */ + reset_instr_cnt = readl(iq->inst_cnt_reg); + writel(reset_instr_cnt, iq->inst_cnt_reg); + + /* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */ + reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff; + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); +} + +/* Setup registers for a hardware Rx Queue */ +static void octep_setup_oq_regs_cn93_pf(struct octep_device *oct, int oq_no) +{ + u64 reg_val; + u64 oq_ctl = 0ULL; + u32 time_threshold = 0; + struct octep_oq *oq = oct->oq[oq_no]; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + + /* wait for IDLE to set to 1 */ + if (!(reg_val & CN93_R_OUT_CTL_IDLE)) { + do { + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + } while (!(reg_val & CN93_R_OUT_CTL_IDLE)); + } + + reg_val &= ~(CN93_R_OUT_CTL_IMODE); + reg_val &= ~(CN93_R_OUT_CTL_ROR_P); + reg_val &= ~(CN93_R_OUT_CTL_NSR_P); + reg_val &= ~(CN93_R_OUT_CTL_ROR_I); + reg_val &= ~(CN93_R_OUT_CTL_NSR_I); + reg_val &= ~(CN93_R_OUT_CTL_ES_I); + reg_val &= ~(CN93_R_OUT_CTL_ROR_D); + reg_val &= ~(CN93_R_OUT_CTL_NSR_D); + reg_val &= ~(CN93_R_OUT_CTL_ES_D); + reg_val |= (CN93_R_OUT_CTL_ES_P); + + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), reg_val); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_BADDR(oq_no), + oq->desc_ring_dma); + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_RSIZE(oq_no), + oq->max_count); + + oq_ctl = octep_read_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no)); + oq_ctl &= ~0x7fffffULL; //clear the ISIZE and BSIZE (22-0) + oq_ctl |= (oq->buffer_size & 0xffff); //populate the BSIZE (15-0) + octep_write_csr64(oct, CN93_SDP_R_OUT_CONTROL(oq_no), oq_ctl); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + oq->pkts_sent_reg = oct->mmio[0].hw_addr + CN93_SDP_R_OUT_CNTS(oq_no); + oq->pkts_credit_reg = oct->mmio[0].hw_addr + + 
CN93_SDP_R_OUT_SLIST_DBELL(oq_no); + + time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf); + reg_val = ((u64)time_threshold << 32) | + CFG_GET_OQ_INTR_PKT(oct->conf); + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); +} + +/* Setup registers for a PF mailbox */ +static void octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no) +{ + struct octep_mbox *mbox = oct->mbox[q_no]; + + mbox->q_no = q_no; + + /* PF mbox interrupt reg */ + mbox->mbox_int_reg = oct->mmio[0].hw_addr + CN93_SDP_EPF_MBOX_RINT(0); + + /* PF to VF DATA reg. PF writes into this reg */ + mbox->mbox_write_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_PF_VF_DATA(q_no); + + /* VF to PF DATA reg. PF reads from this reg */ + mbox->mbox_read_reg = oct->mmio[0].hw_addr + CN93_SDP_R_MBOX_VF_PF_DATA(q_no); +} + +/* Process non-ioq interrupts required to keep pf interface running. + * OEI_RINT is needed for control mailbox + */ +static bool octep_poll_non_ioq_interrupts_cn93_pf(struct octep_device *oct) +{ + bool handled = false; + u64 reg0; + + /* Check for OEI INTR */ + reg0 = octep_read_csr64(oct, CN93_SDP_EPF_OEI_RINT); + if (reg0) { + dev_info(&oct->pdev->dev, + "Received OEI_RINT intr: 0x%llx\n", + reg0); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT, reg0); + if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX) + queue_work(octep_wq, &oct->ctrl_mbox_task); + else if (reg0 & CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT) + atomic_set(&oct->hb_miss_cnt, 0); + + handled = true; + } + + return handled; +} + +/* Interrupts handler for all non-queue generic interrupts. */ +static irqreturn_t octep_non_ioq_intr_handler_cn93_pf(void *dev) +{ + struct octep_device *oct = (struct octep_device *)dev; + struct pci_dev *pdev = oct->pdev; + u64 reg_val = 0; + int i = 0; + + /* Check for IRERR INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_IRERR_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "received IRERR_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT, reg_val); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + reg_val = octep_read_csr64(oct, + CN93_SDP_R_ERR_TYPE(i)); + if (reg_val) { + dev_info(&pdev->dev, + "Received err type on IQ-%d: 0x%llx\n", + i, reg_val); + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), + reg_val); + } + } + goto irq_handled; + } + + /* Check for ORERR INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_ORERR_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received ORERR_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT, reg_val); + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + reg_val = octep_read_csr64(oct, CN93_SDP_R_ERR_TYPE(i)); + if (reg_val) { + dev_info(&pdev->dev, + "Received err type on OQ-%d: 0x%llx\n", + i, reg_val); + octep_write_csr64(oct, CN93_SDP_R_ERR_TYPE(i), + reg_val); + } + } + + goto irq_handled; + } + + /* Check for VFIRE INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received VFIRE_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_VFIRE_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for VFORE INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received VFORE_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_VFORE_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for MBOX INTR and OEI INTR */ + if (octep_poll_non_ioq_interrupts_cn93_pf(oct)) + goto irq_handled; + + /* Check 
for DMA INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_RINT); + if (reg_val) { + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT, reg_val); + goto irq_handled; + } + + /* Check for DMA VF INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received DMA_VF_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_VF_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for PPVF INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0)); + if (reg_val) { + dev_info(&pdev->dev, + "Received PP_VF_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_PP_VF_RINT(0), reg_val); + goto irq_handled; + } + + /* Check for MISC INTR */ + reg_val = octep_read_csr64(oct, CN93_SDP_EPF_MISC_RINT); + if (reg_val) { + dev_info(&pdev->dev, + "Received MISC_RINT intr: 0x%llx\n", reg_val); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT, reg_val); + goto irq_handled; + } + + dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n"); +irq_handled: + return IRQ_HANDLED; +} + +/* Tx/Rx queue interrupt handler */ +static irqreturn_t octep_ioq_intr_handler_cn93_pf(void *data) +{ + struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data; + struct octep_oq *oq = vector->oq; + + napi_schedule_irqoff(oq->napi); + return IRQ_HANDLED; +} + +/* soft reset of 93xx */ +static int octep_soft_reset_cn93_pf(struct octep_device *oct) +{ + dev_info(&oct->pdev->dev, "CN93XX: Doing soft reset\n"); + + octep_write_csr64(oct, CN93_SDP_WIN_WR_MASK_REG, 0xFF); + + /* Set core domain reset bit */ + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1S, 1); + /* Wait for 100ms as Octeon resets. */ + mdelay(100); + /* clear core domain reset bit */ + OCTEP_PCI_WIN_WRITE(oct, CN93_RST_CORE_DOMAIN_W1C, 1); + + return 0; +} + +/* Re-initialize Octeon hardware registers */ +static void octep_reinit_regs_cn93_pf(struct octep_device *oct) +{ + u32 i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_iq_regs(oct, i); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + oct->hw_ops.setup_oq_regs(oct, i); + + oct->hw_ops.enable_interrupts(oct); + oct->hw_ops.enable_io_queues(oct); + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); +} + +/* Enable all interrupts */ +static void octep_enable_interrupts_cn93_pf(struct octep_device *oct) +{ + u64 intr_mask = 0ULL; + int srn, num_rings, i; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (i = 0; i < num_rings; i++) + intr_mask |= (0x1ULL << (srn + i)); + + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask); +} + +/* Disable all interrupts */ +static void octep_disable_interrupts_cn93_pf(struct octep_device *oct) +{ + u64 intr_mask = 0ULL; + int srn, num_rings, i; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (i = 0; i < num_rings; i++) + intr_mask |= (0x1ULL << (srn + i)); + + octep_write_csr64(oct, CN93_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, 
CN93_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL); + octep_write_csr64(oct, CN93_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask); + octep_write_csr64(oct, CN93_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask); +} + +/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */ +static u32 octep_update_iq_read_index_cn93_pf(struct octep_iq *iq) +{ + u32 pkt_in_done = readl(iq->inst_cnt_reg); + u32 last_done, new_idx; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + new_idx = (iq->octep_read_index + last_done) % iq->max_count; + + return new_idx; +} + +/* Enable a hardware Tx Queue */ +static void octep_enable_iq_cn93_pf(struct octep_device *oct, int iq_no) +{ + u64 loop = HZ; + u64 reg_val; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF); + + while (octep_read_csr64(oct, CN93_SDP_R_IN_INSTR_DBELL(iq_no)) && + loop--) { + schedule_timeout_interruptible(1); + } + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no)); + reg_val |= (0x1ULL << 62); + octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(iq_no), reg_val); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); + reg_val |= 0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Enable a hardware Rx Queue */ +static void octep_enable_oq_cn93_pf(struct octep_device *oct, int oq_no) +{ + u64 reg_val = 0ULL; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no)); + reg_val |= (0x1ULL << 62); + octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(oq_no), reg_val); + + octep_write_csr64(oct, CN93_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); + reg_val |= 0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Enable all hardware Tx/Rx Queues assigned to PF */ +static void octep_enable_io_queues_cn93_pf(struct octep_device *oct) +{ + u8 q; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_enable_iq_cn93_pf(oct, q); + octep_enable_oq_cn93_pf(oct, q); + } +} + +/* Disable a hardware Tx Queue assigned to PF */ +static void octep_disable_iq_cn93_pf(struct octep_device *oct, int iq_no) +{ + u64 reg_val = 0ULL; + + iq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + + reg_val = octep_read_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no)); + reg_val &= ~0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(iq_no), reg_val); +} + +/* Disable a hardware Rx Queue assigned to PF */ +static void octep_disable_oq_cn93_pf(struct octep_device *oct, int oq_no) +{ + u64 reg_val = 0ULL; + + oq_no += CFG_GET_PORTS_PF_SRN(oct->conf); + reg_val = octep_read_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no)); + reg_val &= ~0x1ULL; + octep_write_csr64(oct, CN93_SDP_R_OUT_ENABLE(oq_no), reg_val); +} + +/* Disable all hardware Tx/Rx Queues assigned to PF */ +static void octep_disable_io_queues_cn93_pf(struct octep_device *oct) +{ + int q = 0; + + for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) { + octep_disable_iq_cn93_pf(oct, q); + octep_disable_oq_cn93_pf(oct, q); + } +} + +/* Dump hardware registers (including Tx/Rx queues) for debugging. */ +static void octep_dump_registers_cn93_pf(struct octep_device *oct) +{ + u8 srn, num_rings, q; + + srn = CFG_GET_PORTS_PF_SRN(oct->conf); + num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + for (q = srn; q < srn + num_rings; q++) + cn93_dump_regs(oct, q); +} + +/** + * octep_device_setup_cn93_pf() - Setup Octeon device.
+ * + * @oct: Octeon device private data structure. + * + * - initialize hardware operations. + * - get target side pcie port number for the device. + * - setup window access to hardware registers. + * - set initial configuration and max limits. + * - setup hardware mapping of rings to the PF device. + */ +void octep_device_setup_cn93_pf(struct octep_device *oct) +{ + oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cn93_pf; + oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cn93_pf; + oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cn93_pf; + + oct->hw_ops.non_ioq_intr_handler = octep_non_ioq_intr_handler_cn93_pf; + oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cn93_pf; + oct->hw_ops.soft_reset = octep_soft_reset_cn93_pf; + oct->hw_ops.reinit_regs = octep_reinit_regs_cn93_pf; + + oct->hw_ops.enable_interrupts = octep_enable_interrupts_cn93_pf; + oct->hw_ops.disable_interrupts = octep_disable_interrupts_cn93_pf; + oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cn93_pf; + + oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cn93_pf; + + oct->hw_ops.enable_iq = octep_enable_iq_cn93_pf; + oct->hw_ops.enable_oq = octep_enable_oq_cn93_pf; + oct->hw_ops.enable_io_queues = octep_enable_io_queues_cn93_pf; + + oct->hw_ops.disable_iq = octep_disable_iq_cn93_pf; + oct->hw_ops.disable_oq = octep_disable_oq_cn93_pf; + oct->hw_ops.disable_io_queues = octep_disable_io_queues_cn93_pf; + oct->hw_ops.reset_io_queues = octep_reset_io_queues_cn93_pf; + + oct->hw_ops.dump_registers = octep_dump_registers_cn93_pf; + + octep_setup_pci_window_regs_cn93_pf(oct); + + oct->pcie_port = octep_read_csr64(oct, CN93_SDP_MAC_NUMBER) & 0xff; + dev_info(&oct->pdev->dev, + "Octeon device using PCIE Port %d\n", oct->pcie_port); + + octep_init_config_cn93_pf(oct); + octep_configure_ring_mapping_cn93_pf(oct); +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_config.h b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h new file mode 100644 index 0000000000..df7cd39d9f --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_config.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_CONFIG_H_ +#define _OCTEP_CONFIG_H_ + +/* Tx instruction types by length */ +#define OCTEP_32BYTE_INSTR 32 +#define OCTEP_64BYTE_INSTR 64 + +/* Tx Queue: maximum descriptors per ring */ +#define OCTEP_IQ_MAX_DESCRIPTORS 1024 +/* Minimum input (Tx) requests to be enqueued to ring doorbell */ +#define OCTEP_DB_MIN 1 +/* Packet threshold for Tx queue interrupt */ +#define OCTEP_IQ_INTR_THRESHOLD 0x0 + +/* Rx Queue: maximum descriptors per ring */ +#define OCTEP_OQ_MAX_DESCRIPTORS 1024 + +/* Rx buffer size: Use page size buffers. + * Build skb from allocated page buffer once the packet is received. + * When a gathered packet is received, make head page as skb head and + * page buffers in consecutive Rx descriptors as fragments. + */ +#define OCTEP_OQ_BUF_SIZE (SKB_WITH_OVERHEAD(PAGE_SIZE)) +#define OCTEP_OQ_PKTS_PER_INTR 128 +#define OCTEP_OQ_REFILL_THRESHOLD (OCTEP_OQ_MAX_DESCRIPTORS / 4) + +#define OCTEP_OQ_INTR_PKT_THRESHOLD 1 +#define OCTEP_OQ_INTR_TIME_THRESHOLD 10 + +#define OCTEP_MSIX_NAME_SIZE (IFNAMSIZ + 32) + +/* Tx Queue wake threshold + * wakeup a stopped Tx queue if minimum 2 descriptors are available. + * Even a skb with fragments consume only one Tx queue descriptor entry. 
+ */ +#define OCTEP_WAKE_QUEUE_THRESHOLD 2 + +/* Minimum MTU supported by Octeon network interface */ +#define OCTEP_MIN_MTU ETH_MIN_MTU +/* Maximum MTU supported by Octeon interface*/ +#define OCTEP_MAX_MTU (10000 - (ETH_HLEN + ETH_FCS_LEN)) +/* Default MTU */ +#define OCTEP_DEFAULT_MTU 1500 + +/* Macros to get octeon config params */ +#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) +#define CFG_GET_IQ_NUM_DESC(cfg) ((cfg)->iq.num_descs) +#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type) +#define CFG_GET_IQ_PKIND(cfg) ((cfg)->iq.pkind) +#define CFG_GET_IQ_INSTR_SIZE(cfg) (64) +#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min) +#define CFG_GET_IQ_INTR_THRESHOLD(cfg) ((cfg)->iq.intr_threshold) + +#define CFG_GET_OQ_NUM_DESC(cfg) ((cfg)->oq.num_descs) +#define CFG_GET_OQ_BUF_SIZE(cfg) ((cfg)->oq.buf_size) +#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold) +#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt) +#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time) + +#define CFG_GET_PORTS_MAX_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.max_io_rings) +#define CFG_GET_PORTS_ACTIVE_IO_RINGS(cfg) ((cfg)->pf_ring_cfg.active_io_rings) +#define CFG_GET_PORTS_PF_SRN(cfg) ((cfg)->pf_ring_cfg.srn) + +#define CFG_GET_DPI_PKIND(cfg) ((cfg)->core_cfg.dpi_pkind) +#define CFG_GET_CORE_TICS_PER_US(cfg) ((cfg)->core_cfg.core_tics_per_us) +#define CFG_GET_COPROC_TICS_PER_US(cfg) ((cfg)->core_cfg.coproc_tics_per_us) + +#define CFG_GET_MAX_VFS(cfg) ((cfg)->sriov_cfg.max_vfs) +#define CFG_GET_ACTIVE_VFS(cfg) ((cfg)->sriov_cfg.active_vfs) +#define CFG_GET_MAX_RPVF(cfg) ((cfg)->sriov_cfg.max_rings_per_vf) +#define CFG_GET_ACTIVE_RPVF(cfg) ((cfg)->sriov_cfg.active_rings_per_vf) +#define CFG_GET_VF_SRN(cfg) ((cfg)->sriov_cfg.vf_srn) + +#define CFG_GET_IOQ_MSIX(cfg) ((cfg)->msix_cfg.ioq_msix) +#define CFG_GET_NON_IOQ_MSIX(cfg) ((cfg)->msix_cfg.non_ioq_msix) +#define CFG_GET_NON_IOQ_MSIX_NAMES(cfg) ((cfg)->msix_cfg.non_ioq_msix_names) + +#define CFG_GET_CTRL_MBOX_MEM_ADDR(cfg) ((cfg)->ctrl_mbox_cfg.barmem_addr) + +/* Hardware Tx Queue configuration. */ +struct octep_iq_config { + /* Size of the Input queue (number of commands) */ + u16 num_descs; + + /* Command size - 32 or 64 bytes */ + u16 instr_type; + + /* pkind for packets sent to Octeon */ + u16 pkind; + + /* Minimum number of commands pending to be posted to Octeon before driver + * hits the Input queue doorbell. + */ + u16 db_min; + + /* Trigger the IQ interrupt when processed cmd count reaches + * this level. + */ + u32 intr_threshold; +}; + +/* Hardware Rx Queue configuration. */ +struct octep_oq_config { + /* Size of Output queue (number of descriptors) */ + u16 num_descs; + + /* Size of buffer in this Output queue. */ + u16 buf_size; + + /* The number of buffers that were consumed during packet processing + * by the driver on this Output queue before the driver attempts to + * replenish the descriptor ring with new buffers. + */ + u16 refill_threshold; + + /* Interrupt Coalescing (Packet Count). Octeon will interrupt the host + * only if it sent as many packets as specified by this field. + * The driver usually does not use packet count interrupt coalescing. + */ + u32 oq_intr_pkt; + + /* Interrupt Coalescing (Time Interval). Octeon will interrupt the host + * if at least one packet was sent in the time interval specified by + * this field. The driver uses time interval interrupt coalescing by + * default. The time is specified in microseconds. 
+ */ + u32 oq_intr_time; +}; + +/* Tx/Rx configuration */ +struct octep_pf_ring_config { + /* Max number of IOQs */ + u16 max_io_rings; + + /* Number of active IOQs */ + u16 active_io_rings; + + /* Starting IOQ number: this changes based on which PEM is used */ + u16 srn; +}; + +/* Octeon Hardware SRIOV config */ +struct octep_sriov_config { + /* Max number of VF devices supported */ + u16 max_vfs; + + /* Number of VF devices enabled */ + u16 active_vfs; + + /* Max number of rings assigned to VF */ + u8 max_rings_per_vf; + + /* Number of rings enabled per VF */ + u8 active_rings_per_vf; + + /* starting ring number of VF's: ring-0 of VF-0 of the PF */ + u16 vf_srn; +}; + +/* Octeon MSI-x config. */ +struct octep_msix_config { + /* Number of IOQ interrupts */ + u16 ioq_msix; + + /* Number of Non IOQ interrupts */ + u16 non_ioq_msix; + + /* Names of Non IOQ interrupts */ + char **non_ioq_msix_names; +}; + +struct octep_ctrl_mbox_config { + /* Barmem address for control mbox */ + void __iomem *barmem_addr; +}; + +/* Data Structure to hold configuration limits and active config */ +struct octep_config { + /* Input Queue attributes. */ + struct octep_iq_config iq; + + /* Output Queue attributes. */ + struct octep_oq_config oq; + + /* NIC Port Configuration */ + struct octep_pf_ring_config pf_ring_cfg; + + /* SRIOV configuration of the PF */ + struct octep_sriov_config sriov_cfg; + + /* MSI-X interrupt config */ + struct octep_msix_config msix_cfg; + + /* ctrl mbox config */ + struct octep_ctrl_mbox_config ctrl_mbox_cfg; + + /* Configured maximum heartbeat miss count */ + u32 max_hb_miss_cnt; + + /* Configured firmware heartbeat interval in secs */ + u32 hb_interval; +}; +#endif /* _OCTEP_CONFIG_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h new file mode 100644 index 0000000000..0c741e752d --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_cp_version.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright (c) 2022 Marvell. + */ +#ifndef __OCTEP_CP_VERSION_H__ +#define __OCTEP_CP_VERSION_H__ + +#define OCTEP_CP_VERSION(a, b, c) ((((a) & 0xff) << 16) + \ + (((b) & 0xff) << 8) + \ + ((c) & 0xff)) + +#endif /* __OCTEP_CP_VERSION_H__ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c new file mode 100644 index 0000000000..9d53c1402c --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.c @@ -0,0 +1,279 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mutex.h> +#include <linux/jiffies.h> +#include <linux/sched.h> +#include <linux/sched/signal.h> +#include <linux/io.h> +#include <linux/pci.h> +#include <linux/etherdevice.h> + +#include "octep_ctrl_mbox.h" +#include "octep_config.h" +#include "octep_main.h" + +/* Timeout in msecs for message response */ +#define OCTEP_CTRL_MBOX_MSG_TIMEOUT_MS 100 +/* Time in msecs to wait for message response */ +#define OCTEP_CTRL_MBOX_MSG_WAIT_MS 10 + +/* Size of mbox info in bytes */ +#define OCTEP_CTRL_MBOX_INFO_SZ 256 +/* Size of mbox host to fw queue info in bytes */ +#define OCTEP_CTRL_MBOX_H2FQ_INFO_SZ 16 +/* Size of mbox fw to host queue info in bytes */ +#define OCTEP_CTRL_MBOX_F2HQ_INFO_SZ 16 + +#define OCTEP_CTRL_MBOX_TOTAL_INFO_SZ (OCTEP_CTRL_MBOX_INFO_SZ + \ + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ + \ + OCTEP_CTRL_MBOX_F2HQ_INFO_SZ) + +#define OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(m) (m) +#define OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(m) ((m) + 8) +#define OCTEP_CTRL_MBOX_INFO_HOST_VERSION(m) ((m) + 16) +#define OCTEP_CTRL_MBOX_INFO_HOST_STATUS(m) ((m) + 24) +#define OCTEP_CTRL_MBOX_INFO_FW_VERSION(m) ((m) + 136) +#define OCTEP_CTRL_MBOX_INFO_FW_STATUS(m) ((m) + 144) + +#define OCTEP_CTRL_MBOX_H2FQ_INFO(m) ((m) + OCTEP_CTRL_MBOX_INFO_SZ) +#define OCTEP_CTRL_MBOX_H2FQ_PROD(m) (OCTEP_CTRL_MBOX_H2FQ_INFO(m)) +#define OCTEP_CTRL_MBOX_H2FQ_CONS(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 4) +#define OCTEP_CTRL_MBOX_H2FQ_SZ(m) ((OCTEP_CTRL_MBOX_H2FQ_INFO(m)) + 8) + +#define OCTEP_CTRL_MBOX_F2HQ_INFO(m) ((m) + \ + OCTEP_CTRL_MBOX_INFO_SZ + \ + OCTEP_CTRL_MBOX_H2FQ_INFO_SZ) +#define OCTEP_CTRL_MBOX_F2HQ_PROD(m) (OCTEP_CTRL_MBOX_F2HQ_INFO(m)) +#define OCTEP_CTRL_MBOX_F2HQ_CONS(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 4) +#define OCTEP_CTRL_MBOX_F2HQ_SZ(m) ((OCTEP_CTRL_MBOX_F2HQ_INFO(m)) + 8) + +static const u32 mbox_hdr_sz = sizeof(union octep_ctrl_mbox_msg_hdr); + +static u32 octep_ctrl_mbox_circq_inc(u32 index, u32 inc, u32 sz) +{ + return (index + inc) % sz; +} + +static u32 octep_ctrl_mbox_circq_space(u32 pi, u32 ci, u32 sz) +{ + return sz - (abs(pi - ci) % sz); +} + +static u32 octep_ctrl_mbox_circq_depth(u32 pi, u32 ci, u32 sz) +{ + return (abs(pi - ci) % sz); +} + +int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox) +{ + u64 magic_num, status, fw_versions; + + if (!mbox) + return -EINVAL; + + if (!mbox->barmem) { + pr_info("octep_ctrl_mbox : Invalid barmem %p\n", mbox->barmem); + return -EINVAL; + } + + magic_num = readq(OCTEP_CTRL_MBOX_INFO_MAGIC_NUM(mbox->barmem)); + if (magic_num != OCTEP_CTRL_MBOX_MAGIC_NUMBER) { + pr_info("octep_ctrl_mbox : Invalid magic number %llx\n", magic_num); + return -EINVAL; + } + + status = readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)); + if (status != OCTEP_CTRL_MBOX_STATUS_READY) { + pr_info("octep_ctrl_mbox : Firmware is not ready.\n"); + return -EINVAL; + } + + fw_versions = readq(OCTEP_CTRL_MBOX_INFO_FW_VERSION(mbox->barmem)); + mbox->min_fw_version = ((fw_versions & 0xffffffff00000000ull) >> 32); + mbox->max_fw_version = (fw_versions & 0xffffffff); + mbox->barmem_sz = readl(OCTEP_CTRL_MBOX_INFO_BARMEM_SZ(mbox->barmem)); + + writeq(OCTEP_CTRL_MBOX_STATUS_INIT, + OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); + + mutex_init(&mbox->h2fq_lock); + mutex_init(&mbox->f2hq_lock); + + mbox->h2fq.sz = readl(OCTEP_CTRL_MBOX_H2FQ_SZ(mbox->barmem)); + mbox->h2fq.hw_prod = OCTEP_CTRL_MBOX_H2FQ_PROD(mbox->barmem); + mbox->h2fq.hw_cons = OCTEP_CTRL_MBOX_H2FQ_CONS(mbox->barmem); + 
mbox->h2fq.hw_q = mbox->barmem + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ; + + mbox->f2hq.sz = readl(OCTEP_CTRL_MBOX_F2HQ_SZ(mbox->barmem)); + mbox->f2hq.hw_prod = OCTEP_CTRL_MBOX_F2HQ_PROD(mbox->barmem); + mbox->f2hq.hw_cons = OCTEP_CTRL_MBOX_F2HQ_CONS(mbox->barmem); + mbox->f2hq.hw_q = mbox->barmem + + OCTEP_CTRL_MBOX_TOTAL_INFO_SZ + + mbox->h2fq.sz; + + writeq(mbox->version, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem)); + /* ensure ready state is seen after everything is initialized */ + wmb(); + writeq(OCTEP_CTRL_MBOX_STATUS_READY, + OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); + + pr_info("Octep ctrl mbox : Init successful.\n"); + + return 0; +} + +static void +octep_write_mbox_data(struct octep_ctrl_mbox_q *q, u32 *pi, u32 ci, void *buf, u32 w_sz) +{ + u8 __iomem *qbuf; + u32 cp_sz; + + /* Assumption: Caller has ensured enough write space */ + qbuf = (q->hw_q + *pi); + if (*pi < ci) { + /* copy entire w_sz */ + memcpy_toio(qbuf, buf, w_sz); + *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz); + } else { + /* copy up to end of queue */ + cp_sz = min((q->sz - *pi), w_sz); + memcpy_toio(qbuf, buf, cp_sz); + w_sz -= cp_sz; + *pi = octep_ctrl_mbox_circq_inc(*pi, cp_sz, q->sz); + if (w_sz) { + /* roll over and copy remaining w_sz */ + buf += cp_sz; + qbuf = (q->hw_q + *pi); + memcpy_toio(qbuf, buf, w_sz); + *pi = octep_ctrl_mbox_circq_inc(*pi, w_sz, q->sz); + } + } +} + +int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) +{ + struct octep_ctrl_mbox_msg_buf *sg; + struct octep_ctrl_mbox_q *q; + u32 pi, ci, buf_sz, w_sz; + int s; + + if (!mbox || !msg) + return -EINVAL; + + if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY) + return -EIO; + + mutex_lock(&mbox->h2fq_lock); + q = &mbox->h2fq; + pi = readl(q->hw_prod); + ci = readl(q->hw_cons); + + if (octep_ctrl_mbox_circq_space(pi, ci, q->sz) < (msg->hdr.s.sz + mbox_hdr_sz)) { + mutex_unlock(&mbox->h2fq_lock); + return -EAGAIN; + } + + octep_write_mbox_data(q, &pi, ci, (void *)&msg->hdr, mbox_hdr_sz); + buf_sz = msg->hdr.s.sz; + for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) { + sg = &msg->sg_list[s]; + w_sz = (sg->sz <= buf_sz) ? 
sg->sz : buf_sz; + octep_write_mbox_data(q, &pi, ci, sg->msg, w_sz); + buf_sz -= w_sz; + } + writel(pi, q->hw_prod); + mutex_unlock(&mbox->h2fq_lock); + + return 0; +} + +static void +octep_read_mbox_data(struct octep_ctrl_mbox_q *q, u32 pi, u32 *ci, void *buf, u32 r_sz) +{ + u8 __iomem *qbuf; + u32 cp_sz; + + /* Assumption: Caller has ensured enough read space */ + qbuf = (q->hw_q + *ci); + if (*ci < pi) { + /* copy entire r_sz */ + memcpy_fromio(buf, qbuf, r_sz); + *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz); + } else { + /* copy up to end of queue */ + cp_sz = min((q->sz - *ci), r_sz); + memcpy_fromio(buf, qbuf, cp_sz); + r_sz -= cp_sz; + *ci = octep_ctrl_mbox_circq_inc(*ci, cp_sz, q->sz); + if (r_sz) { + /* roll over and copy remaining r_sz */ + buf += cp_sz; + qbuf = (q->hw_q + *ci); + memcpy_fromio(buf, qbuf, r_sz); + *ci = octep_ctrl_mbox_circq_inc(*ci, r_sz, q->sz); + } + } +} + +int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg) +{ + struct octep_ctrl_mbox_msg_buf *sg; + u32 pi, ci, r_sz, buf_sz, q_depth; + struct octep_ctrl_mbox_q *q; + int s; + + if (readq(OCTEP_CTRL_MBOX_INFO_FW_STATUS(mbox->barmem)) != OCTEP_CTRL_MBOX_STATUS_READY) + return -EIO; + + mutex_lock(&mbox->f2hq_lock); + q = &mbox->f2hq; + pi = readl(q->hw_prod); + ci = readl(q->hw_cons); + + q_depth = octep_ctrl_mbox_circq_depth(pi, ci, q->sz); + if (q_depth < mbox_hdr_sz) { + mutex_unlock(&mbox->f2hq_lock); + return -EAGAIN; + } + + octep_read_mbox_data(q, pi, &ci, (void *)&msg->hdr, mbox_hdr_sz); + buf_sz = msg->hdr.s.sz; + for (s = 0; ((s < msg->sg_num) && (buf_sz > 0)); s++) { + sg = &msg->sg_list[s]; + r_sz = (sg->sz <= buf_sz) ? sg->sz : buf_sz; + octep_read_mbox_data(q, pi, &ci, sg->msg, r_sz); + buf_sz -= r_sz; + } + writel(ci, q->hw_cons); + mutex_unlock(&mbox->f2hq_lock); + + return 0; +} + +int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox) +{ + if (!mbox) + return -EINVAL; + if (!mbox->barmem) + return -EINVAL; + + writeq(0, OCTEP_CTRL_MBOX_INFO_HOST_VERSION(mbox->barmem)); + writeq(OCTEP_CTRL_MBOX_STATUS_INVALID, + OCTEP_CTRL_MBOX_INFO_HOST_STATUS(mbox->barmem)); + /* ensure uninit state is written before uninitialization */ + wmb(); + + mutex_destroy(&mbox->h2fq_lock); + mutex_destroy(&mbox->f2hq_lock); + + pr_info("Octep ctrl mbox : Uninit successful.\n"); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h new file mode 100644 index 0000000000..7f8135788e --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_mbox.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ +#ifndef __OCTEP_CTRL_MBOX_H__ +#define __OCTEP_CTRL_MBOX_H__ + +/* barmem structure + * |===========================================| + * |Info (16 + 120 + 120 = 256 bytes) | + * |-------------------------------------------| + * |magic number (8 bytes) | + * |bar memory size (4 bytes) | + * |reserved (4 bytes) | + * |-------------------------------------------| + * |host version (8 bytes) | + * |host status (8 bytes) | + * |host reserved (104 bytes) | + * |-------------------------------------------| + * |fw version (8 bytes) | + * |fw status (8 bytes) | + * |fw reserved (104 bytes) | + * |===========================================| + * |Host to Fw Queue info (16 bytes) | + * |-------------------------------------------| + * |producer index (4 bytes) | + * |consumer index (4 bytes) | + * |max element size (4 bytes) | + * |reserved (4 bytes) | + * |===========================================| + * |Fw to Host Queue info (16 bytes) | + * |-------------------------------------------| + * |producer index (4 bytes) | + * |consumer index (4 bytes) | + * |max element size (4 bytes) | + * |reserved (4 bytes) | + * |===========================================| + * |Host to Fw Queue ((total size-288)/2 bytes)| + * |-------------------------------------------| + * | | + * |===========================================| + * |===========================================| + * |Fw to Host Queue ((total size-288)/2 bytes)| + * |-------------------------------------------| + * | | + * |===========================================| + */ + +#define OCTEP_CTRL_MBOX_MAGIC_NUMBER 0xdeaddeadbeefbeefull + +/* Valid request message */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ BIT(0) +/* Valid response message */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP BIT(1) +/* Valid notification, no response required */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY BIT(2) +/* Valid custom message */ +#define OCTEP_CTRL_MBOX_MSG_HDR_FLAG_CUSTOM BIT(3) + +#define OCTEP_CTRL_MBOX_MSG_DESC_MAX 4 + +enum octep_ctrl_mbox_status { + OCTEP_CTRL_MBOX_STATUS_INVALID = 0, + OCTEP_CTRL_MBOX_STATUS_INIT, + OCTEP_CTRL_MBOX_STATUS_READY, + OCTEP_CTRL_MBOX_STATUS_UNINIT +}; + +/* mbox message */ +union octep_ctrl_mbox_msg_hdr { + u64 words[2]; + struct { + /* must be 0 */ + u16 reserved1:15; + /* vf_idx is valid if 1 */ + u16 is_vf:1; + /* sender vf index 0-(n-1), 0 if (is_vf==0) */ + u16 vf_idx; + /* total size of message excluding header */ + u32 sz; + /* OCTEP_CTRL_MBOX_MSG_HDR_FLAG_* */ + u32 flags; + /* identifier to match responses */ + u16 msg_id; + u16 reserved2; + } s; +}; + +/* mbox message buffer */ +struct octep_ctrl_mbox_msg_buf { + u32 reserved1; + u16 reserved2; + /* size of buffer */ + u16 sz; + /* pointer to message buffer */ + void *msg; +}; + +/* mbox message */ +struct octep_ctrl_mbox_msg { + /* mbox transaction header */ + union octep_ctrl_mbox_msg_hdr hdr; + /* number of sg buffers */ + int sg_num; + /* message buffers */ + struct octep_ctrl_mbox_msg_buf sg_list[OCTEP_CTRL_MBOX_MSG_DESC_MAX]; +}; + +/* Mbox queue */ +struct octep_ctrl_mbox_q { + /* size of queue buffer */ + u32 sz; + /* producer address in bar mem */ + u8 __iomem *hw_prod; + /* consumer address in bar mem */ + u8 __iomem *hw_cons; + /* q base address in bar mem */ + u8 __iomem *hw_q; +}; + +struct octep_ctrl_mbox { + /* control plane version */ + u64 version; + /* size of bar memory */ + u32 barmem_sz; + /* pointer to BAR memory */ + u8 __iomem *barmem; + /* host-to-fw queue */ + struct octep_ctrl_mbox_q h2fq; + /* fw-to-host queue */ + struct
octep_ctrl_mbox_q f2hq; + /* lock for h2fq */ + struct mutex h2fq_lock; + /* lock for f2hq */ + struct mutex f2hq_lock; + /* Min control plane version supported by firmware */ + u32 min_fw_version; + /* Max control plane version supported by firmware */ + u32 max_fw_version; +}; + +/* Initialize control mbox. + * + * @param mbox: non-null pointer to struct octep_ctrl_mbox. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_init(struct octep_ctrl_mbox *mbox); + +/* Send mbox message. + * + * @param mbox: non-null pointer to struct octep_ctrl_mbox. + * @param msg: non-null pointer to struct octep_ctrl_mbox_msg. + * Caller should fill msg.sz and msg.desc.sz for each message. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_send(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg); + +/* Retrieve mbox message. + * + * @param mbox: non-null pointer to struct octep_ctrl_mbox. + * @param msg: non-null pointer to struct octep_ctrl_mbox_msg. + * Caller should fill msg.sz and msg.desc.sz for each message. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_recv(struct octep_ctrl_mbox *mbox, struct octep_ctrl_mbox_msg *msg); + +/* Uninitialize control mbox. + * + * @param mbox: non-null pointer to struct octep_ctrl_mbox. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_mbox_uninit(struct octep_ctrl_mbox *mbox); + +#endif /* __OCTEP_CTRL_MBOX_H__ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c new file mode 100644 index 0000000000..17bfd5cdf4 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ +#include <linux/string.h> +#include <linux/types.h> +#include <linux/etherdevice.h> +#include <linux/pci.h> +#include <linux/wait.h> + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_ctrl_net.h" + +/* Control plane version */ +#define OCTEP_CP_VERSION_CURRENT OCTEP_CP_VERSION(1, 0, 0) + +static const u32 req_hdr_sz = sizeof(union octep_ctrl_net_req_hdr); +static const u32 mtu_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mtu); +static const u32 mac_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_mac); +static const u32 state_sz = sizeof(struct octep_ctrl_net_h2f_req_cmd_state); +static const u32 link_info_sz = sizeof(struct octep_ctrl_net_link_info); +static atomic_t ctrl_net_msg_id; + +/* Control plane version in which OCTEP_CTRL_NET_H2F_CMD was added */ +static const u32 octep_ctrl_net_h2f_cmd_versions[OCTEP_CTRL_NET_H2F_CMD_MAX] = { + [OCTEP_CTRL_NET_H2F_CMD_INVALID ... OCTEP_CTRL_NET_H2F_CMD_LINK_INFO] = + OCTEP_CP_VERSION(1, 0, 0) +}; + +/* Control plane version in which OCTEP_CTRL_NET_F2H_CMD was added */ +static const u32 octep_ctrl_net_f2h_cmd_versions[OCTEP_CTRL_NET_F2H_CMD_MAX] = { + [OCTEP_CTRL_NET_F2H_CMD_INVALID ... 
OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS] = + OCTEP_CP_VERSION(1, 0, 0) +}; + +static void init_send_req(struct octep_ctrl_mbox_msg *msg, void *buf, + u16 sz, int vfid) +{ + msg->hdr.s.flags = OCTEP_CTRL_MBOX_MSG_HDR_FLAG_REQ; + msg->hdr.s.msg_id = atomic_inc_return(&ctrl_net_msg_id) & + GENMASK(sizeof(msg->hdr.s.msg_id) * BITS_PER_BYTE, 0); + msg->hdr.s.sz = req_hdr_sz + sz; + msg->sg_num = 1; + msg->sg_list[0].msg = buf; + msg->sg_list[0].sz = msg->hdr.s.sz; + if (vfid != OCTEP_CTRL_NET_INVALID_VFID) { + msg->hdr.s.is_vf = 1; + msg->hdr.s.vf_idx = vfid; + } +} + +static int octep_send_mbox_req(struct octep_device *oct, + struct octep_ctrl_net_wait_data *d, + bool wait_for_response) +{ + int err, ret, cmd; + + /* check if firmware is compatible for this request */ + cmd = d->data.req.hdr.s.cmd; + if (octep_ctrl_net_h2f_cmd_versions[cmd] > oct->ctrl_mbox.max_fw_version || + octep_ctrl_net_h2f_cmd_versions[cmd] < oct->ctrl_mbox.min_fw_version) + return -EOPNOTSUPP; + + err = octep_ctrl_mbox_send(&oct->ctrl_mbox, &d->msg); + if (err < 0) + return err; + + if (!wait_for_response) + return 0; + + d->done = 0; + INIT_LIST_HEAD(&d->list); + list_add_tail(&d->list, &oct->ctrl_req_wait_list); + ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q, + (d->done != 0), + msecs_to_jiffies(500)); + list_del(&d->list); + /* wait_event_interruptible_timeout() returns: + * ret == 0: timed out, condition still false + * ret == 1: condition became true right at timeout + * ret > 1: condition true, remaining jiffies + * ret < 0: interrupted by a signal + * Treat timeout and the boundary case as retryable. + */ + if (ret == 0 || ret == 1) + return -EAGAIN; + + if (d->data.resp.hdr.s.reply != OCTEP_CTRL_NET_REPLY_OK) + return -EAGAIN; + + return 0; +} + +int octep_ctrl_net_init(struct octep_device *oct) +{ + struct octep_ctrl_mbox *ctrl_mbox; + struct pci_dev *pdev = oct->pdev; + int ret; + + init_waitqueue_head(&oct->ctrl_req_wait_q); + INIT_LIST_HEAD(&oct->ctrl_req_wait_list); + + /* Initialize control mbox */ + ctrl_mbox = &oct->ctrl_mbox; + ctrl_mbox->version = OCTEP_CP_VERSION_CURRENT; + ctrl_mbox->barmem = CFG_GET_CTRL_MBOX_MEM_ADDR(oct->conf); + ret = octep_ctrl_mbox_init(ctrl_mbox); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize control mbox\n"); + return ret; + } + dev_info(&pdev->dev, "Control plane versions host: %llx, firmware: %x:%x\n", + ctrl_mbox->version, ctrl_mbox->min_fw_version, + ctrl_mbox->max_fw_version); + oct->ctrl_mbox_ifstats_offset = ctrl_mbox->barmem_sz; + + return 0; +} + +int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + int err; + + init_send_req(&d.msg, (void *)req, state_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; + req->link.cmd = OCTEP_CTRL_NET_CMD_GET; + err = octep_send_mbox_req(oct, &d, true); + if (err < 0) + return err; + + return d.data.resp.link.state; +} + +int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up, + bool wait_for_response) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + + init_send_req(&d.msg, req, state_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS; + req->link.cmd = OCTEP_CTRL_NET_CMD_SET; + req->link.state = (up) ?
OCTEP_CTRL_NET_STATE_UP : + OCTEP_CTRL_NET_STATE_DOWN; + + return octep_send_mbox_req(oct, &d, wait_for_response); +} + +int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up, + bool wait_for_response) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + + init_send_req(&d.msg, req, state_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_RX_STATE; + req->link.cmd = OCTEP_CTRL_NET_CMD_SET; + req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP : + OCTEP_CTRL_NET_STATE_DOWN; + + return octep_send_mbox_req(oct, &d, wait_for_response); +} + +int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + int err; + + init_send_req(&d.msg, req, mac_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; + req->link.cmd = OCTEP_CTRL_NET_CMD_GET; + err = octep_send_mbox_req(oct, &d, true); + if (err < 0) + return err; + + memcpy(addr, d.data.resp.mac.addr, ETH_ALEN); + + return 0; +} + +int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr, + bool wait_for_response) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + + init_send_req(&d.msg, req, mac_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MAC; + req->mac.cmd = OCTEP_CTRL_NET_CMD_SET; + memcpy(&req->mac.addr, addr, ETH_ALEN); + + return octep_send_mbox_req(oct, &d, wait_for_response); +} + +int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu, + bool wait_for_response) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + + init_send_req(&d.msg, req, mtu_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_MTU; + req->mtu.cmd = OCTEP_CTRL_NET_CMD_SET; + req->mtu.val = mtu; + + return octep_send_mbox_req(oct, &d, wait_for_response); +} + +int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid, + struct octep_iface_rx_stats *rx_stats, + struct octep_iface_tx_stats *tx_stats) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + struct octep_ctrl_net_h2f_resp *resp; + int err; + + init_send_req(&d.msg, req, 0, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS; + err = octep_send_mbox_req(oct, &d, true); + if (err < 0) + return err; + + resp = &d.data.resp; + memcpy(rx_stats, &resp->if_stats.rx_stats, sizeof(struct octep_iface_rx_stats)); + memcpy(tx_stats, &resp->if_stats.tx_stats, sizeof(struct octep_iface_tx_stats)); + return 0; +} + +int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid, + struct octep_iface_link_info *link_info) +{ + struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + struct octep_ctrl_net_h2f_resp *resp; + int err; + + init_send_req(&d.msg, req, link_info_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; + req->link_info.cmd = OCTEP_CTRL_NET_CMD_GET; + err = octep_send_mbox_req(oct, &d, true); + if (err < 0) + return err; + + resp = &d.data.resp; + link_info->supported_modes = resp->link_info.supported_modes; + link_info->advertised_modes = resp->link_info.advertised_modes; + link_info->autoneg = resp->link_info.autoneg; + link_info->pause = resp->link_info.pause; + link_info->speed = resp->link_info.speed; + + return 0; +} + +int octep_ctrl_net_set_link_info(struct octep_device *oct, int vfid, + struct octep_iface_link_info *link_info, + bool wait_for_response) +{ 
+ struct octep_ctrl_net_wait_data d = {0}; + struct octep_ctrl_net_h2f_req *req = &d.data.req; + + init_send_req(&d.msg, req, link_info_sz, vfid); + req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_INFO; + req->link_info.cmd = OCTEP_CTRL_NET_CMD_SET; + req->link_info.info.advertised_modes = link_info->advertised_modes; + req->link_info.info.autoneg = link_info->autoneg; + req->link_info.info.pause = link_info->pause; + req->link_info.info.speed = link_info->speed; + + return octep_send_mbox_req(oct, &d, wait_for_response); +} + +static void process_mbox_resp(struct octep_device *oct, + struct octep_ctrl_mbox_msg *msg) +{ + struct octep_ctrl_net_wait_data *pos, *n; + + list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list) { + if (pos->msg.hdr.s.msg_id == msg->hdr.s.msg_id) { + memcpy(&pos->data.resp, + msg->sg_list[0].msg, + msg->hdr.s.sz); + pos->done = 1; + wake_up_interruptible_all(&oct->ctrl_req_wait_q); + break; + } + } +} + +static int process_mbox_notify(struct octep_device *oct, + struct octep_ctrl_mbox_msg *msg) +{ + struct net_device *netdev = oct->netdev; + struct octep_ctrl_net_f2h_req *req; + int cmd; + + req = (struct octep_ctrl_net_f2h_req *)msg->sg_list[0].msg; + cmd = req->hdr.s.cmd; + + /* check if we support this command */ + if (octep_ctrl_net_f2h_cmd_versions[cmd] > OCTEP_CP_VERSION_CURRENT || + octep_ctrl_net_f2h_cmd_versions[cmd] < OCTEP_CP_VERSION_CURRENT) + return -EOPNOTSUPP; + + switch (cmd) { + case OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS: + if (netif_running(netdev)) { + if (req->link.state) { + dev_info(&oct->pdev->dev, "netif_carrier_on\n"); + netif_carrier_on(netdev); + } else { + dev_info(&oct->pdev->dev, "netif_carrier_off\n"); + netif_carrier_off(netdev); + } + } + break; + default: + pr_info("Unknown mbox req : %u\n", req->hdr.s.cmd); + break; + } + + return 0; +} + +void octep_ctrl_net_recv_fw_messages(struct octep_device *oct) +{ + static u16 msg_sz = sizeof(union octep_ctrl_net_max_data); + union octep_ctrl_net_max_data data = {0}; + struct octep_ctrl_mbox_msg msg = {0}; + int ret; + + msg.hdr.s.sz = msg_sz; + msg.sg_num = 1; + msg.sg_list[0].sz = msg_sz; + msg.sg_list[0].msg = &data; + while (true) { + /* mbox will overwrite msg.hdr.s.sz so initialize it */ + msg.hdr.s.sz = msg_sz; + ret = octep_ctrl_mbox_recv(&oct->ctrl_mbox, (struct octep_ctrl_mbox_msg *)&msg); + if (ret < 0) + break; + + if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_RESP) + process_mbox_resp(oct, &msg); + else if (msg.hdr.s.flags & OCTEP_CTRL_MBOX_MSG_HDR_FLAG_NOTIFY) + process_mbox_notify(oct, &msg); + } +} + +int octep_ctrl_net_uninit(struct octep_device *oct) +{ + struct octep_ctrl_net_wait_data *pos, *n; + + list_for_each_entry_safe(pos, n, &oct->ctrl_req_wait_list, list) + pos->done = 1; + + wake_up_interruptible_all(&oct->ctrl_req_wait_q); + + octep_ctrl_mbox_uninit(&oct->ctrl_mbox); + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h new file mode 100644 index 0000000000..1c2ef4ee31 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ +#ifndef __OCTEP_CTRL_NET_H__ +#define __OCTEP_CTRL_NET_H__ + +#include "octep_cp_version.h" + +#define OCTEP_CTRL_NET_INVALID_VFID (-1) + +/* Supported commands */ +enum octep_ctrl_net_cmd { + OCTEP_CTRL_NET_CMD_GET = 0, + OCTEP_CTRL_NET_CMD_SET, +}; + +/* Supported states */ +enum octep_ctrl_net_state { + OCTEP_CTRL_NET_STATE_DOWN = 0, + OCTEP_CTRL_NET_STATE_UP, +}; + +/* Supported replies */ +enum octep_ctrl_net_reply { + OCTEP_CTRL_NET_REPLY_OK = 0, + OCTEP_CTRL_NET_REPLY_GENERIC_FAIL, + OCTEP_CTRL_NET_REPLY_INVALID_PARAM, +}; + +/* Supported host to fw commands */ +enum octep_ctrl_net_h2f_cmd { + OCTEP_CTRL_NET_H2F_CMD_INVALID = 0, + OCTEP_CTRL_NET_H2F_CMD_MTU, + OCTEP_CTRL_NET_H2F_CMD_MAC, + OCTEP_CTRL_NET_H2F_CMD_GET_IF_STATS, + OCTEP_CTRL_NET_H2F_CMD_GET_XSTATS, + OCTEP_CTRL_NET_H2F_CMD_GET_Q_STATS, + OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS, + OCTEP_CTRL_NET_H2F_CMD_RX_STATE, + OCTEP_CTRL_NET_H2F_CMD_LINK_INFO, + OCTEP_CTRL_NET_H2F_CMD_MAX +}; + +/* Supported fw to host commands */ +enum octep_ctrl_net_f2h_cmd { + OCTEP_CTRL_NET_F2H_CMD_INVALID = 0, + OCTEP_CTRL_NET_F2H_CMD_LINK_STATUS, + OCTEP_CTRL_NET_F2H_CMD_MAX +}; + +union octep_ctrl_net_req_hdr { + u64 words[1]; + struct { + /* sender id */ + u16 sender; + /* receiver id */ + u16 receiver; + /* octep_ctrl_net_h2t_cmd */ + u16 cmd; + /* reserved */ + u16 rsvd0; + } s; +}; + +/* get/set mtu request */ +struct octep_ctrl_net_h2f_req_cmd_mtu { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* 0-65535 */ + u16 val; +}; + +/* get/set mac request */ +struct octep_ctrl_net_h2f_req_cmd_mac { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* xx:xx:xx:xx:xx:xx */ + u8 addr[ETH_ALEN]; +}; + +/* get/set link state, rx state */ +struct octep_ctrl_net_h2f_req_cmd_state { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* enum octep_ctrl_net_state */ + u16 state; +}; + +/* link info */ +struct octep_ctrl_net_link_info { + /* Bitmap of Supported link speeds/modes */ + u64 supported_modes; + /* Bitmap of Advertised link speeds/modes */ + u64 advertised_modes; + /* Autonegotation state; bit 0=disabled; bit 1=enabled */ + u8 autoneg; + /* Pause frames setting. 
bit 0=disabled; bit 1=enabled */ + u8 pause; + /* Negotiated link speed in Mbps */ + u32 speed; +}; + +/* get/set link info */ +struct octep_ctrl_net_h2f_req_cmd_link_info { + /* enum octep_ctrl_net_cmd */ + u16 cmd; + /* struct octep_ctrl_net_link_info */ + struct octep_ctrl_net_link_info info; +}; + +/* Host to fw request data */ +struct octep_ctrl_net_h2f_req { + union octep_ctrl_net_req_hdr hdr; + union { + struct octep_ctrl_net_h2f_req_cmd_mtu mtu; + struct octep_ctrl_net_h2f_req_cmd_mac mac; + struct octep_ctrl_net_h2f_req_cmd_state link; + struct octep_ctrl_net_h2f_req_cmd_state rx; + struct octep_ctrl_net_h2f_req_cmd_link_info link_info; + }; +} __packed; + +union octep_ctrl_net_resp_hdr { + u64 words[1]; + struct { + /* sender id */ + u16 sender; + /* receiver id */ + u16 receiver; + /* octep_ctrl_net_h2t_cmd */ + u16 cmd; + /* octep_ctrl_net_reply */ + u16 reply; + } s; +}; + +/* get mtu response */ +struct octep_ctrl_net_h2f_resp_cmd_mtu { + /* 0-65535 */ + u16 val; +}; + +/* get mac response */ +struct octep_ctrl_net_h2f_resp_cmd_mac { + /* xx:xx:xx:xx:xx:xx */ + u8 addr[ETH_ALEN]; +}; + +/* get if_stats, xstats, q_stats request */ +struct octep_ctrl_net_h2f_resp_cmd_get_stats { + struct octep_iface_rx_stats rx_stats; + struct octep_iface_tx_stats tx_stats; +}; + +/* get link state, rx state response */ +struct octep_ctrl_net_h2f_resp_cmd_state { + /* enum octep_ctrl_net_state */ + u16 state; +}; + +/* Host to fw response data */ +struct octep_ctrl_net_h2f_resp { + union octep_ctrl_net_resp_hdr hdr; + union { + struct octep_ctrl_net_h2f_resp_cmd_mtu mtu; + struct octep_ctrl_net_h2f_resp_cmd_mac mac; + struct octep_ctrl_net_h2f_resp_cmd_get_stats if_stats; + struct octep_ctrl_net_h2f_resp_cmd_state link; + struct octep_ctrl_net_h2f_resp_cmd_state rx; + struct octep_ctrl_net_link_info link_info; + }; +} __packed; + +/* link state notofication */ +struct octep_ctrl_net_f2h_req_cmd_state { + /* enum octep_ctrl_net_state */ + u16 state; +}; + +/* Fw to host request data */ +struct octep_ctrl_net_f2h_req { + union octep_ctrl_net_req_hdr hdr; + union { + struct octep_ctrl_net_f2h_req_cmd_state link; + }; +}; + +/* Fw to host response data */ +struct octep_ctrl_net_f2h_resp { + union octep_ctrl_net_resp_hdr hdr; +}; + +/* Max data size to be transferred over mbox */ +union octep_ctrl_net_max_data { + struct octep_ctrl_net_h2f_req h2f_req; + struct octep_ctrl_net_h2f_resp h2f_resp; + struct octep_ctrl_net_f2h_req f2h_req; + struct octep_ctrl_net_f2h_resp f2h_resp; +}; + +struct octep_ctrl_net_wait_data { + struct list_head list; + int done; + struct octep_ctrl_mbox_msg msg; + union { + struct octep_ctrl_net_h2f_req req; + struct octep_ctrl_net_h2f_resp resp; + } data; +}; + +/** Initialize data for ctrl net. + * + * @param oct: non-null pointer to struct octep_device. + * + * return value: 0 on success, -errno on error. + */ +int octep_ctrl_net_init(struct octep_device *oct); + +/** Get link status from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * + * return value: link status 0=down, 1=up. + */ +int octep_ctrl_net_get_link_status(struct octep_device *oct, int vfid); + +/** Set link status in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param up: boolean status. + * @param wait_for_response: poll for response. 
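
[Editor's note] octep_ctrl_net_recv_fw_messages() in the .c file sizes its scratch buffer as sizeof(union octep_ctrl_net_max_data), relying on the C rule that a union is as large as its largest member, so one receive buffer fits any of the four message shapes declared above. A standalone sketch of that sizing; the struct layouts below are simplified stand-ins, not the real (packed) field layout:

#include <stdio.h>
#include <stdint.h>

struct req_hdr { uint64_t words[1]; };
struct cmd_mtu { uint16_t cmd; uint16_t val; };
struct cmd_link_info {
	uint16_t cmd;
	uint64_t advertised_modes;
	uint8_t autoneg;
	uint8_t pause;
	uint32_t speed;
};

struct h2f_req {
	struct req_hdr hdr;
	union {
		struct cmd_mtu mtu;
		struct cmd_link_info link_info;
	} u;
};

union max_data {
	struct h2f_req h2f_req;
	/* h2f_resp, f2h_req, f2h_resp would be listed here as well */
};

int main(void)
{
	printf("mtu payload:       %zu bytes\n", sizeof(struct cmd_mtu));
	printf("link-info payload: %zu bytes\n", sizeof(struct cmd_link_info));
	printf("one buffer fits all messages: %zu bytes\n",
	       sizeof(union max_data));
	return 0;
}
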
+ * + * return value: 0 on success, -errno on failure + */ +int octep_ctrl_net_set_link_status(struct octep_device *oct, int vfid, bool up, + bool wait_for_response); + +/** Set rx state in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param up: boolean status. + * @param wait_for_response: poll for response. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_set_rx_state(struct octep_device *oct, int vfid, bool up, + bool wait_for_response); + +/** Get mac address from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param addr: non-null pointer to mac address. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_get_mac_addr(struct octep_device *oct, int vfid, u8 *addr); + +/** Set mac address in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param addr: non-null pointer to mac address. + * @param wait_for_response: poll for response. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_set_mac_addr(struct octep_device *oct, int vfid, u8 *addr, + bool wait_for_response); + +/** Set mtu in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param mtu: mtu. + * @param wait_for_response: poll for response. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_set_mtu(struct octep_device *oct, int vfid, int mtu, + bool wait_for_response); + +/** Get interface statistics from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param rx_stats: non-null pointer struct octep_iface_rx_stats. + * @param tx_stats: non-null pointer struct octep_iface_tx_stats. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_get_if_stats(struct octep_device *oct, int vfid, + struct octep_iface_rx_stats *rx_stats, + struct octep_iface_tx_stats *tx_stats); + +/** Get link info from firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param link_info: non-null pointer to struct octep_iface_link_info. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_get_link_info(struct octep_device *oct, int vfid, + struct octep_iface_link_info *link_info); + +/** Set link info in firmware. + * + * @param oct: non-null pointer to struct octep_device. + * @param vfid: Index of virtual function. + * @param link_info: non-null pointer to struct octep_iface_link_info. + * @param wait_for_response: poll for response. + * + * return value: 0 on success, -errno on failure. + */ +int octep_ctrl_net_set_link_info(struct octep_device *oct, + int vfid, + struct octep_iface_link_info *link_info, + bool wait_for_response); + +/** Poll for firmware messages and process them. + * + * @param oct: non-null pointer to struct octep_device. + */ +void octep_ctrl_net_recv_fw_messages(struct octep_device *oct); + +/** Uninitialize data for ctrl net. + * + * @param oct: non-null pointer to struct octep_device. + * + * return value: 0 on success, -errno on error. 
+ */ +int octep_ctrl_net_uninit(struct octep_device *oct); + +#endif /* __OCTEP_CTRL_NET_H__ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c new file mode 100644 index 0000000000..7d0124b283 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ethtool.c @@ -0,0 +1,467 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_ctrl_net.h" + +static const char octep_gstrings_global_stats[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_alloc_errors", + "tx_busy_errors", + "rx_dropped", + "tx_dropped", + "tx_hw_pkts", + "tx_hw_octs", + "tx_hw_bcast", + "tx_hw_mcast", + "tx_hw_underflow", + "tx_hw_control", + "tx_less_than_64", + "tx_equal_64", + "tx_equal_65_to_127", + "tx_equal_128_to_255", + "tx_equal_256_to_511", + "tx_equal_512_to_1023", + "tx_equal_1024_to_1518", + "tx_greater_than_1518", + "rx_hw_pkts", + "rx_hw_bytes", + "rx_hw_bcast", + "rx_hw_mcast", + "rx_pause_pkts", + "rx_pause_bytes", + "rx_dropped_pkts_fifo_full", + "rx_dropped_bytes_fifo_full", + "rx_err_pkts", +}; + +#define OCTEP_GLOBAL_STATS_CNT (sizeof(octep_gstrings_global_stats) / ETH_GSTRING_LEN) + +static const char octep_gstrings_tx_q_stats[][ETH_GSTRING_LEN] = { + "tx_packets_posted[Q-%u]", + "tx_packets_completed[Q-%u]", + "tx_bytes[Q-%u]", + "tx_busy[Q-%u]", +}; + +#define OCTEP_TX_Q_STATS_CNT (sizeof(octep_gstrings_tx_q_stats) / ETH_GSTRING_LEN) + +static const char octep_gstrings_rx_q_stats[][ETH_GSTRING_LEN] = { + "rx_packets[Q-%u]", + "rx_bytes[Q-%u]", + "rx_alloc_errors[Q-%u]", +}; + +#define OCTEP_RX_Q_STATS_CNT (sizeof(octep_gstrings_rx_q_stats) / ETH_GSTRING_LEN) + +static void octep_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + struct octep_device *oct = netdev_priv(netdev); + + strscpy(info->driver, OCTEP_DRV_NAME, sizeof(info->driver)); + strscpy(info->bus_info, pci_name(oct->pdev), sizeof(info->bus_info)); +} + +static void octep_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + struct octep_device *oct = netdev_priv(netdev); + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + char *strings = (char *)data; + int i, j; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < OCTEP_GLOBAL_STATS_CNT; i++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_gstrings_global_stats[i]); + strings += ETH_GSTRING_LEN; + } + + for (i = 0; i < num_queues; i++) { + for (j = 0; j < OCTEP_TX_Q_STATS_CNT; j++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_gstrings_tx_q_stats[j], i); + strings += ETH_GSTRING_LEN; + } + } + + for (i = 0; i < num_queues; i++) { + for (j = 0; j < OCTEP_RX_Q_STATS_CNT; j++) { + snprintf(strings, ETH_GSTRING_LEN, + octep_gstrings_rx_q_stats[j], i); + strings += ETH_GSTRING_LEN; + } + } + break; + default: + break; + } +} + +static int octep_get_sset_count(struct net_device *netdev, int sset) +{ + struct octep_device *oct = netdev_priv(netdev); + u16 num_queues = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); + + switch (sset) { + case ETH_SS_STATS: + return OCTEP_GLOBAL_STATS_CNT + (num_queues * + (OCTEP_TX_Q_STATS_CNT + OCTEP_RX_Q_STATS_CNT)); + break; + default: + return -EOPNOTSUPP; + } +} + +static void +octep_get_ethtool_stats(struct net_device *netdev, + struct 
ethtool_stats *stats, u64 *data) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_tx_stats *iface_tx_stats; + struct octep_iface_rx_stats *iface_rx_stats; + u64 rx_packets, rx_bytes; + u64 tx_packets, tx_bytes; + u64 rx_alloc_errors, tx_busy_errors; + int q, i; + + rx_packets = 0; + rx_bytes = 0; + tx_packets = 0; + tx_bytes = 0; + rx_alloc_errors = 0; + tx_busy_errors = 0; + tx_packets = 0; + tx_bytes = 0; + rx_packets = 0; + rx_bytes = 0; + + iface_tx_stats = &oct->iface_tx_stats; + iface_rx_stats = &oct->iface_rx_stats; + octep_ctrl_net_get_if_stats(oct, + OCTEP_CTRL_NET_INVALID_VFID, + iface_rx_stats, + iface_tx_stats); + + for (q = 0; q < oct->num_oqs; q++) { + struct octep_iq *iq = oct->iq[q]; + struct octep_oq *oq = oct->oq[q]; + + tx_packets += iq->stats.instr_completed; + tx_bytes += iq->stats.bytes_sent; + tx_busy_errors += iq->stats.tx_busy; + + rx_packets += oq->stats.packets; + rx_bytes += oq->stats.bytes; + rx_alloc_errors += oq->stats.alloc_failures; + } + i = 0; + data[i++] = rx_packets; + data[i++] = tx_packets; + data[i++] = rx_bytes; + data[i++] = tx_bytes; + data[i++] = rx_alloc_errors; + data[i++] = tx_busy_errors; + data[i++] = iface_rx_stats->dropped_pkts_fifo_full + + iface_rx_stats->err_pkts; + data[i++] = iface_tx_stats->xscol + + iface_tx_stats->xsdef; + data[i++] = iface_tx_stats->pkts; + data[i++] = iface_tx_stats->octs; + data[i++] = iface_tx_stats->bcst; + data[i++] = iface_tx_stats->mcst; + data[i++] = iface_tx_stats->undflw; + data[i++] = iface_tx_stats->ctl; + data[i++] = iface_tx_stats->hist_lt64; + data[i++] = iface_tx_stats->hist_eq64; + data[i++] = iface_tx_stats->hist_65to127; + data[i++] = iface_tx_stats->hist_128to255; + data[i++] = iface_tx_stats->hist_256to511; + data[i++] = iface_tx_stats->hist_512to1023; + data[i++] = iface_tx_stats->hist_1024to1518; + data[i++] = iface_tx_stats->hist_gt1518; + data[i++] = iface_rx_stats->pkts; + data[i++] = iface_rx_stats->octets; + data[i++] = iface_rx_stats->mcast_pkts; + data[i++] = iface_rx_stats->bcast_pkts; + data[i++] = iface_rx_stats->pause_pkts; + data[i++] = iface_rx_stats->pause_octets; + data[i++] = iface_rx_stats->dropped_pkts_fifo_full; + data[i++] = iface_rx_stats->dropped_octets_fifo_full; + data[i++] = iface_rx_stats->err_pkts; + + /* Per Tx Queue stats */ + for (q = 0; q < oct->num_iqs; q++) { + struct octep_iq *iq = oct->iq[q]; + + data[i++] = iq->stats.instr_posted; + data[i++] = iq->stats.instr_completed; + data[i++] = iq->stats.bytes_sent; + data[i++] = iq->stats.tx_busy; + } + + /* Per Rx Queue stats */ + for (q = 0; q < oct->num_oqs; q++) { + struct octep_oq *oq = oct->oq[q]; + + data[i++] = oq->stats.packets; + data[i++] = oq->stats.bytes; + data[i++] = oq->stats.alloc_failures; + } +} + +#define OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(octep_speeds, ksettings, name) \ +{ \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_T)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseT_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_R)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseR_FEC); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_10GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseLR_Full); \ + if ((octep_speeds) & 
BIT(OCTEP_LINK_MODE_10GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 10000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_25GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 25000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseCR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseKR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseLR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_40GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 40000baseSR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR2)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR2_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_CR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseCR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_KR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseKR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_LR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseLR_ER_FR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_50GBASE_SR)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 50000baseSR_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_CR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseCR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_KR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseKR4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_LR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseLR4_ER4_Full); \ + if ((octep_speeds) & BIT(OCTEP_LINK_MODE_100GBASE_SR4)) \ + ethtool_link_ksettings_add_link_mode(ksettings, name, 100000baseSR4_Full); \ +} + +static int octep_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info *link_info; + u32 advertised_modes, supported_modes; + + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + + link_info = &oct->link_info; + octep_ctrl_net_get_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID, link_info); + + advertised_modes = oct->link_info.advertised_modes; + supported_modes = oct->link_info.supported_modes; + + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(supported_modes, cmd, supported); + OCTEP_SET_ETHTOOL_LINK_MODES_BITMAP(advertised_modes, cmd, advertising); + + if (link_info->autoneg) { + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED) + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + if (link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_ADVERTISED) { + 
ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + cmd->base.autoneg = AUTONEG_ENABLE; + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + } else { + cmd->base.autoneg = AUTONEG_DISABLE; + } + + if (link_info->pause) { + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_SUPPORTED) + ethtool_link_ksettings_add_link_mode(cmd, supported, Pause); + if (link_info->pause & OCTEP_LINK_MODE_PAUSE_ADVERTISED) + ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause); + } + + cmd->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + + if (netif_carrier_ok(netdev)) { + cmd->base.speed = link_info->speed; + cmd->base.duplex = DUPLEX_FULL; + } else { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } + return 0; +} + +static int octep_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info link_info_new; + struct octep_iface_link_info *link_info; + u64 advertised = 0; + u8 autoneg = 0; + int err; + + link_info = &oct->link_info; + memcpy(&link_info_new, link_info, sizeof(struct octep_iface_link_info)); + + /* Only Full duplex is supported; + * Assume full duplex when duplex is unknown. + */ + if (cmd->base.duplex != DUPLEX_FULL && + cmd->base.duplex != DUPLEX_UNKNOWN) + return -EOPNOTSUPP; + + if (cmd->base.autoneg == AUTONEG_ENABLE) { + if (!(link_info->autoneg & OCTEP_LINK_MODE_AUTONEG_SUPPORTED)) + return -EOPNOTSUPP; + autoneg = 1; + } + + if (!bitmap_subset(cmd->link_modes.advertising, + cmd->link_modes.supported, + __ETHTOOL_LINK_MODE_MASK_NBITS)) + return -EINVAL; + + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseT_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_T); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseR_FEC)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_R); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseLR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_LR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 10000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_10GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 25000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_25GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseCR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_CR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseKR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_KR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseLR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_LR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 40000baseSR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_40GBASE_SR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseCR2_Full)) + 
advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseKR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseSR2_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR2); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseCR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_CR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseKR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_KR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseLR_ER_FR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_LR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 50000baseSR_Full)) + advertised |= BIT(OCTEP_LINK_MODE_50GBASE_SR); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseCR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_CR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseKR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_KR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseLR4_ER4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_LR4); + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, + 100000baseSR4_Full)) + advertised |= BIT(OCTEP_LINK_MODE_100GBASE_SR4); + + if (advertised == link_info->advertised_modes && + cmd->base.speed == link_info->speed && + cmd->base.autoneg == link_info->autoneg) + return 0; + + link_info_new.advertised_modes = advertised; + link_info_new.speed = cmd->base.speed; + link_info_new.autoneg = autoneg; + + err = octep_ctrl_net_set_link_info(oct, OCTEP_CTRL_NET_INVALID_VFID, + &link_info_new, true); + if (err) + return err; + + memcpy(link_info, &link_info_new, sizeof(struct octep_iface_link_info)); + return 0; +} + +static const struct ethtool_ops octep_ethtool_ops = { + .get_drvinfo = octep_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = octep_get_strings, + .get_sset_count = octep_get_sset_count, + .get_ethtool_stats = octep_get_ethtool_stats, + .get_link_ksettings = octep_get_link_ksettings, + .set_link_ksettings = octep_set_link_ksettings, +}; + +void octep_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &octep_ethtool_ops; +} diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c new file mode 100644 index 0000000000..2ee1374db4 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -0,0 +1,1273 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
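
[Editor's note] One invariant worth calling out in octep_ethtool.c above: octep_get_ethtool_stats() must fill data[] in exactly the order octep_get_strings() emits names, i.e. the 31 global stats first, then a 4-entry Tx block per queue, then a 3-entry Rx block per queue, and octep_get_sset_count() must report the same total. A standalone sketch of that index arithmetic (the queue count is an arbitrary example, not a driver default):

#include <stdio.h>

#define GLOBAL_STATS_CNT 31	/* entries in octep_gstrings_global_stats[] */
#define TX_Q_STATS_CNT 4	/* entries in octep_gstrings_tx_q_stats[] */
#define RX_Q_STATS_CNT 3	/* entries in octep_gstrings_rx_q_stats[] */

int main(void)
{
	int num_queues = 8;	/* CFG_GET_PORTS_ACTIVE_IO_RINGS() in the driver */
	int count = GLOBAL_STATS_CNT +
		    num_queues * (TX_Q_STATS_CNT + RX_Q_STATS_CNT);

	printf("ETH_SS_STATS count: %d\n", count);
	/* "tx_bytes[Q-3]" is the third entry of queue 3's Tx block */
	printf("index of tx_bytes[Q-3]:   %d\n",
	       GLOBAL_STATS_CNT + 3 * TX_Q_STATS_CNT + 2);
	/* Rx blocks start only after all Tx blocks */
	printf("index of rx_packets[Q-0]: %d\n",
	       GLOBAL_STATS_CNT + num_queues * TX_Q_STATS_CNT);
	return 0;
}
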
+ * + */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/vmalloc.h> + +#include "octep_config.h" +#include "octep_main.h" +#include "octep_ctrl_net.h" + +#define OCTEP_INTR_POLL_TIME_MSECS 100 +struct workqueue_struct *octep_wq; + +/* Supported Devices */ +static const struct pci_device_id octep_pci_id_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CN93_PF)}, + {PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, OCTEP_PCI_DEVICE_ID_CNF95N_PF)}, + {0, }, +}; +MODULE_DEVICE_TABLE(pci, octep_pci_id_tbl); + +MODULE_AUTHOR("Veerasenareddy Burru <vburru@marvell.com>"); +MODULE_DESCRIPTION(OCTEP_DRV_STRING); +MODULE_LICENSE("GPL"); + +/** + * octep_alloc_ioq_vectors() - Allocate Tx/Rx Queue interrupt info. + * + * @oct: Octeon device private data structure. + * + * Allocate resources to hold per Tx/Rx queue interrupt info. + * This is the information passed to interrupt handler, from which napi poll + * is scheduled and includes quick access to private data of Tx/Rx queue + * corresponding to the interrupt being handled. + * + * Return: 0, on successful allocation of resources for all queue interrupts. + * -1, if failed to allocate any resource. + */ +static int octep_alloc_ioq_vectors(struct octep_device *oct) +{ + int i; + struct octep_ioq_vector *ioq_vector; + + for (i = 0; i < oct->num_oqs; i++) { + oct->ioq_vector[i] = vzalloc(sizeof(*oct->ioq_vector[i])); + if (!oct->ioq_vector[i]) + goto free_ioq_vector; + + ioq_vector = oct->ioq_vector[i]; + ioq_vector->iq = oct->iq[i]; + ioq_vector->oq = oct->oq[i]; + ioq_vector->octep_dev = oct; + } + + dev_info(&oct->pdev->dev, "Allocated %d IOQ vectors\n", oct->num_oqs); + return 0; + +free_ioq_vector: + while (i) { + i--; + vfree(oct->ioq_vector[i]); + oct->ioq_vector[i] = NULL; + } + return -1; +} + +/** + * octep_free_ioq_vectors() - Free Tx/Rx Queue interrupt vector info. + * + * @oct: Octeon device private data structure. + */ +static void octep_free_ioq_vectors(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + if (oct->ioq_vector[i]) { + vfree(oct->ioq_vector[i]); + oct->ioq_vector[i] = NULL; + } + } + netdev_info(oct->netdev, "Freed IOQ Vectors\n"); +} + +/** + * octep_enable_msix_range() - enable MSI-x interrupts. + * + * @oct: Octeon device private data structure. + * + * Allocate and enable all MSI-x interrupts (queue and non-queue interrupts) + * for the Octeon device. + * + * Return: 0, on successfully enabling all MSI-x interrupts. + * -1, if failed to enable any MSI-x interrupt. 
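
[Editor's note] octep_alloc_ioq_vectors() above uses the classic allocate-all-or-roll-back idiom: on the first vzalloc() failure it frees exactly the entries allocated so far, walking the index back down. A standalone model of the same unwind pattern (names and sizes here are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NUM_VECTORS 4

static void *vectors[NUM_VECTORS];

static int alloc_vectors(void)
{
	int i;

	for (i = 0; i < NUM_VECTORS; i++) {
		vectors[i] = calloc(1, 128);	/* vzalloc() in the driver */
		if (!vectors[i])
			goto unwind;
	}
	return 0;

unwind:
	while (i) {	/* free exactly [0, i), newest first */
		i--;
		free(vectors[i]);
		vectors[i] = NULL;
	}
	return -1;
}

int main(void)
{
	printf("alloc_vectors() = %d\n", alloc_vectors());
	return 0;
}
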
+ */ +static int octep_enable_msix_range(struct octep_device *oct) +{ + int num_msix, msix_allocated; + int i; + + /* Generic interrupts apart from input/output queues */ + num_msix = oct->num_oqs + CFG_GET_NON_IOQ_MSIX(oct->conf); + oct->msix_entries = kcalloc(num_msix, + sizeof(struct msix_entry), GFP_KERNEL); + if (!oct->msix_entries) + goto msix_alloc_err; + + for (i = 0; i < num_msix; i++) + oct->msix_entries[i].entry = i; + + msix_allocated = pci_enable_msix_range(oct->pdev, oct->msix_entries, + num_msix, num_msix); + if (msix_allocated != num_msix) { + dev_err(&oct->pdev->dev, + "Failed to enable %d msix irqs; got only %d\n", + num_msix, msix_allocated); + goto enable_msix_err; + } + oct->num_irqs = msix_allocated; + dev_info(&oct->pdev->dev, "MSI-X enabled successfully\n"); + + return 0; + +enable_msix_err: + if (msix_allocated > 0) + pci_disable_msix(oct->pdev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; +msix_alloc_err: + return -1; +} + +/** + * octep_disable_msix() - disable MSI-x interrupts. + * + * @oct: Octeon device private data structure. + * + * Disable MSI-x on the Octeon device. + */ +static void octep_disable_msix(struct octep_device *oct) +{ + pci_disable_msix(oct->pdev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + dev_info(&oct->pdev->dev, "Disabled MSI-X\n"); +} + +/** + * octep_non_ioq_intr_handler() - common handler for all generic interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data. + * + * This is the common handler for all non-queue (generic) interrupts. + */ +static irqreturn_t octep_non_ioq_intr_handler(int irq, void *data) +{ + struct octep_device *oct = data; + + return oct->hw_ops.non_ioq_intr_handler(oct); +} + +/** + * octep_ioq_intr_handler() - handler for all Tx/Rx queue interrupts. + * + * @irq: Interrupt number. + * @data: interrupt data contains pointers to Tx/Rx queue private data + * and corresponding NAPI context. + * + * This is the common handler for all Tx/Rx queue interrupts. + */ +static irqreturn_t octep_ioq_intr_handler(int irq, void *data) +{ + struct octep_ioq_vector *ioq_vector = data; + struct octep_device *oct = ioq_vector->octep_dev; + + return oct->hw_ops.ioq_intr_handler(ioq_vector); +} + +/** + * octep_request_irqs() - Register interrupt handlers. + * + * @oct: Octeon device private data structure. + * + * Register handlers for all queue and non-queue interrupts. + * + * Return: 0, on successful registration of all interrupt handlers. + * -1, on any error.
+ */ +static int octep_request_irqs(struct octep_device *oct) +{ + struct net_device *netdev = oct->netdev; + struct octep_ioq_vector *ioq_vector; + struct msix_entry *msix_entry; + char **non_ioq_msix_names; + int num_non_ioq_msix; + int ret, i, j; + + num_non_ioq_msix = CFG_GET_NON_IOQ_MSIX(oct->conf); + non_ioq_msix_names = CFG_GET_NON_IOQ_MSIX_NAMES(oct->conf); + + oct->non_ioq_irq_names = kcalloc(num_non_ioq_msix, + OCTEP_MSIX_NAME_SIZE, GFP_KERNEL); + if (!oct->non_ioq_irq_names) + goto alloc_err; + + /* First few MSI-X interrupts are non-queue interrupts */ + for (i = 0; i < num_non_ioq_msix; i++) { + char *irq_name; + + irq_name = &oct->non_ioq_irq_names[i * OCTEP_MSIX_NAME_SIZE]; + msix_entry = &oct->msix_entries[i]; + + snprintf(irq_name, OCTEP_MSIX_NAME_SIZE, + "%s-%s", netdev->name, non_ioq_msix_names[i]); + ret = request_irq(msix_entry->vector, + octep_non_ioq_intr_handler, 0, + irq_name, oct); + if (ret) { + netdev_err(netdev, + "request_irq failed for %s; err=%d", + irq_name, ret); + goto non_ioq_irq_err; + } + } + + /* Request IRQs for Tx/Rx queues */ + for (j = 0; j < oct->num_oqs; j++) { + ioq_vector = oct->ioq_vector[j]; + msix_entry = &oct->msix_entries[j + num_non_ioq_msix]; + + snprintf(ioq_vector->name, sizeof(ioq_vector->name), + "%s-q%d", netdev->name, j); + ret = request_irq(msix_entry->vector, + octep_ioq_intr_handler, 0, + ioq_vector->name, ioq_vector); + if (ret) { + netdev_err(netdev, + "request_irq failed for Q-%d; err=%d", + j, ret); + goto ioq_irq_err; + } + + cpumask_set_cpu(j % num_online_cpus(), + &ioq_vector->affinity_mask); + irq_set_affinity_hint(msix_entry->vector, + &ioq_vector->affinity_mask); + } + + return 0; +ioq_irq_err: + while (j) { + --j; + ioq_vector = oct->ioq_vector[j]; + msix_entry = &oct->msix_entries[j + num_non_ioq_msix]; + + irq_set_affinity_hint(msix_entry->vector, NULL); + free_irq(msix_entry->vector, ioq_vector); + } +non_ioq_irq_err: + while (i) { + --i; + free_irq(oct->msix_entries[i].vector, oct); + } + kfree(oct->non_ioq_irq_names); + oct->non_ioq_irq_names = NULL; +alloc_err: + return -1; +} + +/** + * octep_free_irqs() - free all registered interrupts. + * + * @oct: Octeon device private data structure. + * + * Free all queue and non-queue interrupts of the Octeon device. + */ +static void octep_free_irqs(struct octep_device *oct) +{ + int i; + + /* First few MSI-X interrupts are non queue interrupts; free them */ + for (i = 0; i < CFG_GET_NON_IOQ_MSIX(oct->conf); i++) + free_irq(oct->msix_entries[i].vector, oct); + kfree(oct->non_ioq_irq_names); + + /* Free IRQs for Input/Output (Tx/Rx) queues */ + for (i = CFG_GET_NON_IOQ_MSIX(oct->conf); i < oct->num_irqs; i++) { + irq_set_affinity_hint(oct->msix_entries[i].vector, NULL); + free_irq(oct->msix_entries[i].vector, + oct->ioq_vector[i - CFG_GET_NON_IOQ_MSIX(oct->conf)]); + } + netdev_info(oct->netdev, "IRQs freed\n"); +} + +/** + * octep_setup_irqs() - setup interrupts for the Octeon device. + * + * @oct: Octeon device private data structure. + * + * Allocate data structures to hold per interrupt information, allocate/enable + * MSI-x interrupt and register interrupt handlers. + * + * Return: 0, on successful allocation and registration of all interrupts. + * -1, on any error. 
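
[Editor's note] The vector layout wired up by octep_request_irqs() above: the first CFG_GET_NON_IOQ_MSIX() entries carry the generic interrupts, the remaining entries map one-to-one onto Tx/Rx queue pairs, each with its IRQ affinity spread round-robin across online CPUs. A standalone model of that mapping; all counts below are made-up examples, not driver defaults:

#include <stdio.h>

int main(void)
{
	int num_non_ioq_msix = 16;	/* CFG_GET_NON_IOQ_MSIX() in the driver */
	int num_oqs = 8;		/* active Tx/Rx queue pairs */
	int num_online_cpus = 6;
	int j;

	for (j = 0; j < num_oqs; j++)
		printf("queue %d -> msix_entries[%d], affinity cpu %d\n",
		       j, j + num_non_ioq_msix, j % num_online_cpus);
	return 0;
}
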
+ */ +static int octep_setup_irqs(struct octep_device *oct) +{ + if (octep_alloc_ioq_vectors(oct)) + goto ioq_vector_err; + + if (octep_enable_msix_range(oct)) + goto enable_msix_err; + + if (octep_request_irqs(oct)) + goto request_irq_err; + + return 0; + +request_irq_err: + octep_disable_msix(oct); +enable_msix_err: + octep_free_ioq_vectors(oct); +ioq_vector_err: + return -1; +} + +/** + * octep_clean_irqs() - free all interrupts and its resources. + * + * @oct: Octeon device private data structure. + */ +static void octep_clean_irqs(struct octep_device *oct) +{ + octep_free_irqs(oct); + octep_disable_msix(oct); + octep_free_ioq_vectors(oct); +} + +/** + * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue. + * + * @iq: Octeon Tx queue data structure. + * @oq: Octeon Rx queue data structure. + */ +static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq) +{ + u32 pkts_pend = oq->pkts_pending; + + netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no); + if (iq->pkts_processed) { + writel(iq->pkts_processed, iq->inst_cnt_reg); + iq->pkt_in_done -= iq->pkts_processed; + iq->pkts_processed = 0; + } + if (oq->last_pkt_count - pkts_pend) { + writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg); + oq->last_pkt_count = pkts_pend; + } + + /* Flush the previous wrties before writing to RESEND bit */ + wmb(); + writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg); + writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg); +} + +/** + * octep_napi_poll() - NAPI poll function for Tx/Rx. + * + * @napi: pointer to napi context. + * @budget: max number of packets to be processed in single invocation. + */ +static int octep_napi_poll(struct napi_struct *napi, int budget) +{ + struct octep_ioq_vector *ioq_vector = + container_of(napi, struct octep_ioq_vector, napi); + u32 tx_pending, rx_done; + + tx_pending = octep_iq_process_completions(ioq_vector->iq, budget); + rx_done = octep_oq_process_rx(ioq_vector->oq, budget); + + /* need more polling if tx completion processing is still pending or + * processed at least 'budget' number of rx packets. + */ + if (tx_pending || rx_done >= budget) + return budget; + + napi_complete(napi); + octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq); + return rx_done; +} + +/** + * octep_napi_add() - Add NAPI poll for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_napi_add(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i); + netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi, + octep_napi_poll); + oct->oq[i]->napi = &oct->ioq_vector[i]->napi; + } +} + +/** + * octep_napi_delete() - delete NAPI poll callback for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_napi_delete(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Deleting NAPI on Q-%d\n", i); + netif_napi_del(&oct->ioq_vector[i]->napi); + oct->oq[i]->napi = NULL; + } +} + +/** + * octep_napi_enable() - enable NAPI for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. + */ +static void octep_napi_enable(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Enabling NAPI on Q-%d\n", i); + napi_enable(&oct->ioq_vector[i]->napi); + } +} + +/** + * octep_napi_disable() - disable NAPI for all Tx/Rx queues. + * + * @oct: Octeon device private data structure. 
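
[Editor's note] The repoll rule in octep_napi_poll() above is worth restating: the handler stays in polling mode, by returning the full budget, while Tx completions are still pending or the Rx budget was exhausted; only a quiet queue completes NAPI and re-enables its interrupt. A standalone model of just that decision:

#include <stdio.h>

static int napi_poll_model(unsigned int tx_pending, unsigned int rx_done,
			   unsigned int budget)
{
	/* Keep polling while Tx work remains or Rx used the whole budget. */
	if (tx_pending || rx_done >= budget)
		return (int)budget;

	/* napi_complete() + octep_enable_ioq_irq() happen here in the driver */
	return (int)rx_done;
}

int main(void)
{
	printf("busy queue returns:  %d\n", napi_poll_model(3, 64, 64));
	printf("quiet queue returns: %d\n", napi_poll_model(0, 5, 64));
	return 0;
}
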
+ */ +static void octep_napi_disable(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) { + netdev_dbg(oct->netdev, "Disabling NAPI on Q-%d\n", i); + napi_disable(&oct->ioq_vector[i]->napi); + } +} + +static void octep_link_up(struct net_device *netdev) +{ + netif_carrier_on(netdev); + netif_tx_start_all_queues(netdev); +} + +/** + * octep_open() - start the octeon network device. + * + * @netdev: pointer to kernel network device. + * + * setup Tx/Rx queues, interrupts and enable hardware operation of Tx/Rx queues + * and interrupts.. + * + * Return: 0, on successfully setting up device and bring it up. + * -1, on any error. + */ +static int octep_open(struct net_device *netdev) +{ + struct octep_device *oct = netdev_priv(netdev); + int err, ret; + + netdev_info(netdev, "Starting netdev ...\n"); + netif_carrier_off(netdev); + + oct->hw_ops.reset_io_queues(oct); + + if (octep_setup_iqs(oct)) + goto setup_iq_err; + if (octep_setup_oqs(oct)) + goto setup_oq_err; + if (octep_setup_irqs(oct)) + goto setup_irq_err; + + err = netif_set_real_num_tx_queues(netdev, oct->num_oqs); + if (err) + goto set_queues_err; + err = netif_set_real_num_rx_queues(netdev, oct->num_iqs); + if (err) + goto set_queues_err; + + octep_napi_add(oct); + octep_napi_enable(oct); + + oct->link_info.admin_up = 1; + octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, true, + false); + octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, true, + false); + oct->poll_non_ioq_intr = false; + + /* Enable the input and output queues for this Octeon device */ + oct->hw_ops.enable_io_queues(oct); + + /* Enable Octeon device interrupts */ + oct->hw_ops.enable_interrupts(oct); + + octep_oq_dbell_init(oct); + + ret = octep_ctrl_net_get_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID); + if (ret > 0) + octep_link_up(netdev); + + return 0; + +set_queues_err: + octep_clean_irqs(oct); +setup_irq_err: + octep_free_oqs(oct); +setup_oq_err: + octep_free_iqs(oct); +setup_iq_err: + return -1; +} + +/** + * octep_stop() - stop the octeon network device. + * + * @netdev: pointer to kernel network device. + * + * stop the device Tx/Rx operations, bring down the link and + * free up all resources allocated for Tx/Rx queues and interrupts. + */ +static int octep_stop(struct net_device *netdev) +{ + struct octep_device *oct = netdev_priv(netdev); + + netdev_info(netdev, "Stopping the device ...\n"); + + octep_ctrl_net_set_link_status(oct, OCTEP_CTRL_NET_INVALID_VFID, false, + false); + octep_ctrl_net_set_rx_state(oct, OCTEP_CTRL_NET_INVALID_VFID, false, + false); + + /* Stop Tx from stack */ + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + oct->link_info.admin_up = 0; + oct->link_info.oper_up = 0; + + oct->hw_ops.disable_interrupts(oct); + octep_napi_disable(oct); + octep_napi_delete(oct); + + octep_clean_irqs(oct); + octep_clean_iqs(oct); + + oct->hw_ops.disable_io_queues(oct); + oct->hw_ops.reset_io_queues(oct); + octep_free_oqs(oct); + octep_free_iqs(oct); + + oct->poll_non_ioq_intr = true; + queue_delayed_work(octep_wq, &oct->intr_poll_task, + msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); + + netdev_info(netdev, "Device stopped !!\n"); + return 0; +} + +/** + * octep_iq_full_check() - check if a Tx queue is full. + * + * @iq: Octeon Tx queue data structure. + * + * Return: 0, if the Tx queue is not full. + * 1, if the Tx queue is full. 
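
[Editor's note] The Tx-full check documented above (its body continues just below) implements stop/recheck hysteresis: stop the subqueue once free descriptors fall below a wake threshold, then test once more because a concurrent completion may already have freed entries, restarting immediately instead of waiting for the next completion interrupt. A standalone model; the threshold and ring size are illustrative, the real OCTEP_WAKE_QUEUE_THRESHOLD is defined outside this hunk:

#include <stdio.h>

#define MAX_COUNT 1024			/* ring size, illustrative */
#define WAKE_QUEUE_THRESHOLD 32		/* illustrative stand-in */

static int iq_full_check(int instr_pending)
{
	if (MAX_COUNT - instr_pending >= WAKE_QUEUE_THRESHOLD)
		return 0;			/* enough room, keep sending */

	printf("stopping queue at %d pending\n", instr_pending);
	/* netif_stop_subqueue() in the driver */

	/* The driver re-reads an atomic here: completions on another CPU
	 * may have freed entries since the first check, in which case the
	 * queue restarts right away.
	 */
	if (MAX_COUNT - instr_pending >= WAKE_QUEUE_THRESHOLD)
		return 0;	/* netif_start_subqueue(); restart_cnt++ */

	return 1;			/* caller returns NETDEV_TX_BUSY */
}

int main(void)
{
	printf("half full -> %d\n", iq_full_check(512));
	printf("nearly full -> %d\n", iq_full_check(1000));
	return 0;
}
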
+ */ +static inline int octep_iq_full_check(struct octep_iq *iq) +{ + if (likely((iq->max_count - atomic_read(&iq->instr_pending)) >= + OCTEP_WAKE_QUEUE_THRESHOLD)) + return 0; + + /* Stop the queue if unable to send */ + netif_stop_subqueue(iq->netdev, iq->q_no); + + /* check again and restart the queue, in case NAPI has just freed + * enough Tx ring entries. + */ + if (unlikely((iq->max_count - atomic_read(&iq->instr_pending)) >= + OCTEP_WAKE_QUEUE_THRESHOLD)) { + netif_start_subqueue(iq->netdev, iq->q_no); + iq->stats.restart_cnt++; + return 0; + } + + return 1; +} + +/** + * octep_start_xmit() - Enqueue packet to Octoen hardware Tx Queue. + * + * @skb: packet skbuff pointer. + * @netdev: kernel network device. + * + * Return: NETDEV_TX_BUSY, if Tx Queue is full. + * NETDEV_TX_OK, if successfully enqueued to hardware Tx queue. + */ +static netdev_tx_t octep_start_xmit(struct sk_buff *skb, + struct net_device *netdev) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_tx_sglist_desc *sglist; + struct octep_tx_buffer *tx_buffer; + struct octep_tx_desc_hw *hw_desc; + struct skb_shared_info *shinfo; + struct octep_instr_hdr *ih; + struct octep_iq *iq; + skb_frag_t *frag; + u16 nr_frags, si; + u16 q_no, wi; + + q_no = skb_get_queue_mapping(skb); + if (q_no >= oct->num_iqs) { + netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no); + q_no = q_no % oct->num_iqs; + } + + iq = oct->iq[q_no]; + if (octep_iq_full_check(iq)) { + iq->stats.tx_busy++; + return NETDEV_TX_BUSY; + } + + shinfo = skb_shinfo(skb); + nr_frags = shinfo->nr_frags; + + wi = iq->host_write_index; + hw_desc = &iq->desc_ring[wi]; + hw_desc->ih64 = 0; + + tx_buffer = iq->buff_info + wi; + tx_buffer->skb = skb; + + ih = &hw_desc->ih; + ih->tlen = skb->len; + ih->pkind = oct->pkind; + + if (!nr_frags) { + tx_buffer->gather = 0; + tx_buffer->dma = dma_map_single(iq->dev, skb->data, + skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, tx_buffer->dma)) + goto dma_map_err; + hw_desc->dptr = tx_buffer->dma; + } else { + /* Scatter/Gather */ + dma_addr_t dma; + u16 len; + + sglist = tx_buffer->sglist; + + ih->gsz = nr_frags + 1; + ih->gather = 1; + tx_buffer->gather = 1; + + len = skb_headlen(skb); + dma = dma_map_single(iq->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, dma)) + goto dma_map_err; + + dma_sync_single_for_cpu(iq->dev, tx_buffer->sglist_dma, + OCTEP_SGLIST_SIZE_PER_PKT, + DMA_TO_DEVICE); + memset(sglist, 0, OCTEP_SGLIST_SIZE_PER_PKT); + sglist[0].len[3] = len; + sglist[0].dma_ptr[0] = dma; + + si = 1; /* entry 0 is main skb, mapped above */ + frag = &shinfo->frags[0]; + while (nr_frags--) { + len = skb_frag_size(frag); + dma = skb_frag_dma_map(iq->dev, frag, 0, + len, DMA_TO_DEVICE); + if (dma_mapping_error(iq->dev, dma)) + goto dma_map_sg_err; + + sglist[si >> 2].len[3 - (si & 3)] = len; + sglist[si >> 2].dma_ptr[si & 3] = dma; + + frag++; + si++; + } + dma_sync_single_for_device(iq->dev, tx_buffer->sglist_dma, + OCTEP_SGLIST_SIZE_PER_PKT, + DMA_TO_DEVICE); + + hw_desc->dptr = tx_buffer->sglist_dma; + } + + netdev_tx_sent_queue(iq->netdev_q, skb->len); + skb_tx_timestamp(skb); + atomic_inc(&iq->instr_pending); + wi++; + if (wi == iq->max_count) + wi = 0; + iq->host_write_index = wi; + /* Flush the hw descriptor before writing to doorbell */ + wmb(); + + /* Ring Doorbell to notify the NIC there is a new packet */ + writel(1, iq->doorbell_reg); + iq->stats.instr_posted++; + return NETDEV_TX_OK; + +dma_map_sg_err: + if (si > 0) { + dma_unmap_single(iq->dev, 
sglist[0].dma_ptr[0], + sglist[0].len[3], DMA_TO_DEVICE); + sglist[0].len[3] = 0; + } + while (si > 1) { + dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3], + sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE); + sglist[si >> 2].len[3 - (si & 3)] = 0; + si--; + } + tx_buffer->gather = 0; +dma_map_err: + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +/** + * octep_get_stats64() - Get Octeon network device statistics. + * + * @netdev: kernel network device. + * @stats: pointer to stats structure to be filled in. + */ +static void octep_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + u64 tx_packets, tx_bytes, rx_packets, rx_bytes; + struct octep_device *oct = netdev_priv(netdev); + int q; + + if (netif_running(netdev)) + octep_ctrl_net_get_if_stats(oct, + OCTEP_CTRL_NET_INVALID_VFID, + &oct->iface_rx_stats, + &oct->iface_tx_stats); + + tx_packets = 0; + tx_bytes = 0; + rx_packets = 0; + rx_bytes = 0; + for (q = 0; q < oct->num_oqs; q++) { + struct octep_iq *iq = oct->iq[q]; + struct octep_oq *oq = oct->oq[q]; + + tx_packets += iq->stats.instr_completed; + tx_bytes += iq->stats.bytes_sent; + rx_packets += oq->stats.packets; + rx_bytes += oq->stats.bytes; + } + stats->tx_packets = tx_packets; + stats->tx_bytes = tx_bytes; + stats->rx_packets = rx_packets; + stats->rx_bytes = rx_bytes; + stats->multicast = oct->iface_rx_stats.mcast_pkts; + stats->rx_errors = oct->iface_rx_stats.err_pkts; + stats->collisions = oct->iface_tx_stats.xscol; + stats->tx_fifo_errors = oct->iface_tx_stats.undflw; +} + +/** + * octep_tx_timeout_task - work queue task to Handle Tx queue timeout. + * + * @work: pointer to Tx queue timeout work_struct + * + * Stop and start the device so that it frees up all queue resources + * and restarts the queues, that potentially clears a Tx queue timeout + * condition. + **/ +static void octep_tx_timeout_task(struct work_struct *work) +{ + struct octep_device *oct = container_of(work, struct octep_device, + tx_timeout_task); + struct net_device *netdev = oct->netdev; + + rtnl_lock(); + if (netif_running(netdev)) { + octep_stop(netdev); + octep_open(netdev); + } + rtnl_unlock(); +} + +/** + * octep_tx_timeout() - Handle Tx Queue timeout. + * + * @netdev: pointer to kernel network device. + * @txqueue: Timed out Tx queue number. + * + * Schedule a work to handle Tx queue timeout. 
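
[Editor's note] The scatter-gather path in octep_start_xmit() above packs four buffers per sglist entry: dma_ptr[si & 3] picks the pointer slot, while len[3 - (si & 3)] stores lengths in reversed slot order within each entry's length word, the ordering the hardware expects. A standalone sketch of that index math with dummy addresses:

#include <stdio.h>
#include <stdint.h>

struct sglist_desc {
	uint16_t len[4];	/* lengths, reversed slot order */
	uint64_t dma_ptr[4];	/* one DMA address per buffer */
};

int main(void)
{
	struct sglist_desc sglist[4] = { 0 };
	unsigned int si;

	/* Head buffer is entry 0 slot 0; frags continue from si = 1. */
	for (si = 0; si < 6; si++) {
		sglist[si >> 2].len[3 - (si & 3)] = (uint16_t)(100 + si);
		sglist[si >> 2].dma_ptr[si & 3] = 0x1000ULL * (si + 1);
		printf("buf %u -> entry %u, ptr slot %u, len slot %u\n",
		       si, si >> 2, si & 3, 3 - (si & 3));
	}
	return 0;
}
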
+ */ +static void octep_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct octep_device *oct = netdev_priv(netdev); + + queue_work(octep_wq, &oct->tx_timeout_task); +} + +static int octep_set_mac(struct net_device *netdev, void *p) +{ + struct octep_device *oct = netdev_priv(netdev); + struct sockaddr *addr = (struct sockaddr *)p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = octep_ctrl_net_set_mac_addr(oct, OCTEP_CTRL_NET_INVALID_VFID, + addr->sa_data, true); + if (err) + return err; + + memcpy(oct->mac_addr, addr->sa_data, ETH_ALEN); + eth_hw_addr_set(netdev, addr->sa_data); + + return 0; +} + +static int octep_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octep_device *oct = netdev_priv(netdev); + struct octep_iface_link_info *link_info; + int err = 0; + + link_info = &oct->link_info; + if (link_info->mtu == new_mtu) + return 0; + + err = octep_ctrl_net_set_mtu(oct, OCTEP_CTRL_NET_INVALID_VFID, new_mtu, + true); + if (!err) { + oct->link_info.mtu = new_mtu; + netdev->mtu = new_mtu; + } + + return err; +} + +static const struct net_device_ops octep_netdev_ops = { + .ndo_open = octep_open, + .ndo_stop = octep_stop, + .ndo_start_xmit = octep_start_xmit, + .ndo_get_stats64 = octep_get_stats64, + .ndo_tx_timeout = octep_tx_timeout, + .ndo_set_mac_address = octep_set_mac, + .ndo_change_mtu = octep_change_mtu, +}; + +/** + * octep_intr_poll_task - work queue task to process non-ioq interrupts. + * + * @work: pointer to mbox work_struct + * + * Process non-ioq interrupts to handle control mailbox, pfvf mailbox. + **/ +static void octep_intr_poll_task(struct work_struct *work) +{ + struct octep_device *oct = container_of(work, struct octep_device, + intr_poll_task.work); + + if (!oct->poll_non_ioq_intr) { + dev_info(&oct->pdev->dev, "Interrupt poll task stopped.\n"); + return; + } + + oct->hw_ops.poll_non_ioq_interrupts(oct); + queue_delayed_work(octep_wq, &oct->intr_poll_task, + msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); +} + +/** + * octep_hb_timeout_task - work queue task to check firmware heartbeat. + * + * @work: pointer to hb work_struct + * + * Check for heartbeat miss count. Uninitialize oct device if miss count + * exceeds configured max heartbeat miss count. + * + **/ +static void octep_hb_timeout_task(struct work_struct *work) +{ + struct octep_device *oct = container_of(work, struct octep_device, + hb_task.work); + + int miss_cnt; + + miss_cnt = atomic_inc_return(&oct->hb_miss_cnt); + if (miss_cnt < oct->conf->max_hb_miss_cnt) { + queue_delayed_work(octep_wq, &oct->hb_task, + msecs_to_jiffies(oct->conf->hb_interval * 1000)); + return; + } + + dev_err(&oct->pdev->dev, "Missed %u heartbeats. Uninitializing\n", + miss_cnt); + rtnl_lock(); + if (netif_running(oct->netdev)) + octep_stop(oct->netdev); + rtnl_unlock(); +} + +/** + * octep_ctrl_mbox_task - work queue task to handle ctrl mbox messages. + * + * @work: pointer to ctrl mbox work_struct + * + * Poll ctrl mbox message queue and handle control messages from firmware. 
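
[Editor's note] octep_hb_timeout_task() above is the firmware watchdog: each interval it bumps hb_miss_cnt and re-arms itself while the count stays below oct->conf->max_hb_miss_cnt, tearing the interface down once the limit is hit. The counter reset on a received heartbeat happens in code outside this hunk, so that part is an assumption here. A standalone model of the counting logic:

#include <stdio.h>

#define MAX_HB_MISS_CNT 10	/* oct->conf->max_hb_miss_cnt in the driver */

static int hb_miss_cnt;	/* presumably zeroed by the heartbeat interrupt */

static int hb_tick(void)
{
	int miss_cnt = ++hb_miss_cnt;	/* atomic_inc_return() in the driver */

	if (miss_cnt < MAX_HB_MISS_CNT)
		return 0;	/* re-arm the delayed work */
	return 1;		/* stop the interface */
}

int main(void)
{
	int tick;

	for (tick = 1; tick <= MAX_HB_MISS_CNT; tick++)
		if (hb_tick())
			printf("missed %d heartbeats: stopping device\n", tick);
	return 0;
}
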
+ **/ +static void octep_ctrl_mbox_task(struct work_struct *work) +{ + struct octep_device *oct = container_of(work, struct octep_device, + ctrl_mbox_task); + + octep_ctrl_net_recv_fw_messages(oct); +} + +static const char *octep_devid_to_str(struct octep_device *oct) +{ + switch (oct->chip_id) { + case OCTEP_PCI_DEVICE_ID_CN93_PF: + return "CN93XX"; + case OCTEP_PCI_DEVICE_ID_CNF95N_PF: + return "CNF95N"; + default: + return "Unsupported"; + } +} + +/** + * octep_device_setup() - Setup Octeon Device. + * + * @oct: Octeon device private data structure. + * + * Setup Octeon device hardware operations, configuration, etc ... + */ +int octep_device_setup(struct octep_device *oct) +{ + struct pci_dev *pdev = oct->pdev; + int i, ret; + + /* allocate memory for oct->conf */ + oct->conf = kzalloc(sizeof(*oct->conf), GFP_KERNEL); + if (!oct->conf) + return -ENOMEM; + + /* Map BAR regions */ + for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { + oct->mmio[i].hw_addr = + ioremap(pci_resource_start(oct->pdev, i * 2), + pci_resource_len(oct->pdev, i * 2)); + if (!oct->mmio[i].hw_addr) + goto unmap_prev; + + oct->mmio[i].mapped = 1; + } + + oct->chip_id = pdev->device; + oct->rev_id = pdev->revision; + dev_info(&pdev->dev, "chip_id = 0x%x\n", pdev->device); + + switch (oct->chip_id) { + case OCTEP_PCI_DEVICE_ID_CN93_PF: + case OCTEP_PCI_DEVICE_ID_CNF95N_PF: + dev_info(&pdev->dev, "Setting up OCTEON %s PF PASS%d.%d\n", + octep_devid_to_str(oct), OCTEP_MAJOR_REV(oct), + OCTEP_MINOR_REV(oct)); + octep_device_setup_cn93_pf(oct); + break; + default: + dev_err(&pdev->dev, + "%s: unsupported device\n", __func__); + goto unsupported_dev; + } + + oct->pkind = CFG_GET_IQ_PKIND(oct->conf); + + ret = octep_ctrl_net_init(oct); + if (ret) + return ret; + + atomic_set(&oct->hb_miss_cnt, 0); + INIT_DELAYED_WORK(&oct->hb_task, octep_hb_timeout_task); + queue_delayed_work(octep_wq, &oct->hb_task, + msecs_to_jiffies(oct->conf->hb_interval * 1000)); + return 0; + +unsupported_dev: + i = OCTEP_MMIO_REGIONS; +unmap_prev: + while (i--) + iounmap(oct->mmio[i].hw_addr); + + kfree(oct->conf); + return -1; +} + +/** + * octep_device_cleanup() - Cleanup Octeon Device. + * + * @oct: Octeon device private data structure. + * + * Cleanup Octeon device allocated resources. + */ +static void octep_device_cleanup(struct octep_device *oct) +{ + int i; + + oct->poll_non_ioq_intr = false; + cancel_delayed_work_sync(&oct->intr_poll_task); + cancel_work_sync(&oct->ctrl_mbox_task); + + dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n"); + + for (i = 0; i < OCTEP_MAX_VF; i++) { + vfree(oct->mbox[i]); + oct->mbox[i] = NULL; + } + + octep_ctrl_net_uninit(oct); + cancel_delayed_work_sync(&oct->hb_task); + + oct->hw_ops.soft_reset(oct); + for (i = 0; i < OCTEP_MMIO_REGIONS; i++) { + if (oct->mmio[i].mapped) + iounmap(oct->mmio[i].hw_addr); + } + + kfree(oct->conf); + oct->conf = NULL; +} + +static bool get_fw_ready_status(struct pci_dev *pdev) +{ + u32 pos = 0; + u16 vsec_id; + u8 status; + + while ((pos = pci_find_next_ext_capability(pdev, pos, + PCI_EXT_CAP_ID_VNDR))) { + pci_read_config_word(pdev, pos + 4, &vsec_id); +#define FW_STATUS_VSEC_ID 0xA3 + if (vsec_id != FW_STATUS_VSEC_ID) + continue; + + pci_read_config_byte(pdev, (pos + 8), &status); + dev_info(&pdev->dev, "Firmware ready status = %u\n", status); +#define FW_STATUS_READY 1ULL + return status == FW_STATUS_READY; + } + return false; +} + +/** + * octep_probe() - Octeon PCI device probe handler. + * + * @pdev: PCI device structure. + * @ent: entry in Octeon PCI device ID table. 
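
[Editor's note] get_fw_ready_status() above decides whether probe should defer by walking the PCIe vendor-specific extended capability list: for each VSEC it reads the id word at capability offset +4 and, on matching 0xA3, reads the status byte at offset +8. A standalone model where a small table stands in for config space (fw_ready and struct vsec are hypothetical names):

#include <stdio.h>
#include <stdint.h>

#define FW_STATUS_VSEC_ID 0xA3
#define FW_STATUS_READY 1

struct vsec {
	uint16_t id;		/* read at capability offset + 4 */
	uint8_t status;		/* read at capability offset + 8 */
};

static int fw_ready(const struct vsec *caps, int n)
{
	int i;

	/* pci_find_next_ext_capability(..., PCI_EXT_CAP_ID_VNDR) loop */
	for (i = 0; i < n; i++) {
		if (caps[i].id != FW_STATUS_VSEC_ID)
			continue;
		return caps[i].status == FW_STATUS_READY;
	}
	return 0;	/* no firmware-status VSEC: not ready */
}

int main(void)
{
	struct vsec caps[] = { { 0x0002, 0 }, { FW_STATUS_VSEC_ID, 1 } };

	printf("firmware ready: %d\n", fw_ready(caps, 2));
	return 0;
}
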
+ * + * Initializes and enables the Octeon PCI device for network operations. + * Initializes Octeon private data structure and registers a network device. + */ +static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct octep_device *octep_dev = NULL; + struct net_device *netdev; + int err; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + return err; + } + + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "Failed to set DMA mask !!\n"); + goto err_dma_mask; + } + + err = pci_request_mem_regions(pdev, OCTEP_DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Failed to map PCI memory regions\n"); + goto err_pci_regions; + } + + pci_set_master(pdev); + + if (!get_fw_ready_status(pdev)) { + dev_notice(&pdev->dev, "Firmware not ready; defer probe.\n"); + err = -EPROBE_DEFER; + goto err_alloc_netdev; + } + + netdev = alloc_etherdev_mq(sizeof(struct octep_device), + OCTEP_MAX_QUEUES); + if (!netdev) { + dev_err(&pdev->dev, "Failed to allocate netdev\n"); + err = -ENOMEM; + goto err_alloc_netdev; + } + SET_NETDEV_DEV(netdev, &pdev->dev); + + octep_dev = netdev_priv(netdev); + octep_dev->netdev = netdev; + octep_dev->pdev = pdev; + octep_dev->dev = &pdev->dev; + pci_set_drvdata(pdev, octep_dev); + + err = octep_device_setup(octep_dev); + if (err) { + dev_err(&pdev->dev, "Device setup failed\n"); + goto err_octep_config; + } + INIT_WORK(&octep_dev->tx_timeout_task, octep_tx_timeout_task); + INIT_WORK(&octep_dev->ctrl_mbox_task, octep_ctrl_mbox_task); + INIT_DELAYED_WORK(&octep_dev->intr_poll_task, octep_intr_poll_task); + octep_dev->poll_non_ioq_intr = true; + queue_delayed_work(octep_wq, &octep_dev->intr_poll_task, + msecs_to_jiffies(OCTEP_INTR_POLL_TIME_MSECS)); + + netdev->netdev_ops = &octep_netdev_ops; + octep_set_ethtool_ops(netdev); + netif_carrier_off(netdev); + + netdev->hw_features = NETIF_F_SG; + netdev->features |= netdev->hw_features; + netdev->min_mtu = OCTEP_MIN_MTU; + netdev->max_mtu = OCTEP_MAX_MTU; + netdev->mtu = OCTEP_DEFAULT_MTU; + + err = octep_ctrl_net_get_mac_addr(octep_dev, OCTEP_CTRL_NET_INVALID_VFID, + octep_dev->mac_addr); + if (err) { + dev_err(&pdev->dev, "Failed to get mac address\n"); + goto register_dev_err; + } + eth_hw_addr_set(netdev, octep_dev->mac_addr); + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev\n"); + goto register_dev_err; + } + dev_info(&pdev->dev, "Device probe successful\n"); + return 0; + +register_dev_err: + octep_device_cleanup(octep_dev); +err_octep_config: + free_netdev(netdev); +err_alloc_netdev: + pci_release_mem_regions(pdev); +err_pci_regions: +err_dma_mask: + pci_disable_device(pdev); + return err; +} + +/** + * octep_remove() - Remove Octeon PCI device from driver control. + * + * @pdev: PCI device structure of the Octeon device. + * + * Cleanup all resources allocated for the Octeon device. + * Unregister from network device and disable the PCI device. 
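
[Editor's note] A note on the BAR arithmetic in octep_device_setup() above: the loop maps pci_resource_start(pdev, i * 2) because the three regions are BAR0, BAR2 and BAR4 (as the octep_mmio comment in octep_main.h below also states), each a 64-bit BAR occupying two 32-bit BAR slots. A trivial standalone illustration of the index mapping:

#include <stdio.h>

#define OCTEP_MMIO_REGIONS 3

int main(void)
{
	int i;

	for (i = 0; i < OCTEP_MMIO_REGIONS; i++)
		printf("mmio[%d] -> PCI resource %d (BAR%d)\n",
		       i, i * 2, i * 2);
	return 0;
}
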
+ */ +static void octep_remove(struct pci_dev *pdev) +{ + struct octep_device *oct = pci_get_drvdata(pdev); + struct net_device *netdev; + + if (!oct) + return; + + netdev = oct->netdev; + if (netdev->reg_state == NETREG_REGISTERED) + unregister_netdev(netdev); + + cancel_work_sync(&oct->tx_timeout_task); + octep_device_cleanup(oct); + pci_release_mem_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +static struct pci_driver octep_driver = { + .name = OCTEP_DRV_NAME, + .id_table = octep_pci_id_tbl, + .probe = octep_probe, + .remove = octep_remove, +}; + +/** + * octep_init_module() - Module initialiation. + * + * create common resource for the driver and register PCI driver. + */ +static int __init octep_init_module(void) +{ + int ret; + + pr_info("%s: Loading %s ...\n", OCTEP_DRV_NAME, OCTEP_DRV_STRING); + + /* work queue for all deferred tasks */ + octep_wq = create_singlethread_workqueue(OCTEP_DRV_NAME); + if (!octep_wq) { + pr_err("%s: Failed to create common workqueue\n", + OCTEP_DRV_NAME); + return -ENOMEM; + } + + ret = pci_register_driver(&octep_driver); + if (ret < 0) { + pr_err("%s: Failed to register PCI driver; err=%d\n", + OCTEP_DRV_NAME, ret); + destroy_workqueue(octep_wq); + return ret; + } + + pr_info("%s: Loaded successfully !\n", OCTEP_DRV_NAME); + + return ret; +} + +/** + * octep_exit_module() - Module exit routine. + * + * unregister the driver with PCI subsystem and cleanup common resources. + */ +static void __exit octep_exit_module(void) +{ + pr_info("%s: Unloading ...\n", OCTEP_DRV_NAME); + + pci_unregister_driver(&octep_driver); + destroy_workqueue(octep_wq); + + pr_info("%s: Unloading complete\n", OCTEP_DRV_NAME); +} + +module_init(octep_init_module); +module_exit(octep_exit_module); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.h b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h new file mode 100644 index 0000000000..e0907a7191 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.h @@ -0,0 +1,375 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#ifndef _OCTEP_MAIN_H_ +#define _OCTEP_MAIN_H_ + +#include "octep_tx.h" +#include "octep_rx.h" +#include "octep_ctrl_mbox.h" + +#define OCTEP_DRV_NAME "octeon_ep" +#define OCTEP_DRV_STRING "Marvell Octeon EndPoint NIC Driver" + +#define OCTEP_PCIID_CN93_PF 0xB200177d +#define OCTEP_PCIID_CN93_VF 0xB203177d + +#define OCTEP_PCI_DEVICE_ID_CN93_PF 0xB200 +#define OCTEP_PCI_DEVICE_ID_CN93_VF 0xB203 + +#define OCTEP_PCI_DEVICE_ID_CNF95N_PF 0xB400 //95N PF + +#define OCTEP_MAX_QUEUES 63 +#define OCTEP_MAX_IQ OCTEP_MAX_QUEUES +#define OCTEP_MAX_OQ OCTEP_MAX_QUEUES +#define OCTEP_MAX_VF 64 + +#define OCTEP_MAX_MSIX_VECTORS OCTEP_MAX_OQ + +/* Flags to disable and enable Interrupts */ +#define OCTEP_INPUT_INTR (1) +#define OCTEP_OUTPUT_INTR (2) +#define OCTEP_MBOX_INTR (4) +#define OCTEP_ALL_INTR 0xff + +#define OCTEP_IQ_INTR_RESEND_BIT 59 +#define OCTEP_OQ_INTR_RESEND_BIT 59 + +#define OCTEP_MMIO_REGIONS 3 +/* PCI address space mapping information. + * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of + * Octeon gets mapped to different physical address spaces in + * the kernel. + */ +struct octep_mmio { + /* The physical address to which the PCI address space is mapped. */ + u8 __iomem *hw_addr; + + /* Flag indicating the mapping was successful. 
*/ + int mapped; +}; + +struct octep_pci_win_regs { + u8 __iomem *pci_win_wr_addr; + u8 __iomem *pci_win_rd_addr; + u8 __iomem *pci_win_wr_data; + u8 __iomem *pci_win_rd_data; +}; + +struct octep_hw_ops { + void (*setup_iq_regs)(struct octep_device *oct, int q); + void (*setup_oq_regs)(struct octep_device *oct, int q); + void (*setup_mbox_regs)(struct octep_device *oct, int mbox); + + irqreturn_t (*non_ioq_intr_handler)(void *ioq_vector); + irqreturn_t (*ioq_intr_handler)(void *ioq_vector); + int (*soft_reset)(struct octep_device *oct); + void (*reinit_regs)(struct octep_device *oct); + u32 (*update_iq_read_idx)(struct octep_iq *iq); + + void (*enable_interrupts)(struct octep_device *oct); + void (*disable_interrupts)(struct octep_device *oct); + bool (*poll_non_ioq_interrupts)(struct octep_device *oct); + + void (*enable_io_queues)(struct octep_device *oct); + void (*disable_io_queues)(struct octep_device *oct); + void (*enable_iq)(struct octep_device *oct, int q); + void (*disable_iq)(struct octep_device *oct, int q); + void (*enable_oq)(struct octep_device *oct, int q); + void (*disable_oq)(struct octep_device *oct, int q); + void (*reset_io_queues)(struct octep_device *oct); + void (*dump_registers)(struct octep_device *oct); +}; + +/* Octeon mailbox data */ +struct octep_mbox_data { + u32 cmd; + u32 total_len; + u32 recv_len; + u32 rsvd; + u64 *data; +}; + +/* Octeon device mailbox */ +struct octep_mbox { + /* A spinlock to protect access to this q_mbox. */ + spinlock_t lock; + + u32 q_no; + u32 state; + + /* SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ + u8 __iomem *mbox_int_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(0) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(1) for VF. + */ + u8 __iomem *mbox_write_reg; + + /* SLI_PKT_PF_VF_MBOX_SIG(1) for PF, + * SLI_PKT_PF_VF_MBOX_SIG(0) for VF. + */ + u8 __iomem *mbox_read_reg; + + struct octep_mbox_data mbox_data; +}; + +/* Tx/Rx queue vector per interrupt. */ +struct octep_ioq_vector { + char name[OCTEP_MSIX_NAME_SIZE]; + struct napi_struct napi; + struct octep_device *octep_dev; + struct octep_iq *iq; + struct octep_oq *oq; + cpumask_t affinity_mask; +}; + +/* Octeon hardware/firmware offload capability flags. */ +#define OCTEP_CAP_TX_CHECKSUM BIT(0) +#define OCTEP_CAP_RX_CHECKSUM BIT(1) +#define OCTEP_CAP_TSO BIT(2) + +/* Link modes */ +enum octep_link_mode_bit_indices { + OCTEP_LINK_MODE_10GBASE_T = 0, + OCTEP_LINK_MODE_10GBASE_R, + OCTEP_LINK_MODE_10GBASE_CR, + OCTEP_LINK_MODE_10GBASE_KR, + OCTEP_LINK_MODE_10GBASE_LR, + OCTEP_LINK_MODE_10GBASE_SR, + OCTEP_LINK_MODE_25GBASE_CR, + OCTEP_LINK_MODE_25GBASE_KR, + OCTEP_LINK_MODE_25GBASE_SR, + OCTEP_LINK_MODE_40GBASE_CR4, + OCTEP_LINK_MODE_40GBASE_KR4, + OCTEP_LINK_MODE_40GBASE_LR4, + OCTEP_LINK_MODE_40GBASE_SR4, + OCTEP_LINK_MODE_50GBASE_CR2, + OCTEP_LINK_MODE_50GBASE_KR2, + OCTEP_LINK_MODE_50GBASE_SR2, + OCTEP_LINK_MODE_50GBASE_CR, + OCTEP_LINK_MODE_50GBASE_KR, + OCTEP_LINK_MODE_50GBASE_LR, + OCTEP_LINK_MODE_50GBASE_SR, + OCTEP_LINK_MODE_100GBASE_CR4, + OCTEP_LINK_MODE_100GBASE_KR4, + OCTEP_LINK_MODE_100GBASE_LR4, + OCTEP_LINK_MODE_100GBASE_SR4, + OCTEP_LINK_MODE_NBITS +}; + +/* Hardware interface link state information. */ +struct octep_iface_link_info { + /* Bitmap of Supported link speeds/modes. */ + u64 supported_modes; + + /* Bitmap of Advertised link speeds/modes. */ + u64 advertised_modes; + + /* Negotiated link speed in Mbps. */ + u32 speed; + + /* MTU */ + u16 mtu; + + /* Autonegotation state. 
*/ +#define OCTEP_LINK_MODE_AUTONEG_SUPPORTED BIT(0) +#define OCTEP_LINK_MODE_AUTONEG_ADVERTISED BIT(1) + u8 autoneg; + + /* Pause frames setting. */ +#define OCTEP_LINK_MODE_PAUSE_SUPPORTED BIT(0) +#define OCTEP_LINK_MODE_PAUSE_ADVERTISED BIT(1) + u8 pause; + + /* Admin state of the link (ifconfig <iface> up/down */ + u8 admin_up; + + /* Operational state of the link: physical link is up down */ + u8 oper_up; +}; + +/* The Octeon device specific private data structure. + * Each Octeon device has this structure to represent all its components. + */ +struct octep_device { + struct octep_config *conf; + + /* Octeon Chip type. */ + u16 chip_id; + u16 rev_id; + + /* Device capabilities enabled */ + u64 caps_enabled; + /* Device capabilities supported */ + u64 caps_supported; + + /* Pointer to basic Linux device */ + struct device *dev; + /* Linux PCI device pointer */ + struct pci_dev *pdev; + /* Netdev corresponding to the Octeon device */ + struct net_device *netdev; + + /* memory mapped io range */ + struct octep_mmio mmio[OCTEP_MMIO_REGIONS]; + + /* MAC address */ + u8 mac_addr[ETH_ALEN]; + + /* Tx queues (IQ: Instruction Queue) */ + u16 num_iqs; + /* pkind value to be used in every Tx hardware descriptor */ + u8 pkind; + /* Pointers to Octeon Tx queues */ + struct octep_iq *iq[OCTEP_MAX_IQ]; + + /* Rx queues (OQ: Output Queue) */ + u16 num_oqs; + /* Pointers to Octeon Rx queues */ + struct octep_oq *oq[OCTEP_MAX_OQ]; + + /* Hardware port number of the PCIe interface */ + u16 pcie_port; + + /* PCI Window registers to access some hardware CSRs */ + struct octep_pci_win_regs pci_win_regs; + /* Hardware operations */ + struct octep_hw_ops hw_ops; + + /* IRQ info */ + u16 num_irqs; + u16 num_non_ioq_irqs; + char *non_ioq_irq_names; + struct msix_entry *msix_entries; + /* IOq information of it's corresponding MSI-X interrupt. */ + struct octep_ioq_vector *ioq_vector[OCTEP_MAX_QUEUES]; + + /* Hardware Interface Tx statistics */ + struct octep_iface_tx_stats iface_tx_stats; + /* Hardware Interface Rx statistics */ + struct octep_iface_rx_stats iface_rx_stats; + + /* Hardware Interface Link info like supported modes, aneg support */ + struct octep_iface_link_info link_info; + + /* Mailbox to talk to VFs */ + struct octep_mbox *mbox[OCTEP_MAX_VF]; + + /* Work entry to handle Tx timeout */ + struct work_struct tx_timeout_task; + + /* control mbox over pf */ + struct octep_ctrl_mbox ctrl_mbox; + + /* offset for iface stats */ + u32 ctrl_mbox_ifstats_offset; + + /* Work entry to handle ctrl mbox interrupt */ + struct work_struct ctrl_mbox_task; + /* Wait queue for host to firmware requests */ + wait_queue_head_t ctrl_req_wait_q; + /* List of objects waiting for h2f response */ + struct list_head ctrl_req_wait_list; + + /* Enable non-ioq interrupt polling */ + bool poll_non_ioq_intr; + /* Work entry to poll non-ioq interrupts */ + struct delayed_work intr_poll_task; + + /* Firmware heartbeat timer */ + struct timer_list hb_timer; + /* Firmware heartbeat miss count tracked by timer */ + atomic_t hb_miss_cnt; + /* Task to reset device on heartbeat miss */ + struct delayed_work hb_task; +}; + +static inline u16 OCTEP_MAJOR_REV(struct octep_device *oct) +{ + u16 rev = (oct->rev_id & 0xC) >> 2; + + return (rev == 0) ? 
1 : rev; +} + +static inline u16 OCTEP_MINOR_REV(struct octep_device *oct) +{ + return (oct->rev_id & 0x3); +} + +/* Octeon CSR read/write access APIs */ +#define octep_write_csr(octep_dev, reg_off, value) \ + writel(value, (octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_write_csr64(octep_dev, reg_off, val64) \ + writeq(val64, (octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_read_csr(octep_dev, reg_off) \ + readl((octep_dev)->mmio[0].hw_addr + (reg_off)) + +#define octep_read_csr64(octep_dev, reg_off) \ + readq((octep_dev)->mmio[0].hw_addr + (reg_off)) + +/* Read windowed register. + * @param oct - pointer to the Octeon device. + * @param addr - Address of the register to read. + * + * This routine is called to read from the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return - 64 bit value read from the register. + */ +static inline u64 +OCTEP_PCI_WIN_READ(struct octep_device *oct, u64 addr) +{ + u64 val64; + + addr |= 1ull << 53; /* read 8 bytes */ + writeq(addr, oct->pci_win_regs.pci_win_rd_addr); + val64 = readq(oct->pci_win_regs.pci_win_rd_data); + + dev_dbg(&oct->pdev->dev, + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val64); + + return val64; +} + +/* Write windowed register. + * @param oct - pointer to the Octeon device. + * @param addr - Address of the register to write + * @param val - Value to write + * + * This routine is called to write to the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return Nothing. + */ +static inline void +OCTEP_PCI_WIN_WRITE(struct octep_device *oct, u64 addr, u64 val) +{ + writeq(addr, oct->pci_win_regs.pci_win_wr_addr); + writeq(val, oct->pci_win_regs.pci_win_wr_data); + + dev_dbg(&oct->pdev->dev, + "%s: reg: 0x%016llx val: 0x%016llx\n", __func__, addr, val); +} + +extern struct workqueue_struct *octep_wq; + +int octep_device_setup(struct octep_device *oct); +int octep_setup_iqs(struct octep_device *oct); +void octep_free_iqs(struct octep_device *oct); +void octep_clean_iqs(struct octep_device *oct); +int octep_setup_oqs(struct octep_device *oct); +void octep_free_oqs(struct octep_device *oct); +void octep_oq_dbell_init(struct octep_device *oct); +void octep_device_setup_cn93_pf(struct octep_device *oct); +int octep_iq_process_completions(struct octep_iq *iq, u16 budget); +int octep_oq_process_rx(struct octep_oq *oq, int budget); +void octep_set_ethtool_ops(struct net_device *netdev); + +#endif /* _OCTEP_MAIN_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h new file mode 100644 index 0000000000..b25c3093dc --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_regs_cn9k_pf.h @@ -0,0 +1,373 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. 
+ *
+ */
+
+#ifndef _OCTEP_REGS_CN9K_PF_H_
+#define _OCTEP_REGS_CN9K_PF_H_
+
+/* ############################ RST ######################### */
+#define CN93_RST_BOOT			0x000087E006001600ULL
+#define CN93_RST_CORE_DOMAIN_W1S	0x000087E006001820ULL
+#define CN93_RST_CORE_DOMAIN_W1C	0x000087E006001828ULL
+
+#define CN93_CONFIG_XPANSION_BAR	0x38
+#define CN93_CONFIG_PCIE_CAP		0x70
+#define CN93_CONFIG_PCIE_DEVCAP		0x74
+#define CN93_CONFIG_PCIE_DEVCTL		0x78
+#define CN93_CONFIG_PCIE_LINKCAP	0x7C
+#define CN93_CONFIG_PCIE_LINKCTL	0x80
+#define CN93_CONFIG_PCIE_SLOTCAP	0x84
+#define CN93_CONFIG_PCIE_SLOTCTL	0x88
+
+#define CN93_PCIE_SRIOV_FDL		0x188	/* 0x98 */
+#define CN93_PCIE_SRIOV_FDL_BIT_POS	0x10
+#define CN93_PCIE_SRIOV_FDL_MASK	0xFF
+
+#define CN93_CONFIG_PCIE_FLTMSK		0x720
+
+/* ################# Offsets of RING, EPF, MAC ######################### */
+#define CN93_RING_OFFSET	(0x1ULL << 17)
+#define CN93_EPF_OFFSET		(0x1ULL << 25)
+#define CN93_MAC_OFFSET		(0x1ULL << 4)
+#define CN93_BIT_ARRAY_OFFSET	(0x1ULL << 4)
+#define CN93_EPVF_RING_OFFSET	(0x1ULL << 4)
+
+/* ################# Scratch Registers ######################### */
+#define CN93_SDP_EPF_SCRATCH	0x205E0
+
+/* ################# Window Registers ######################### */
+#define CN93_SDP_WIN_WR_ADDR64		0x20000
+#define CN93_SDP_WIN_RD_ADDR64		0x20010
+#define CN93_SDP_WIN_WR_DATA64		0x20020
+#define CN93_SDP_WIN_WR_MASK_REG	0x20030
+#define CN93_SDP_WIN_RD_DATA64		0x20040
+
+#define CN93_SDP_MAC_NUMBER	0x2C100
+
+/* ################# Global Privileged registers ######################### */
+#define CN93_SDP_EPF_RINFO	0x205F0
+
+#define CN93_SDP_EPF_RINFO_SRN(val)	((val) & 0xFF)
+#define CN93_SDP_EPF_RINFO_RPVF(val)	(((val) >> 32) & 0xF)
+#define CN93_SDP_EPF_RINFO_NVFS(val)	(((val) >> 48) & 0xFF)
+
+/* SDP Function select */
+#define CN93_SDP_FUNC_SEL_EPF_BIT_POS	8
+#define CN93_SDP_FUNC_SEL_FUNC_BIT_POS	0
+
+/* ##### RING IN (Into device from PCI: Tx Ring) REGISTERS #### */
+#define CN93_SDP_R_IN_CONTROL_START	0x10000
+#define CN93_SDP_R_IN_ENABLE_START	0x10010
+#define CN93_SDP_R_IN_INSTR_BADDR_START	0x10020
+#define CN93_SDP_R_IN_INSTR_RSIZE_START	0x10030
+#define CN93_SDP_R_IN_INSTR_DBELL_START	0x10040
+#define CN93_SDP_R_IN_CNTS_START	0x10050
+#define CN93_SDP_R_IN_INT_LEVELS_START	0x10060
+#define CN93_SDP_R_IN_PKT_CNT_START	0x10080
+#define CN93_SDP_R_IN_BYTE_CNT_START	0x10090
+
+#define CN93_SDP_R_IN_CONTROL(ring)		\
+	(CN93_SDP_R_IN_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_ENABLE(ring)		\
+	(CN93_SDP_R_IN_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_BADDR(ring)		\
+	(CN93_SDP_R_IN_INSTR_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_RSIZE(ring)		\
+	(CN93_SDP_R_IN_INSTR_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INSTR_DBELL(ring)		\
+	(CN93_SDP_R_IN_INSTR_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS(ring)		\
+	(CN93_SDP_R_IN_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_LEVELS(ring)		\
+	(CN93_SDP_R_IN_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_PKT_CNT(ring)		\
+	(CN93_SDP_R_IN_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_BYTE_CNT(ring)		\
+	(CN93_SDP_R_IN_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/* Rings per Virtual Function */
+#define CN93_R_IN_CTL_RPVF_MASK		(0xF)
+#define CN93_R_IN_CTL_RPVF_POS		(48)
+
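The RPVF field is read back from a ring's IN_CONTROL CSR rather than assumed. A minimal sketch of extracting it with the mask/shift above, reusing the octep_read_csr64() helper from octep_main.h (the function name here is illustrative, not part of this patch):

/* Sketch: read rings-per-VF from ring 0's IN_CONTROL CSR using the
 * CN93_R_IN_CTL_RPVF_* mask and shift defined above.
 */
static u64 cn93_get_rings_per_vf(struct octep_device *oct)
{
	u64 in_ctl = octep_read_csr64(oct, CN93_SDP_R_IN_CONTROL(0));

	return (in_ctl >> CN93_R_IN_CTL_RPVF_POS) & CN93_R_IN_CTL_RPVF_MASK;
}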
+/* Number of instructions to be read in one MAC read request.
+ * Set to the maximum value (4).
+ */
+#define CN93_R_IN_CTL_IDLE	(0x1ULL << 28)
+#define CN93_R_IN_CTL_RDSIZE	(0x3ULL << 25)
+#define CN93_R_IN_CTL_IS_64B	(0x1ULL << 24)
+#define CN93_R_IN_CTL_D_NSR	(0x1ULL << 8)
+#define CN93_R_IN_CTL_D_ESR	(0x1ULL << 6)
+#define CN93_R_IN_CTL_D_ROR	(0x1ULL << 5)
+#define CN93_R_IN_CTL_NSR	(0x1ULL << 3)
+#define CN93_R_IN_CTL_ESR	(0x1ULL << 1)
+#define CN93_R_IN_CTL_ROR	(0x1ULL << 0)
+
+#define CN93_R_IN_CTL_MASK	(CN93_R_IN_CTL_RDSIZE | CN93_R_IN_CTL_IS_64B)
+
+/* ##### RING OUT (out from device to PCI host: Rx Ring) REGISTERS #### */
+#define CN93_SDP_R_OUT_CNTS_START		0x10100
+#define CN93_SDP_R_OUT_INT_LEVELS_START		0x10110
+#define CN93_SDP_R_OUT_SLIST_BADDR_START	0x10120
+#define CN93_SDP_R_OUT_SLIST_RSIZE_START	0x10130
+#define CN93_SDP_R_OUT_SLIST_DBELL_START	0x10140
+#define CN93_SDP_R_OUT_CONTROL_START		0x10150
+#define CN93_SDP_R_OUT_ENABLE_START		0x10160
+#define CN93_SDP_R_OUT_PKT_CNT_START		0x10180
+#define CN93_SDP_R_OUT_BYTE_CNT_START		0x10190
+
+#define CN93_SDP_R_OUT_CONTROL(ring)		\
+	(CN93_SDP_R_OUT_CONTROL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_ENABLE(ring)		\
+	(CN93_SDP_R_OUT_ENABLE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_BADDR(ring)	\
+	(CN93_SDP_R_OUT_SLIST_BADDR_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_RSIZE(ring)	\
+	(CN93_SDP_R_OUT_SLIST_RSIZE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_SLIST_DBELL(ring)	\
+	(CN93_SDP_R_OUT_SLIST_DBELL_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS(ring)		\
+	(CN93_SDP_R_OUT_CNTS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_LEVELS(ring)		\
+	(CN93_SDP_R_OUT_INT_LEVELS_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_PKT_CNT(ring)		\
+	(CN93_SDP_R_OUT_PKT_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_BYTE_CNT(ring)		\
+	(CN93_SDP_R_OUT_BYTE_CNT_START + ((ring) * CN93_RING_OFFSET))
+
+/*------------------ R_OUT Masks ----------------*/
+#define CN93_R_OUT_INT_LEVELS_BMODE	BIT_ULL(63)
+#define CN93_R_OUT_INT_LEVELS_TIMET	(32)
+
+#define CN93_R_OUT_CTL_IDLE		BIT_ULL(40)
+#define CN93_R_OUT_CTL_ES_I		BIT_ULL(34)
+#define CN93_R_OUT_CTL_NSR_I		BIT_ULL(33)
+#define CN93_R_OUT_CTL_ROR_I		BIT_ULL(32)
+#define CN93_R_OUT_CTL_ES_D		BIT_ULL(30)
+#define CN93_R_OUT_CTL_NSR_D		BIT_ULL(29)
+#define CN93_R_OUT_CTL_ROR_D		BIT_ULL(28)
+#define CN93_R_OUT_CTL_ES_P		BIT_ULL(26)
+#define CN93_R_OUT_CTL_NSR_P		BIT_ULL(25)
+#define CN93_R_OUT_CTL_ROR_P		BIT_ULL(24)
+#define CN93_R_OUT_CTL_IMODE		BIT_ULL(23)
+
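R_OUT_INT_LEVELS packs both Rx coalescing thresholds into a single CSR: the packet-count threshold in the low 32 bits and the time threshold above bit CN93_R_OUT_INT_LEVELS_TIMET. A sketch of programming it for one ring, reusing the octep_write_csr64() helper from octep_main.h (function and parameter names are illustrative, not values taken from this patch):

/* Sketch: program Rx interrupt coalescing for ring q. 'pkt_thresh'
 * fills the low 32 bits; 'time_thresh' is shifted up by
 * CN93_R_OUT_INT_LEVELS_TIMET (32).
 */
static void cn93_set_oq_intr_levels(struct octep_device *oct, int q,
				    u32 pkt_thresh, u32 time_thresh)
{
	u64 val = ((u64)time_thresh << CN93_R_OUT_INT_LEVELS_TIMET) |
		  pkt_thresh;

	octep_write_csr64(oct, CN93_SDP_R_OUT_INT_LEVELS(q), val);
}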
+/* ############### Interrupt Moderation Registers ############### */
+#define CN93_SDP_R_IN_INT_MDRT_CTL0_START	0x10280
+#define CN93_SDP_R_IN_INT_MDRT_CTL1_START	0x102A0
+#define CN93_SDP_R_IN_INT_MDRT_DBG_START	0x102C0
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0_START	0x10380
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1_START	0x103A0
+#define CN93_SDP_R_OUT_INT_MDRT_DBG_START	0x103C0
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL0(ring)	\
+	(CN93_SDP_R_IN_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_CTL1(ring)	\
+	(CN93_SDP_R_IN_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_INT_MDRT_DBG(ring)	\
+	(CN93_SDP_R_IN_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL0(ring)	\
+	(CN93_SDP_R_OUT_INT_MDRT_CTL0_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_CTL1(ring)	\
+	(CN93_SDP_R_OUT_INT_MDRT_CTL1_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_INT_MDRT_DBG(ring)	\
+	(CN93_SDP_R_OUT_INT_MDRT_DBG_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Mail Box Registers ########################## */
+/* INT register for VF. When the PF does an MBOX write to a VF, the
+ * corresponding bit is set in this register as well as in the
+ * PF_VF_INT register.
+ *
+ * This is a RO register; the interrupt is cleared by writing 1 to
+ * PF_VF_INT.
+ */
+/* The first two registers below are for PF-to-VF signalling; the last
+ * one carries data from VF to PF.
+ */
+#define CN93_SDP_R_MBOX_PF_VF_DATA_START	0x10210
+#define CN93_SDP_R_MBOX_PF_VF_INT_START		0x10220
+#define CN93_SDP_R_MBOX_VF_PF_DATA_START	0x10230
+
+#define CN93_SDP_R_MBOX_PF_VF_DATA(ring)	\
+	(CN93_SDP_R_MBOX_PF_VF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_PF_VF_INT(ring)		\
+	(CN93_SDP_R_MBOX_PF_VF_INT_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_VF_PF_DATA(ring)	\
+	(CN93_SDP_R_MBOX_VF_PF_DATA_START + ((ring) * CN93_RING_OFFSET))
+
+/* ##################### Interrupt Registers ########################## */
+#define CN93_SDP_R_ERR_TYPE_START	0x10400
+
+#define CN93_SDP_R_ERR_TYPE(ring)	\
+	(CN93_SDP_R_ERR_TYPE_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_MBOX_ISM_START	0x10500
+#define CN93_SDP_R_OUT_CNTS_ISM_START	0x10510
+#define CN93_SDP_R_IN_CNTS_ISM_START	0x10520
+
+#define CN93_SDP_R_MBOX_ISM(ring)	\
+	(CN93_SDP_R_MBOX_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_OUT_CNTS_ISM(ring)	\
+	(CN93_SDP_R_OUT_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_R_IN_CNTS_ISM(ring)	\
+	(CN93_SDP_R_IN_CNTS_ISM_START + ((ring) * CN93_RING_OFFSET))
+
+#define CN93_SDP_EPF_MBOX_RINT_START		0x20100
+#define CN93_SDP_EPF_MBOX_RINT_W1S_START	0x20120
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START	0x20140
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START	0x20160
+
+#define CN93_SDP_EPF_VFIRE_RINT_START		0x20180
+#define CN93_SDP_EPF_VFIRE_RINT_W1S_START	0x201A0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START	0x201C0
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START	0x201E0
+
+#define CN93_SDP_EPF_IRERR_RINT			0x20200
+#define CN93_SDP_EPF_IRERR_RINT_W1S		0x20210
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1C		0x20220
+#define CN93_SDP_EPF_IRERR_RINT_ENA_W1S		0x20230
+
+#define CN93_SDP_EPF_VFORE_RINT_START		0x20240
+#define CN93_SDP_EPF_VFORE_RINT_W1S_START	0x20260
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START	0x20280
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START	0x202A0
+
+#define CN93_SDP_EPF_ORERR_RINT			0x20320
+#define CN93_SDP_EPF_ORERR_RINT_W1S		0x20330
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1C		0x20340
+#define CN93_SDP_EPF_ORERR_RINT_ENA_W1S		0x20350
+
+#define CN93_SDP_EPF_OEI_RINT			0x20360
+#define CN93_SDP_EPF_OEI_RINT_W1S		0x20370
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1C		0x20380
+#define CN93_SDP_EPF_OEI_RINT_ENA_W1S		0x20390
+
+#define CN93_SDP_EPF_DMA_RINT			0x20400
+#define CN93_SDP_EPF_DMA_RINT_W1S		0x20410
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1C		0x20420
+#define CN93_SDP_EPF_DMA_RINT_ENA_W1S		0x20430
+
+#define CN93_SDP_EPF_DMA_INT_LEVEL_START	0x20440
+#define CN93_SDP_EPF_DMA_CNT_START		0x20460
+#define CN93_SDP_EPF_DMA_TIM_START		0x20480
+
+#define CN93_SDP_EPF_MISC_RINT			0x204A0
+#define CN93_SDP_EPF_MISC_RINT_W1S		0x204B0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1C		0x204C0
+#define CN93_SDP_EPF_MISC_RINT_ENA_W1S		0x204D0
+
+#define CN93_SDP_EPF_DMA_VF_RINT_START		0x204E0
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S_START	0x20500
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START	0x20520
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START	0x20540
+
+#define CN93_SDP_EPF_PP_VF_RINT_START		0x20560
+#define CN93_SDP_EPF_PP_VF_RINT_W1S_START	0x20580
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START	0x205A0
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START	0x205C0
+
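Each of the VF-indexed *_RINT register files above is a pair of 64-bit bit arrays (one bit per VF, matching the rint0/rint1 MSI-X vector names listed in octep_cn9k_pf.c), spaced CN93_BIT_ARRAY_OFFSET bytes apart; the per-group accessor macros follow below. As an illustrative sketch (the helper name is not part of this patch), enabling mailbox interrupts for one VF group is a single write-1-to-set:

/* Sketch: enable PF<->VF mailbox interrupts for VF group 'index'
 * (0 covers VFs 0-63, 1 the next group). Each ENA_W1S register is a
 * 64-bit write-1-to-set bit array, one bit per VF.
 */
static void cn93_enable_vf_mbox_intr(struct octep_device *oct, int index)
{
	u64 reg = CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START +
		  (u64)index * CN93_BIT_ARRAY_OFFSET;

	octep_write_csr64(oct, reg, ~0ULL);
}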
+#define CN93_SDP_EPF_MBOX_RINT(index)	\
+	(CN93_SDP_EPF_MBOX_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_W1S(index)	\
+	(CN93_SDP_EPF_MBOX_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1C(index)	\
+	(CN93_SDP_EPF_MBOX_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_MBOX_RINT_ENA_W1S(index)	\
+	(CN93_SDP_EPF_MBOX_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFIRE_RINT(index)	\
+	(CN93_SDP_EPF_VFIRE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_W1S(index)	\
+	(CN93_SDP_EPF_VFIRE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1C(index)	\
+	(CN93_SDP_EPF_VFIRE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFIRE_RINT_ENA_W1S(index)	\
+	(CN93_SDP_EPF_VFIRE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_VFORE_RINT(index)	\
+	(CN93_SDP_EPF_VFORE_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_W1S(index)	\
+	(CN93_SDP_EPF_VFORE_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1C(index)	\
+	(CN93_SDP_EPF_VFORE_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_VFORE_RINT_ENA_W1S(index)	\
+	(CN93_SDP_EPF_VFORE_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_DMA_VF_RINT(index)	\
+	(CN93_SDP_EPF_DMA_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_W1S(index)	\
+	(CN93_SDP_EPF_DMA_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C(index)	\
+	(CN93_SDP_EPF_DMA_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S(index)	\
+	(CN93_SDP_EPF_DMA_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+#define CN93_SDP_EPF_PP_VF_RINT(index)	\
+	(CN93_SDP_EPF_PP_VF_RINT_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_W1S(index)	\
+	(CN93_SDP_EPF_PP_VF_RINT_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1C(index)	\
+	(CN93_SDP_EPF_PP_VF_RINT_ENA_W1C_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+#define CN93_SDP_EPF_PP_VF_RINT_ENA_W1S(index)	\
+	(CN93_SDP_EPF_PP_VF_RINT_ENA_W1S_START + ((index) * CN93_BIT_ARRAY_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN93_INTR_R_SEND_ISM		BIT_ULL(63)
+#define CN93_INTR_R_OUT_INT		BIT_ULL(62)
+#define CN93_INTR_R_IN_INT		BIT_ULL(61)
+#define CN93_INTR_R_MBOX_INT		BIT_ULL(60)
+#define CN93_INTR_R_RESEND		BIT_ULL(59)
+#define CN93_INTR_R_CLR_TIM		BIT_ULL(58)
+
+/* ####################### Ring Mapping Registers ################################## */
+#define CN93_SDP_EPVF_RING_START	0x26000
+#define CN93_SDP_IN_RING_TB_MAP_START	0x28000
+#define CN93_SDP_IN_RATE_LIMIT_START	0x2A000
+#define CN93_SDP_MAC_PF_RING_CTL_START	0x2C000
+
+#define CN93_SDP_EPVF_RING(ring)	\
+	(CN93_SDP_EPVF_RING_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RING_TB_MAP(ring)	\
+	(CN93_SDP_IN_RING_TB_MAP_START + ((ring) * CN93_EPVF_RING_OFFSET))
+#define CN93_SDP_IN_RATE_LIMIT(ring)	\
+	(CN93_SDP_IN_RATE_LIMIT_START + ((ring) *
CN93_EPVF_RING_OFFSET)) +#define CN93_SDP_MAC_PF_RING_CTL(mac) \ + (CN93_SDP_MAC_PF_RING_CTL_START + ((mac) * CN93_MAC_OFFSET)) + +#define CN93_SDP_MAC_PF_RING_CTL_NPFS(val) ((val) & 0xF) +#define CN93_SDP_MAC_PF_RING_CTL_SRN(val) (((val) >> 8) & 0xFF) +#define CN93_SDP_MAC_PF_RING_CTL_RPPF(val) (((val) >> 16) & 0x3F) + +/* Number of non-queue interrupts in CN93xx */ +#define CN93_NUM_NON_IOQ_INTR 16 + +/* bit 0 for control mbox interrupt */ +#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_MBOX BIT_ULL(0) +/* bit 1 for firmware heartbeat interrupt */ +#define CN93_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT BIT_ULL(1) + +#endif /* _OCTEP_REGS_CN9K_PF_H_ */ diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c new file mode 100644 index 0000000000..3c43f80785 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell Octeon EP (EndPoint) Ethernet Driver + * + * Copyright (C) 2020 Marvell. + * + */ + +#include <linux/pci.h> +#include <linux/etherdevice.h> +#include <linux/vmalloc.h> + +#include "octep_config.h" +#include "octep_main.h" + +static void octep_oq_reset_indices(struct octep_oq *oq) +{ + oq->host_read_idx = 0; + oq->host_refill_idx = 0; + oq->refill_count = 0; + oq->last_pkt_count = 0; + oq->pkts_pending = 0; +} + +/** + * octep_oq_fill_ring_buffers() - fill initial receive buffers for Rx ring. + * + * @oq: Octeon Rx queue data structure. + * + * Return: 0, if successfully filled receive buffers for all descriptors. + * -1, if failed to allocate a buffer or failed to map for DMA. + */ +static int octep_oq_fill_ring_buffers(struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 i; + + for (i = 0; i < oq->max_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "Rx buffer alloc failed\n"); + goto rx_buf_alloc_err; + } + desc_ring[i].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[i].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer alloc: DMA mapping error!\n", + oq->q_no); + put_page(page); + goto dma_map_err; + } + oq->buff_info[i].page = page; + } + + return 0; + +dma_map_err: +rx_buf_alloc_err: + while (i) { + i--; + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + } + + return -1; +} + +/** + * octep_oq_refill() - refill buffers for used Rx ring descriptors. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * + * Return: number of descriptors successfully refilled with receive buffers. 
+ */ +static int octep_oq_refill(struct octep_device *oct, struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + struct page *page; + u32 refill_idx, i; + + refill_idx = oq->host_refill_idx; + for (i = 0; i < oq->refill_count; i++) { + page = dev_alloc_page(); + if (unlikely(!page)) { + dev_err(oq->dev, "refill: rx buffer alloc failed\n"); + oq->stats.alloc_failures++; + break; + } + + desc_ring[refill_idx].buffer_ptr = dma_map_page(oq->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(oq->dev, desc_ring[refill_idx].buffer_ptr)) { + dev_err(oq->dev, + "OQ-%d buffer refill: DMA mapping error!\n", + oq->q_no); + put_page(page); + oq->stats.alloc_failures++; + break; + } + oq->buff_info[refill_idx].page = page; + refill_idx++; + if (refill_idx == oq->max_count) + refill_idx = 0; + } + oq->host_refill_idx = refill_idx; + oq->refill_count -= i; + + return i; +} + +/** + * octep_setup_oq() - Setup a Rx queue. + * + * @oct: Octeon device private data structure. + * @q_no: Rx queue number to be setup. + * + * Allocate resources for a Rx queue. + */ +static int octep_setup_oq(struct octep_device *oct, int q_no) +{ + struct octep_oq *oq; + u32 desc_ring_size; + + oq = vzalloc(sizeof(*oq)); + if (!oq) + goto create_oq_fail; + oct->oq[q_no] = oq; + + oq->octep_dev = oct; + oq->netdev = oct->netdev; + oq->dev = &oct->pdev->dev; + oq->q_no = q_no; + oq->max_count = CFG_GET_OQ_NUM_DESC(oct->conf); + oq->ring_size_mask = oq->max_count - 1; + oq->buffer_size = CFG_GET_OQ_BUF_SIZE(oct->conf); + oq->max_single_buffer_size = oq->buffer_size - OCTEP_OQ_RESP_HW_SIZE; + + /* When the hardware/firmware supports additional capabilities, + * additional header is filled-in by Octeon after length field in + * Rx packets. this header contains additional packet information. + */ + if (oct->caps_enabled) + oq->max_single_buffer_size -= OCTEP_OQ_RESP_HW_EXT_SIZE; + + oq->refill_threshold = CFG_GET_OQ_REFILL_THRESHOLD(oct->conf); + + desc_ring_size = oq->max_count * OCTEP_OQ_DESC_SIZE; + oq->desc_ring = dma_alloc_coherent(oq->dev, desc_ring_size, + &oq->desc_ring_dma, GFP_KERNEL); + + if (unlikely(!oq->desc_ring)) { + dev_err(oq->dev, + "Failed to allocate DMA memory for OQ-%d !!\n", q_no); + goto desc_dma_alloc_err; + } + + oq->buff_info = vcalloc(oq->max_count, OCTEP_OQ_RECVBUF_SIZE); + if (unlikely(!oq->buff_info)) { + dev_err(&oct->pdev->dev, + "Failed to allocate buffer info for OQ-%d\n", q_no); + goto buf_list_err; + } + + if (octep_oq_fill_ring_buffers(oq)) + goto oq_fill_buff_err; + + octep_oq_reset_indices(oq); + oct->hw_ops.setup_oq_regs(oct, q_no); + oct->num_oqs++; + + return 0; + +oq_fill_buff_err: + vfree(oq->buff_info); + oq->buff_info = NULL; +buf_list_err: + dma_free_coherent(oq->dev, desc_ring_size, + oq->desc_ring, oq->desc_ring_dma); + oq->desc_ring = NULL; +desc_dma_alloc_err: + vfree(oq); + oct->oq[q_no] = NULL; +create_oq_fail: + return -1; +} + +/** + * octep_oq_free_ring_buffers() - Free ring buffers. + * + * @oq: Octeon Rx queue data structure. + * + * Free receive buffers in unused Rx queue descriptors. 
+ */ +static void octep_oq_free_ring_buffers(struct octep_oq *oq) +{ + struct octep_oq_desc_hw *desc_ring = oq->desc_ring; + int i; + + if (!oq->desc_ring || !oq->buff_info) + return; + + for (i = 0; i < oq->max_count; i++) { + if (oq->buff_info[i].page) { + dma_unmap_page(oq->dev, desc_ring[i].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + put_page(oq->buff_info[i].page); + oq->buff_info[i].page = NULL; + desc_ring[i].buffer_ptr = 0; + } + } + octep_oq_reset_indices(oq); +} + +/** + * octep_free_oq() - Free Rx queue resources. + * + * @oq: Octeon Rx queue data structure. + * + * Free all resources of a Rx queue. + */ +static int octep_free_oq(struct octep_oq *oq) +{ + struct octep_device *oct = oq->octep_dev; + int q_no = oq->q_no; + + octep_oq_free_ring_buffers(oq); + + vfree(oq->buff_info); + + if (oq->desc_ring) + dma_free_coherent(oq->dev, + oq->max_count * OCTEP_OQ_DESC_SIZE, + oq->desc_ring, oq->desc_ring_dma); + + vfree(oq); + oct->oq[q_no] = NULL; + oct->num_oqs--; + return 0; +} + +/** + * octep_setup_oqs() - setup resources for all Rx queues. + * + * @oct: Octeon device private data structure. + */ +int octep_setup_oqs(struct octep_device *oct) +{ + int i, retval = 0; + + oct->num_oqs = 0; + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + retval = octep_setup_oq(oct, i); + if (retval) { + dev_err(&oct->pdev->dev, + "Failed to setup OQ(RxQ)-%d.\n", i); + goto oq_setup_err; + } + dev_dbg(&oct->pdev->dev, "Successfully setup OQ(RxQ)-%d.\n", i); + } + + return 0; + +oq_setup_err: + while (i) { + i--; + octep_free_oq(oct->oq[i]); + } + return -1; +} + +/** + * octep_oq_dbell_init() - Initialize Rx queue doorbell. + * + * @oct: Octeon device private data structure. + * + * Write number of descriptors to Rx queue doorbell register. + */ +void octep_oq_dbell_init(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg); +} + +/** + * octep_free_oqs() - Free resources of all Rx queues. + * + * @oct: Octeon device private data structure. + */ +void octep_free_oqs(struct octep_device *oct) +{ + int i; + + for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) { + if (!oct->oq[i]) + continue; + octep_free_oq(oct->oq[i]); + dev_dbg(&oct->pdev->dev, + "Successfully freed OQ(RxQ)-%d.\n", i); + } +} + +/** + * octep_oq_check_hw_for_pkts() - Check for new Rx packets. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * + * Return: packets received after previous check. + */ +static int octep_oq_check_hw_for_pkts(struct octep_device *oct, + struct octep_oq *oq) +{ + u32 pkt_count, new_pkts; + + pkt_count = readl(oq->pkts_sent_reg); + new_pkts = pkt_count - oq->last_pkt_count; + + /* Clear the hardware packets counter register if the rx queue is + * being processed continuously with-in a single interrupt and + * reached half its max value. + * this counter is not cleared every time read, to save write cycles. + */ + if (unlikely(pkt_count > 0xF0000000U)) { + writel(pkt_count, oq->pkts_sent_reg); + pkt_count = readl(oq->pkts_sent_reg); + new_pkts += pkt_count; + } + oq->last_pkt_count = pkt_count; + oq->pkts_pending += new_pkts; + return new_pkts; +} + +/** + * __octep_oq_process_rx() - Process hardware Rx queue and push to stack. + * + * @oct: Octeon device private data structure. + * @oq: Octeon Rx queue data structure. + * @pkts_to_process: number of packets to be processed. + * + * Process the new packets in Rx queue. 
+ * Packets larger than single Rx buffer arrive in consecutive descriptors. + * But, count returned by the API only accounts full packets, not fragments. + * + * Return: number of packets processed and pushed to stack. + */ +static int __octep_oq_process_rx(struct octep_device *oct, + struct octep_oq *oq, u16 pkts_to_process) +{ + struct octep_oq_resp_hw_ext *resp_hw_ext = NULL; + struct octep_rx_buffer *buff_info; + struct octep_oq_resp_hw *resp_hw; + u32 pkt, rx_bytes, desc_used; + struct sk_buff *skb; + u16 data_offset; + u32 read_idx; + + read_idx = oq->host_read_idx; + rx_bytes = 0; + desc_used = 0; + for (pkt = 0; pkt < pkts_to_process; pkt++) { + buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx]; + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + resp_hw = page_address(buff_info->page); + buff_info->page = NULL; + + /* Swap the length field that is in Big-Endian to CPU */ + buff_info->len = be64_to_cpu(resp_hw->length); + if (oct->caps_enabled & OCTEP_CAP_RX_CHECKSUM) { + /* Extended response header is immediately after + * response header (resp_hw) + */ + resp_hw_ext = (struct octep_oq_resp_hw_ext *) + (resp_hw + 1); + buff_info->len -= OCTEP_OQ_RESP_HW_EXT_SIZE; + /* Packet Data is immediately after + * extended response header. + */ + data_offset = OCTEP_OQ_RESP_HW_SIZE + + OCTEP_OQ_RESP_HW_EXT_SIZE; + } else { + /* Data is immediately after + * Hardware Rx response header. + */ + data_offset = OCTEP_OQ_RESP_HW_SIZE; + } + rx_bytes += buff_info->len; + + if (buff_info->len <= oq->max_single_buffer_size) { + skb = build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + skb_put(skb, buff_info->len); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } else { + struct skb_shared_info *shinfo; + u16 data_len; + + skb = build_skb((void *)resp_hw, PAGE_SIZE); + skb_reserve(skb, data_offset); + /* Head fragment includes response header(s); + * subsequent fragments contains only data. + */ + skb_put(skb, oq->max_single_buffer_size); + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + + shinfo = skb_shinfo(skb); + data_len = buff_info->len - oq->max_single_buffer_size; + while (data_len) { + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr, + PAGE_SIZE, DMA_FROM_DEVICE); + buff_info = (struct octep_rx_buffer *) + &oq->buff_info[read_idx]; + if (data_len < oq->buffer_size) { + buff_info->len = data_len; + data_len = 0; + } else { + buff_info->len = oq->buffer_size; + data_len -= oq->buffer_size; + } + + skb_add_rx_frag(skb, shinfo->nr_frags, + buff_info->page, 0, + buff_info->len, + buff_info->len); + buff_info->page = NULL; + read_idx++; + desc_used++; + if (read_idx == oq->max_count) + read_idx = 0; + } + } + + skb->dev = oq->netdev; + skb->protocol = eth_type_trans(skb, skb->dev); + if (resp_hw_ext && + resp_hw_ext->csum_verified == OCTEP_CSUM_VERIFIED) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + napi_gro_receive(oq->napi, skb); + } + + oq->host_read_idx = read_idx; + oq->refill_count += desc_used; + oq->stats.packets += pkt; + oq->stats.bytes += rx_bytes; + + return pkt; +} + +/** + * octep_oq_process_rx() - Process Rx queue. + * + * @oq: Octeon Rx queue data structure. + * @budget: max number of packets can be processed in one invocation. + * + * Check for newly received packets and process them. + * Keeps checking for new packets until budget is used or no new packets seen. 
+ *
+ * Return: number of packets processed.
+ */
+int octep_oq_process_rx(struct octep_oq *oq, int budget)
+{
+	u32 pkts_available, pkts_processed, total_pkts_processed;
+	struct octep_device *oct = oq->octep_dev;
+
+	pkts_available = 0;
+	pkts_processed = 0;
+	total_pkts_processed = 0;
+	while (total_pkts_processed < budget) {
+		/* update pending count only when current one exhausted */
+		if (oq->pkts_pending == 0)
+			octep_oq_check_hw_for_pkts(oct, oq);
+		pkts_available = min(budget - total_pkts_processed,
+				     oq->pkts_pending);
+		if (!pkts_available)
+			break;
+
+		pkts_processed = __octep_oq_process_rx(oct, oq,
+						       pkts_available);
+		oq->pkts_pending -= pkts_processed;
+		total_pkts_processed += pkts_processed;
+	}
+
+	if (oq->refill_count >= oq->refill_threshold) {
+		u32 desc_refilled = octep_oq_refill(oct, oq);
+
+		/* flush pending writes before updating credits */
+		wmb();
+		writel(desc_refilled, oq->pkts_credit_reg);
+	}
+
+	return total_pkts_processed;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
new file mode 100644
index 0000000000..782a24f27f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_RX_H_
+#define _OCTEP_RX_H_
+
+/* struct octep_oq_desc_hw - Octeon Hardware OQ descriptor format.
+ *
+ * The descriptor ring is made of descriptors which have two 64-bit values:
+ *
+ * @buffer_ptr: DMA address of the skb->data
+ * @info_ptr:   DMA address of host memory, used by hardware to update the
+ *              packet count. Currently unused, to save PCI writes.
+ */
+struct octep_oq_desc_hw {
+	dma_addr_t buffer_ptr;
+	u64 info_ptr;
+};
+
+#define OCTEP_OQ_DESC_SIZE	(sizeof(struct octep_oq_desc_hw))
+
+#define OCTEP_CSUM_L4_VERIFIED	0x1
+#define OCTEP_CSUM_IP_VERIFIED	0x2
+#define OCTEP_CSUM_VERIFIED	(OCTEP_CSUM_L4_VERIFIED | OCTEP_CSUM_IP_VERIFIED)
+
+/* Extended Response Header in packet data received from Hardware.
+ * Includes metadata like checksum status.
+ * This is valid only if the hardware/firmware has published support
+ * for it. It immediately follows the length word at the start of the
+ * packet data (skb->data).
+ */
+struct octep_oq_resp_hw_ext {
+	/* Reserved. */
+	u64 reserved:62;
+
+	/* checksum verified. */
+	u64 csum_verified:2;
+};
+
+#define OCTEP_OQ_RESP_HW_EXT_SIZE	(sizeof(struct octep_oq_resp_hw_ext))
+
+/* Length of the Rx packet DMA'ed by Octeon to the Host.
+ * This is in big-endian and must be converted to CPU endianness.
+ * Octeon writes it at the beginning of the Rx buffer (skb->data).
+ */
+struct octep_oq_resp_hw {
+	/* The Length of the packet. */
+	__be64 length;
+};
+
+#define OCTEP_OQ_RESP_HW_SIZE	(sizeof(struct octep_oq_resp_hw))
+
+/* Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers. The fields are operated by
+ * OS-dependent routines.
+ */
+struct octep_rx_buffer {
+	struct page *page;
+
+	/* length from rx hardware descriptor after converting to cpu endian */
+	u64 len;
+};
+
+#define OCTEP_OQ_RECVBUF_SIZE	(sizeof(struct octep_rx_buffer))
+
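Taken together, the two response headers define everything the Rx path needs from the start of a receive buffer. A sketch of parsing them, assuming the caller knows (from oct->caps_enabled) whether the extended header is present; the helper names are illustrative:

/* Sketch: parse the per-packet Rx response header(s) at the start of
 * a received buffer, following the layout above.
 */
static inline u64 octep_rx_pkt_len(void *raw)
{
	struct octep_oq_resp_hw *resp = raw;

	/* Hardware writes the length big-endian at offset 0 */
	return be64_to_cpu(resp->length);
}

static inline bool octep_rx_csum_ok(void *raw)
{
	/* The extended header, when present, immediately follows
	 * the length word.
	 */
	struct octep_oq_resp_hw_ext *ext = raw + OCTEP_OQ_RESP_HW_SIZE;

	return ext->csum_verified == OCTEP_CSUM_VERIFIED;
}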
+/* Output Queue statistics. Each output queue maintains the stats below. */
+struct octep_oq_stats {
+	/* Number of packets received from the Device. */
+	u64 packets;
+
+	/* Number of bytes received from the Device. */
+	u64 bytes;
+
+	/* Number of times failed to allocate buffers. */
+	u64 alloc_failures;
+};
+
+#define OCTEP_OQ_STATS_SIZE	(sizeof(struct octep_oq_stats))
+
+/* Hardware interface Rx statistics */
+struct octep_iface_rx_stats {
+	/* Received packets */
+	u64 pkts;
+
+	/* Octets of received packets */
+	u64 octets;
+
+	/* Received PAUSE and Control packets */
+	u64 pause_pkts;
+
+	/* Received PAUSE and Control octets */
+	u64 pause_octets;
+
+	/* Filtered DMAC0 packets */
+	u64 dmac0_pkts;
+
+	/* Filtered DMAC0 octets */
+	u64 dmac0_octets;
+
+	/* Packets dropped due to RX FIFO full */
+	u64 dropped_pkts_fifo_full;
+
+	/* Octets dropped due to RX FIFO full */
+	u64 dropped_octets_fifo_full;
+
+	/* Error packets */
+	u64 err_pkts;
+
+	/* Filtered DMAC1 packets */
+	u64 dmac1_pkts;
+
+	/* Filtered DMAC1 octets */
+	u64 dmac1_octets;
+
+	/* NCSI-bound packets dropped */
+	u64 ncsi_dropped_pkts;
+
+	/* NCSI-bound octets dropped */
+	u64 ncsi_dropped_octets;
+
+	/* Multicast packets received. */
+	u64 mcast_pkts;
+
+	/* Broadcast packets received. */
+	u64 bcast_pkts;
+};
+
+/* The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon OQ.
+ */
+struct octep_oq {
+	u32 q_no;
+
+	struct octep_device *octep_dev;
+	struct net_device *netdev;
+	struct device *dev;
+
+	struct napi_struct *napi;
+
+	/* The receive buffer list. This list has the virtual addresses
+	 * of the buffers.
+	 */
+	struct octep_rx_buffer *buff_info;
+
+	/* Pointer to the mapped packet credit register.
+	 * Host writes the number of info/buffer ptrs available to this
+	 * register.
+	 */
+	u8 __iomem *pkts_credit_reg;
+
+	/* Pointer to the mapped packet sent register.
+	 * Octeon writes the number of packets DMA'ed to host memory
+	 * in this register.
+	 */
+	u8 __iomem *pkts_sent_reg;
+
+	/* Statistics for this OQ. */
+	struct octep_oq_stats stats;
+
+	/* Packets pending to be processed */
+	u32 pkts_pending;
+	u32 last_pkt_count;
+
+	/* Index in the ring where the driver should read the next packet */
+	u32 host_read_idx;
+
+	/* Number of descriptors in this ring. */
+	u32 max_count;
+	u32 ring_size_mask;
+
+	/* The number of descriptors pending refill. */
+	u32 refill_count;
+
+	/* Index in the ring where the driver will refill the
+	 * descriptor's buffer
+	 */
+	u32 host_refill_idx;
+	u32 refill_threshold;
+
+	/* The size of each buffer pointed by the buffer pointer. */
+	u32 buffer_size;
+	u32 max_single_buffer_size;
+
+	/* The 8B aligned descriptor ring starts at this address. */
+	struct octep_oq_desc_hw *desc_ring;
+
+	/* DMA mapped address of the OQ descriptor ring. */
+	dma_addr_t desc_ring_dma;
+};
+
+#define OCTEP_OQ_SIZE	(sizeof(struct octep_oq))
+#endif	/* _OCTEP_RX_H_ */
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
new file mode 100644
index 0000000000..d0adb82d65
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
+
+#include "octep_config.h"
+#include "octep_main.h"
+
+/* Reset the various indices of the Tx queue data structure.
*/ +static void octep_iq_reset_indices(struct octep_iq *iq) +{ + iq->fill_cnt = 0; + iq->host_write_index = 0; + iq->octep_read_index = 0; + iq->flush_index = 0; + iq->pkts_processed = 0; + iq->pkt_in_done = 0; + atomic_set(&iq->instr_pending, 0); +} + +/** + * octep_iq_process_completions() - Process Tx queue completions. + * + * @iq: Octeon Tx queue data structure. + * @budget: max number of completions to be processed in one invocation. + */ +int octep_iq_process_completions(struct octep_iq *iq, u16 budget) +{ + u32 compl_pkts, compl_bytes, compl_sg; + struct octep_device *oct = iq->octep_dev; + struct octep_tx_buffer *tx_buffer; + struct skb_shared_info *shinfo; + u32 fi = iq->flush_index; + struct sk_buff *skb; + u8 frags, i; + + compl_pkts = 0; + compl_sg = 0; + compl_bytes = 0; + iq->octep_read_index = oct->hw_ops.update_iq_read_idx(iq); + + while (likely(budget && (fi != iq->octep_read_index))) { + tx_buffer = iq->buff_info + fi; + skb = tx_buffer->skb; + + fi++; + if (unlikely(fi == iq->max_count)) + fi = 0; + compl_bytes += skb->len; + compl_pkts++; + budget--; + + if (!tx_buffer->gather) { + dma_unmap_single(iq->dev, tx_buffer->dma, + tx_buffer->skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + continue; + } + + /* Scatter/Gather */ + shinfo = skb_shinfo(skb); + frags = shinfo->nr_frags; + compl_sg++; + + dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0], + tx_buffer->sglist[0].len[3], DMA_TO_DEVICE); + + i = 1; /* entry 0 is main skb, unmapped above */ + while (frags--) { + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], + tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); + i++; + } + + dev_kfree_skb_any(skb); + } + + iq->pkts_processed += compl_pkts; + atomic_sub(compl_pkts, &iq->instr_pending); + iq->stats.instr_completed += compl_pkts; + iq->stats.bytes_sent += compl_bytes; + iq->stats.sgentry_sent += compl_sg; + iq->flush_index = fi; + + netdev_tx_completed_queue(iq->netdev_q, compl_pkts, compl_bytes); + + if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) && + ((iq->max_count - atomic_read(&iq->instr_pending)) > + OCTEP_WAKE_QUEUE_THRESHOLD)) + netif_wake_subqueue(iq->netdev, iq->q_no); + return !budget; +} + +/** + * octep_iq_free_pending() - Free Tx buffers for pending completions. + * + * @iq: Octeon Tx queue data structure. + */ +static void octep_iq_free_pending(struct octep_iq *iq) +{ + struct octep_tx_buffer *tx_buffer; + struct skb_shared_info *shinfo; + u32 fi = iq->flush_index; + struct sk_buff *skb; + u8 frags, i; + + while (fi != iq->host_write_index) { + tx_buffer = iq->buff_info + fi; + skb = tx_buffer->skb; + + fi++; + if (unlikely(fi == iq->max_count)) + fi = 0; + + if (!tx_buffer->gather) { + dma_unmap_single(iq->dev, tx_buffer->dma, + tx_buffer->skb->len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + continue; + } + + /* Scatter/Gather */ + shinfo = skb_shinfo(skb); + frags = shinfo->nr_frags; + + dma_unmap_single(iq->dev, + tx_buffer->sglist[0].dma_ptr[0], + tx_buffer->sglist[0].len[3], + DMA_TO_DEVICE); + + i = 1; /* entry 0 is main skb, unmapped above */ + while (frags--) { + dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3], + tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE); + i++; + } + + dev_kfree_skb_any(skb); + } + + atomic_set(&iq->instr_pending, 0); + iq->flush_index = fi; + netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no)); +} + +/** + * octep_clean_iqs() - Clean Tx queues to shutdown the device. + * + * @oct: Octeon device private data structure. 
+ * + * Free the buffers in Tx queue descriptors pending completion and + * reset queue indices + */ +void octep_clean_iqs(struct octep_device *oct) +{ + int i; + + for (i = 0; i < oct->num_iqs; i++) { + octep_iq_free_pending(oct->iq[i]); + octep_iq_reset_indices(oct->iq[i]); + } +} + +/** + * octep_setup_iq() - Setup a Tx queue. + * + * @oct: Octeon device private data structure. + * @q_no: Tx queue number to be setup. + * + * Allocate resources for a Tx queue. + */ +static int octep_setup_iq(struct octep_device *oct, int q_no) +{ + u32 desc_ring_size, buff_info_size, sglist_size; + struct octep_iq *iq; + int i; + + iq = vzalloc(sizeof(*iq)); + if (!iq) + goto iq_alloc_err; + oct->iq[q_no] = iq; + + iq->octep_dev = oct; + iq->netdev = oct->netdev; + iq->dev = &oct->pdev->dev; + iq->q_no = q_no; + iq->max_count = CFG_GET_IQ_NUM_DESC(oct->conf); + iq->ring_size_mask = iq->max_count - 1; + iq->fill_threshold = CFG_GET_IQ_DB_MIN(oct->conf); + iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no); + + /* Allocate memory for hardware queue descriptors */ + desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf); + iq->desc_ring = dma_alloc_coherent(iq->dev, desc_ring_size, + &iq->desc_ring_dma, GFP_KERNEL); + if (unlikely(!iq->desc_ring)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d\n", q_no); + goto desc_dma_alloc_err; + } + + /* Allocate memory for hardware SGLIST descriptors */ + sglist_size = OCTEP_SGLIST_SIZE_PER_PKT * + CFG_GET_IQ_NUM_DESC(oct->conf); + iq->sglist = dma_alloc_coherent(iq->dev, sglist_size, + &iq->sglist_dma, GFP_KERNEL); + if (unlikely(!iq->sglist)) { + dev_err(iq->dev, + "Failed to allocate DMA memory for IQ-%d SGLIST\n", + q_no); + goto sglist_alloc_err; + } + + /* allocate memory to manage Tx packets pending completion */ + buff_info_size = OCTEP_IQ_TXBUFF_INFO_SIZE * iq->max_count; + iq->buff_info = vzalloc(buff_info_size); + if (!iq->buff_info) { + dev_err(iq->dev, + "Failed to allocate buff info for IQ-%d\n", q_no); + goto buff_info_err; + } + + /* Setup sglist addresses in tx_buffer entries */ + for (i = 0; i < CFG_GET_IQ_NUM_DESC(oct->conf); i++) { + struct octep_tx_buffer *tx_buffer; + + tx_buffer = &iq->buff_info[i]; + tx_buffer->sglist = + &iq->sglist[i * OCTEP_SGLIST_ENTRIES_PER_PKT]; + tx_buffer->sglist_dma = + iq->sglist_dma + (i * OCTEP_SGLIST_SIZE_PER_PKT); + } + + octep_iq_reset_indices(iq); + oct->hw_ops.setup_iq_regs(oct, q_no); + + oct->num_iqs++; + return 0; + +buff_info_err: + dma_free_coherent(iq->dev, sglist_size, iq->sglist, iq->sglist_dma); +sglist_alloc_err: + dma_free_coherent(iq->dev, desc_ring_size, + iq->desc_ring, iq->desc_ring_dma); +desc_dma_alloc_err: + vfree(iq); + oct->iq[q_no] = NULL; +iq_alloc_err: + return -1; +} + +/** + * octep_free_iq() - Free Tx queue resources. + * + * @iq: Octeon Tx queue data structure. + * + * Free all the resources allocated for a Tx queue. 
+ */
+static void octep_free_iq(struct octep_iq *iq)
+{
+	struct octep_device *oct = iq->octep_dev;
+	u64 desc_ring_size, sglist_size;
+	int q_no = iq->q_no;
+
+	desc_ring_size = OCTEP_IQ_DESC_SIZE * CFG_GET_IQ_NUM_DESC(oct->conf);
+
+	vfree(iq->buff_info);
+
+	if (iq->desc_ring)
+		dma_free_coherent(iq->dev, desc_ring_size,
+				  iq->desc_ring, iq->desc_ring_dma);
+
+	sglist_size = OCTEP_SGLIST_SIZE_PER_PKT *
+		      CFG_GET_IQ_NUM_DESC(oct->conf);
+	if (iq->sglist)
+		dma_free_coherent(iq->dev, sglist_size,
+				  iq->sglist, iq->sglist_dma);
+
+	vfree(iq);
+	oct->iq[q_no] = NULL;
+	oct->num_iqs--;
+}
+
+/**
+ * octep_setup_iqs() - setup resources for all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+int octep_setup_iqs(struct octep_device *oct)
+{
+	int i;
+
+	oct->num_iqs = 0;
+	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+		if (octep_setup_iq(oct, i)) {
+			dev_err(&oct->pdev->dev,
+				"Failed to setup IQ(TxQ)-%d.\n", i);
+			goto iq_setup_err;
+		}
+		dev_dbg(&oct->pdev->dev, "Successfully setup IQ(TxQ)-%d.\n", i);
+	}
+
+	return 0;
+
+iq_setup_err:
+	while (i) {
+		i--;
+		octep_free_iq(oct->iq[i]);
+	}
+	return -1;
+}
+
+/**
+ * octep_free_iqs() - Free resources of all Tx queues.
+ *
+ * @oct: Octeon device private data structure.
+ */
+void octep_free_iqs(struct octep_device *oct)
+{
+	int i;
+
+	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
+		octep_free_iq(oct->iq[i]);
+		dev_dbg(&oct->pdev->dev,
+			"Successfully destroyed IQ(TxQ)-%d.\n", i);
+	}
+	oct->num_iqs = 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
new file mode 100644
index 0000000000..21e75ff9f5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell Octeon EP (EndPoint) Ethernet Driver
+ *
+ * Copyright (C) 2020 Marvell.
+ *
+ */
+
+#ifndef _OCTEP_TX_H_
+#define _OCTEP_TX_H_
+
+#define IQ_SEND_OK	0
+#define IQ_SEND_STOP	1
+#define IQ_SEND_FAILED	-1
+
+#define TX_BUFTYPE_NONE		0
+#define TX_BUFTYPE_NET		1
+#define TX_BUFTYPE_NET_SG	2
+#define NUM_TX_BUFTYPES		3
+
+/* Hardware format for Scatter/Gather list
+ *
+ * 63      48|47     32|31     16|15       0
+ * -----------------------------------------
+ * |  Len 0  |  Len 1  |  Len 2  |  Len 3  |
+ * -----------------------------------------
+ * |                Ptr 0                  |
+ * -----------------------------------------
+ * |                Ptr 1                  |
+ * -----------------------------------------
+ * |                Ptr 2                  |
+ * -----------------------------------------
+ * |                Ptr 3                  |
+ * -----------------------------------------
+ */
+struct octep_tx_sglist_desc {
+	u16 len[4];
+	dma_addr_t dma_ptr[4];
+};
+
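This packing dictates how the transmit path fills a scatter list: lengths are stored in reverse order within an entry while the pointers are not, which is exactly the layout the unmap logic in octep_tx.c walks. A sketch with illustrative parameters (buffers assumed already DMA-mapped; the helper is not part of this patch):

/* Sketch: pack the main skb buffer plus fragments into the 4-wide
 * sglist entries described above; 'num_bufs' is 1 + nr_frags.
 */
static void octep_fill_sglist(struct octep_tx_sglist_desc *sglist,
			      const dma_addr_t *dma_addrs,
			      const u16 *lens, int num_bufs)
{
	int i;

	for (i = 0; i < num_bufs; i++) {
		/* Four buffers per entry; lengths run high-to-low
		 * (len[3] first) while pointers run low-to-high.
		 */
		sglist[i >> 2].len[3 - (i & 3)] = lens[i];
		sglist[i >> 2].dma_ptr[i & 3] = dma_addrs[i];
	}
}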
+/* Each Scatter/Gather entry sent to hardware holds four pointers.
+ * So, the number of entries required is (MAX_SKB_FRAGS + 1)/4, where
+ * '+1' is for the main skb, which also goes to the Octeon hardware as
+ * a gather buffer. To allocate sufficient SGLIST entries for a packet
+ * with the maximum number of fragments, align by adding 3 before
+ * calculating the max SGLIST entries per packet.
+ */
+#define OCTEP_SGLIST_ENTRIES_PER_PKT ((MAX_SKB_FRAGS + 1 + 3) / 4)
+#define OCTEP_SGLIST_SIZE_PER_PKT \
+	(OCTEP_SGLIST_ENTRIES_PER_PKT * sizeof(struct octep_tx_sglist_desc))
+
+struct octep_tx_buffer {
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	struct octep_tx_sglist_desc *sglist;
+	dma_addr_t sglist_dma;
+	u8 gather;
+};
+
+#define OCTEP_IQ_TXBUFF_INFO_SIZE (sizeof(struct octep_tx_buffer))
+
+/* Hardware interface Tx statistics */
+struct octep_iface_tx_stats {
+	/* Packets dropped due to excessive collisions */
+	u64 xscol;
+
+	/* Packets dropped due to excessive deferral */
+	u64 xsdef;
+
+	/* Packets sent that experienced multiple collisions before successful
+	 * transmission
+	 */
+	u64 mcol;
+
+	/* Packets sent that experienced a single collision before successful
+	 * transmission
+	 */
+	u64 scol;
+
+	/* Total octets sent on the interface */
+	u64 octs;
+
+	/* Total frames sent on the interface */
+	u64 pkts;
+
+	/* Packets sent with an octet count < 64 */
+	u64 hist_lt64;
+
+	/* Packets sent with an octet count == 64 */
+	u64 hist_eq64;
+
+	/* Packets sent with an octet count of 65–127 */
+	u64 hist_65to127;
+
+	/* Packets sent with an octet count of 128–255 */
+	u64 hist_128to255;
+
+	/* Packets sent with an octet count of 256–511 */
+	u64 hist_256to511;
+
+	/* Packets sent with an octet count of 512–1023 */
+	u64 hist_512to1023;
+
+	/* Packets sent with an octet count of 1024–1518 */
+	u64 hist_1024to1518;
+
+	/* Packets sent with an octet count of > 1518 */
+	u64 hist_gt1518;
+
+	/* Packets sent to a broadcast DMAC */
+	u64 bcst;
+
+	/* Packets sent to the multicast DMAC */
+	u64 mcst;
+
+	/* Packets sent that experienced a transmit underflow and were
+	 * truncated
+	 */
+	u64 undflw;
+
+	/* Control/PAUSE packets sent */
+	u64 ctl;
+};
+
+/* Input Queue statistics. Each input queue maintains the stats below. */
+struct octep_iq_stats {
+	/* Instructions posted to this queue. */
+	u64 instr_posted;
+
+	/* Instructions copied by hardware for processing. */
+	u64 instr_completed;
+
+	/* Instructions that could not be processed. */
+	u64 instr_dropped;
+
+	/* Bytes sent through this queue. */
+	u64 bytes_sent;
+
+	/* Gather entries sent through this queue. */
+	u64 sgentry_sent;
+
+	/* Number of transmit failures due to TX_BUSY */
+	u64 tx_busy;
+
+	/* Number of times the queue is restarted */
+	u64 restart_cnt;
+};
+
+/* The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to
+ * OCTEP_MAX_IQ) of an Octeon device has one such structure to
+ * represent it.
+ */
+struct octep_iq {
+	u32 q_no;
+
+	struct octep_device *octep_dev;
+	struct net_device *netdev;
+	struct device *dev;
+	struct netdev_queue *netdev_q;
+
+	/* Index in input ring where driver should write the next packet */
+	u16 host_write_index;
+
+	/* Index in input ring where Octeon is expected to read next packet */
+	u16 octep_read_index;
+
+	/* This index aids in finding the window in the queue where Octeon
+	 * has read the commands.
+	 */
+	u16 flush_index;
+
+	/* Statistics for this input queue. */
+	struct octep_iq_stats stats;
+
+	/* This field keeps track of the instructions pending in this queue. */
+	atomic_t instr_pending;
+
+	/* Pointer to the Virtual Base addr of the input ring. */
+	struct octep_tx_desc_hw *desc_ring;
+
+	/* DMA mapped base address of the input descriptor ring. */
+	dma_addr_t desc_ring_dma;
+
+	/* Info of Tx buffers pending completion. */
+	struct octep_tx_buffer *buff_info;
+
+	/* Base pointer to Scatter/Gather lists for all ring descriptors. */
+	struct octep_tx_sglist_desc *sglist;
+
+	/* DMA mapped addr of Scatter Gather Lists */
+	dma_addr_t sglist_dma;
+
+	/* Octeon doorbell register for the ring. */
+	u8 __iomem *doorbell_reg;
+
+	/* Octeon instruction count register for this ring. */
+	u8 __iomem *inst_cnt_reg;
+
+	/* Interrupt level register for this ring. */
+	u8 __iomem *intr_lvl_reg;
+
+	/* Maximum no. of instructions in this queue. */
+	u32 max_count;
+	u32 ring_size_mask;
+
+	u32 pkt_in_done;
+	u32 pkts_processed;
+
+	u32 status;
+
+	/* Number of instructions pending to be posted to Octeon. */
+	u32 fill_cnt;
+
+	/* The max. number of instructions that can be held pending by the
+	 * driver before ringing the doorbell.
+	 */
+	u32 fill_threshold;
+};
+
+/* Hardware Tx Instruction Header */
+struct octep_instr_hdr {
+	/* Data Len */
+	u64 tlen:16;
+
+	/* Reserved */
+	u64 rsvd:20;
+
+	/* PKIND for SDP */
+	u64 pkind:6;
+
+	/* Front Data size */
+	u64 fsz:6;
+
+	/* No. of entries in gather list */
+	u64 gsz:14;
+
+	/* Gather indicator 1=gather */
+	u64 gather:1;
+
+	/* Reserved3 */
+	u64 reserved3:1;
+};
+
+/* Hardware Tx completion response header */
+struct octep_instr_resp_hdr {
+	/* Request ID */
+	u64 rid:16;
+
+	/* PCIe port to use for response */
+	u64 pcie_port:3;
+
+	/* Scatter indicator 1=scatter */
+	u64 scatter:1;
+
+	/* Size of Expected result OR no. of entries in scatter list */
+	u64 rlenssz:14;
+
+	/* Desired destination port for result */
+	u64 dport:6;
+
+	/* Opcode Specific parameters */
+	u64 param:8;
+
+	/* Opcode for the return packet */
+	u64 opcode:16;
+};
+
+/* 64-byte Tx instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ *
+ * Only the first 16 bytes (dptr and ih) are mandatory; the rest are
+ * optional and filled in by the driver based on firmware/hardware
+ * capabilities. These optional headers are together called Front Data,
+ * and their size is described by ih->fsz.
+ */
+struct octep_tx_desc_hw {
+	/* Pointer where the input data is available. */
+	u64 dptr;
+
+	/* Instruction Header. */
+	union {
+		struct octep_instr_hdr ih;
+		u64 ih64;
+	};
+
+	/* Pointer where the response for a RAW mode packet will be written
+	 * by Octeon.
+	 */
+	u64 rptr;
+
+	/* Input Instruction Response Header. */
+	struct octep_instr_resp_hdr irh;
+
+	/* Additional headers available in a 64-byte instruction. */
+	u64 exhdr[4];
+};
+
+#define OCTEP_IQ_DESC_SIZE (sizeof(struct octep_tx_desc_hw))
+#endif /* _OCTEP_TX_H_ */
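To close the loop, here is a sketch of building one such 64-byte instruction for a linear (non-gather) packet. The helper and its parameters are illustrative, not part of this patch; the field usage mirrors the layout above:

/* Sketch: fill a 64-byte Tx instruction for a linear skb. 'dma' is
 * the DMA-mapped address of skb->data; no Front Data headers are used
 * here, so fsz stays 0 and the gather bit is clear.
 */
static void octep_fill_tx_desc(struct octep_tx_desc_hw *hw_desc,
			       dma_addr_t dma, u16 len, u8 pkind)
{
	memset(hw_desc, 0, sizeof(*hw_desc));
	hw_desc->ih.pkind = pkind;	/* pkind stamped on every Tx desc */
	hw_desc->ih.tlen = len;		/* total data length */
	hw_desc->ih.gather = 0;		/* single contiguous buffer */
	hw_desc->dptr = dma;		/* where hardware reads the data */
}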