Diffstat (limited to 'drivers/net/ethernet/cavium/liquidio')
39 files changed, 27432 insertions, 0 deletions
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile new file mode 100644 index 000000000..bc9937502 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/Makefile @@ -0,0 +1,24 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Cavium Liquidio ethernet device driver +# + +common-objs := lio_ethtool.o \ + lio_core.o \ + request_manager.o \ + response_manager.o \ + octeon_device.o \ + cn66xx_device.o \ + cn68xx_device.o \ + cn23xx_pf_device.o \ + cn23xx_vf_device.o \ + octeon_mailbox.o \ + octeon_mem_ops.o \ + octeon_droq.o \ + octeon_nic.o + +obj-$(CONFIG_LIQUIDIO) += liquidio.o +liquidio-y := lio_main.o octeon_console.o lio_vf_rep.o $(common-objs) + +obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o +liquidio_vf-y := lio_vf_main.o $(common-objs) diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c new file mode 100644 index 000000000..9ed3d1ab2 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -0,0 +1,1512 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/etherdevice.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "cn23xx_pf_device.h" +#include "octeon_main.h" +#include "octeon_mailbox.h" + +#define RESET_NOTDONE 0 +#define RESET_DONE 1 + +/* Change the value of SLI Packet Input Jabber Register to allow + * VXLAN TSO packets which can be 64424 bytes, exceeding the + * MAX_GSO_SIZE we supplied to the kernel + */ +#define CN23XX_INPUT_JABBER 64600 + +void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct) +{ + int i = 0; + u32 regval = 0; + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + + /*In cn23xx_soft_reset*/ + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n", + "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG), + CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG))); + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1), + CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1))); + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST, + lio_pci_readq(oct, CN23XX_RST_SOFT_RST)); + + /*In cn23xx_set_dpi_regs*/ + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL, + lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL)); + + for (i = 0; i < 6; i++) { + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_DPI_DMA_ENG_ENB", i, + CN23XX_DPI_DMA_ENG_ENB(i), + lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i))); + dev_dbg(&oct->pci_dev->dev, 
"%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_DPI_DMA_ENG_BUF", i, + CN23XX_DPI_DMA_ENG_BUF(i), + lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i))); + } + + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL", + CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL)); + + /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */ + pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, ®val); + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_CONFIG_PCIE_DEVCTL", + CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval)); + + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port, + CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port), + lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port))); + + /*In cn23xx_specific_regs_setup */ + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port, + CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)))); + + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST), + (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST)); + + /*In cn23xx_setup_global_mac_regs*/ + for (i = 0; i < CN23XX_MAX_MACS; i++) { + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_PKT_MAC_RINFO64", i, + CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)), + CVM_CAST64(octeon_read_csr64 + (oct, CN23XX_SLI_PKT_MAC_RINFO64 + (i, oct->pf_num)))); + } + + /*In cn23xx_setup_global_input_regs*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_IQ_PKT_CONTROL64", i, + CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)), + CVM_CAST64(octeon_read_csr64 + (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i)))); + } + + /*In cn23xx_setup_global_output_regs*/ + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK), + CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK))); + + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_PKT_CONTROL", i, + CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)), + CVM_CAST64(octeon_read_csr( + oct, CN23XX_SLI_OQ_PKT_CONTROL(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_PKT_INT_LEVELS", i, + CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i)))); + } + + /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/ + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "cn23xx->intr_enb_reg64", + CVM_CAST64((long)(cn23xx->intr_enb_reg64)), + CVM_CAST64(readq(cn23xx->intr_enb_reg64))); + + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "cn23xx->intr_sum_reg64", + CVM_CAST64((long)(cn23xx->intr_sum_reg64)), + CVM_CAST64(readq(cn23xx->intr_sum_reg64))); + + /*In cn23xx_setup_iq_regs*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_IQ_BASE_ADDR64", i, + CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_IQ_BASE_ADDR64(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_IQ_SIZE", i, + CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)), + CVM_CAST64(octeon_read_csr + (oct, CN23XX_SLI_IQ_SIZE(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + 
"CN23XX_SLI_IQ_DOORBELL", i, + CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_IQ_DOORBELL(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_IQ_INSTR_COUNT64", i, + CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_IQ_INSTR_COUNT64(i)))); + } + + /*In cn23xx_setup_oq_regs*/ + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_BASE_ADDR64", i, + CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_OQ_BASE_ADDR64(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_SIZE", i, + CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)), + CVM_CAST64(octeon_read_csr + (oct, CN23XX_SLI_OQ_SIZE(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i, + CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)), + CVM_CAST64(octeon_read_csr( + oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_PKTS_SENT", i, + CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_OQ_PKTS_SENT(i)))); + dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n", + "CN23XX_SLI_OQ_PKTS_CREDIT", i, + CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)), + CVM_CAST64(octeon_read_csr64( + oct, CN23XX_SLI_OQ_PKTS_CREDIT(i)))); + } + + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_SLI_PKT_TIME_INT", + CVM_CAST64(CN23XX_SLI_PKT_TIME_INT), + CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT))); + dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", + "CN23XX_SLI_PKT_CNT_INT", + CVM_CAST64(CN23XX_SLI_PKT_CNT_INT), + CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT))); +} + +static int cn23xx_pf_soft_reset(struct octeon_device *oct) +{ + octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF); + + dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n", + oct->octeon_id); + + octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL); + + /* Initiate chip-wide soft reset */ + lio_pci_readq(oct, CN23XX_RST_SOFT_RST); + lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST); + + /* Wait for 100ms as Octeon resets. 
*/ + mdelay(100); + + if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) { + dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n", + oct->octeon_id); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n", + oct->octeon_id); + + /* restore the reset value*/ + octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF); + + return 0; +} + +static void cn23xx_enable_error_reporting(struct octeon_device *oct) +{ + u32 regval; + u32 uncorrectable_err_mask, corrtable_err_status; + + pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, ®val); + if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) { + uncorrectable_err_mask = 0; + corrtable_err_status = 0; + pci_read_config_dword(oct->pci_dev, + CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK, + &uncorrectable_err_mask); + pci_read_config_dword(oct->pci_dev, + CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS, + &corrtable_err_status); + dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n" + "\tdev_ctl_status_reg = 0x%08x\n" + "\tuncorrectable_error_mask_reg = 0x%08x\n" + "\tcorrectable_error_status_reg = 0x%08x\n", + regval, uncorrectable_err_mask, + corrtable_err_status); + } + + regval |= 0xf; /* Enable Link error reporting */ + + dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n", + oct->octeon_id); + pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval); +} + +static u32 cn23xx_coprocessor_clock(struct octeon_device *oct) +{ + /* Bits 29:24 of RST_BOOT[PNR_MUL] holds the ref.clock MULTIPLIER + * for SLI. + */ + + /* TBD: get the info in Hand-shake */ + return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50); +} + +u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us) +{ + /* This gives the SLI clock per microsec */ + u32 oqticks_per_us = cn23xx_coprocessor_clock(oct); + + oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us; + + /* This gives the clock cycles per millisecond */ + oqticks_per_us *= 1000; + + /* This gives the oq ticks (1024 core clock cycles) per millisecond */ + oqticks_per_us /= 1024; + + /* time_intr is in microseconds. The next 2 steps gives the oq ticks + * corressponding to time_intr. 
+ */ + oqticks_per_us *= time_intr_in_us; + oqticks_per_us /= 1000; + + return oqticks_per_us; +} + +static void cn23xx_setup_global_mac_regs(struct octeon_device *oct) +{ + u16 mac_no = oct->pcie_port; + u16 pf_num = oct->pf_num; + u64 reg_val; + u64 temp; + + /* programming SRN and TRS for each MAC(0..3) */ + + dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n", + __func__, mac_no); + /* By default, mapping all 64 IOQs to a single MACs */ + + reg_val = + octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)); + + if (oct->rev_id == OCTEON_CN23XX_REV_1_1) { + /* setting SRN <6:0> */ + reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1; + } else { + /* setting SRN <6:0> */ + reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF; + } + + /* setting TRS <23:16> */ + reg_val = reg_val | + (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS); + /* setting RPVF <39:32> */ + temp = oct->sriov_info.rings_per_vf & 0xff; + reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS); + + /* setting NVFS <55:48> */ + temp = oct->sriov_info.max_vfs & 0xff; + reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS); + + /* write these settings to MAC register */ + octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num), + reg_val); + + dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n", + mac_no, pf_num, (u64)octeon_read_csr64 + (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num))); +} + +static int cn23xx_reset_io_queues(struct octeon_device *oct) +{ + int ret_val = 0; + u64 d64; + u32 q_no, srn, ern; + u32 loop = 1000; + + srn = oct->sriov_info.pf_srn; + ern = srn + oct->sriov_info.num_pf_rings; + + /*As per HRM reg description, s/w cant write 0 to ENB. */ + /*to make the queue off, need to set the RST bit. */ + + /* Reset the Enable bit for all the 64 IQs. */ + for (q_no = srn; q_no < ern; q_no++) { + /* set RST bit to 1. 
This bit applies to both IQ and OQ */ + d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + d64 = d64 | CN23XX_PKT_INPUT_CTL_RST; + octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64); + } + + /*wait until the RST bit is clear or the RST and quite bits are set*/ + for (q_no = srn; q_no < ern; q_no++) { + u64 reg_val = octeon_read_csr64(oct, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) && + !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) && + loop--) { + WRITE_ONCE(reg_val, octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); + } + if (!loop) { + dev_err(&oct->pci_dev->dev, + "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", + q_no); + return -1; + } + WRITE_ONCE(reg_val, READ_ONCE(reg_val) & + ~CN23XX_PKT_INPUT_CTL_RST); + octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + READ_ONCE(reg_val)); + + WRITE_ONCE(reg_val, octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); + if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) { + dev_err(&oct->pci_dev->dev, + "clearing the reset failed for qno: %u\n", + q_no); + ret_val = -1; + } + } + + return ret_val; +} + +static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) +{ + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + struct octeon_instr_queue *iq; + u64 intr_threshold, reg_val; + u32 q_no, ern, srn; + u64 pf_num; + u64 vf_num; + + pf_num = oct->pf_num; + + srn = oct->sriov_info.pf_srn; + ern = srn + oct->sriov_info.num_pf_rings; + + if (cn23xx_reset_io_queues(oct)) + return -1; + + /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg + * for all queues.Only PF can set these bits. + * bits 29:30 indicate the MAC num. + * bits 32:47 indicate the PVF num. + */ + for (q_no = 0; q_no < ern; q_no++) { + reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; + + /* for VF assigned queues. */ + if (q_no < oct->sriov_info.pf_srn) { + vf_num = q_no / oct->sriov_info.rings_per_vf; + vf_num += 1; /* VF1, VF2,........ 
*/ + } else { + vf_num = 0; + } + + reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS; + reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS; + + octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + } + + /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for + * pf queues + */ + for (q_no = srn; q_no < ern; q_no++) { + void __iomem *inst_cnt_reg; + + iq = oct->instr_queue[q_no]; + if (iq) + inst_cnt_reg = iq->inst_cnt_reg; + else + inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_IQ_INSTR_COUNT64(q_no); + + reg_val = + octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + + reg_val |= CN23XX_PKT_INPUT_CTL_MASK; + + octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + + /* Set WMARK level for triggering PI_INT */ + /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */ + intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) & + CN23XX_PKT_IN_DONE_WMARK_MASK; + + writeq((readq(inst_cnt_reg) & + ~(CN23XX_PKT_IN_DONE_WMARK_MASK << + CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) | + (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS), + inst_cnt_reg); + } + return 0; +} + +static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct) +{ + u32 reg_val; + u32 q_no, ern, srn; + u64 time_threshold; + + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + + srn = oct->sriov_info.pf_srn; + ern = srn + oct->sriov_info.num_pf_rings; + + if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) { + octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32); + } else { + /** Set Output queue watermark to 0 to disable backpressure */ + octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0); + } + + for (q_no = srn; q_no < ern; q_no++) { + reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + + /* set DPTR */ + reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; + + /* reset BMODE */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE); + + /* No Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue ScatterList + * reset ROR_P, NSR_P + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P); + +#ifdef __LITTLE_ENDIAN_BITFIELD + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P); +#else + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P); +#endif + /* No Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue Data + * reset ROR, NSR + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR); + /* set the ES bit */ + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES); + + /* write all the selected settings */ + octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val); + + /* Enabling these interrupt in oct->fn_list.enable_interrupt() + * routine which called after IOQ init. + * Set up interrupt packet and time thresholds + * for all the OQs + */ + time_threshold = cn23xx_pf_get_oq_ticks( + oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf)); + + octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), + (CFG_GET_OQ_INTR_PKT(cn23xx->conf) | + (time_threshold << 32))); + } + + /** Setting the water mark level for pko back pressure **/ + writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK); + + /** Disabling setting OQs in reset when ring has no dorebells + * enabling this will cause of head of line blocking + */ + /* Do it only for pass1.1. 
and pass1.2 */ + if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) || + (oct->rev_id == OCTEON_CN23XX_REV_1_1)) + writeq(readq((u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_GBL_CONTROL) | 0x2, + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL); + + /** Enable channel-level backpressure */ + if (oct->pf_num) + writeq(0xffffffffffffffffULL, + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S); + else + writeq(0xffffffffffffffffULL, + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S); +} + +static int cn23xx_setup_pf_device_regs(struct octeon_device *oct) +{ + cn23xx_enable_error_reporting(oct); + + /* program the MAC(0..3)_RINFO before setting up input/output regs */ + cn23xx_setup_global_mac_regs(oct); + + if (cn23xx_pf_setup_global_input_regs(oct)) + return -1; + + cn23xx_pf_setup_global_output_regs(oct); + + /* Default error timeout value should be 0x200000 to avoid host hang + * when reads invalid register + */ + octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL, + CN23XX_SLI_WINDOW_CTL_DEFAULT); + + /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */ + octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER); + return 0; +} + +static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + u64 pkt_in_done; + + iq_no += oct->sriov_info.pf_srn; + + /* Write the start of the input queue's ring and its size */ + octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no), + iq->base_addr_dma); + octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no); + iq->inst_cnt_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no); + dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + /* Store the current instruction counter (used in flush_iq + * calculation) + */ + pkt_in_done = readq(iq->inst_cnt_reg); + + if (oct->msix_on) { + /* Set CINT_ENB to enable IQ interrupt */ + writeq((pkt_in_done | CN23XX_INTR_CINT_ENB), + iq->inst_cnt_reg); + } else { + /* Clear the count by writing back what we read, but don't + * enable interrupts + */ + writeq(pkt_in_done, iq->inst_cnt_reg); + } + + iq->reset_instr_cnt = 0; +} + +static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no) +{ + u32 reg_val; + struct octeon_droq *droq = oct->droq[oq_no]; + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + u64 time_threshold; + u64 cnt_threshold; + + oq_no += oct->sriov_info.pf_srn; + + octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no), + droq->desc_ring_dma); + octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count); + + octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no), + droq->buffer_size); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + droq->pkts_sent_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no); + droq->pkts_credit_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no); + + if (!oct->msix_on) { + /* Enable this output queue to generate Packet Timer Interrupt + */ + reg_val = + octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no)); + reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB; + octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no), + reg_val); + + /* Enable this output queue to generate Packet Count Interrupt + */ + reg_val = + 
octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no)); + reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB; + octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no), + reg_val); + } else { + time_threshold = cn23xx_pf_get_oq_ticks( + oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf)); + cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf); + + octeon_write_csr64( + oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no), + ((time_threshold << 32 | cnt_threshold))); + } +} + +static void cn23xx_pf_mbox_thread(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr; + struct octeon_device *oct = mbox->oct_dev; + u64 mbox_int_val, val64; + u32 q_no, i; + + if (oct->rev_id < OCTEON_CN23XX_REV_1_1) { + /*read and clear by writing 1*/ + mbox_int_val = readq(mbox->mbox_int_reg); + writeq(mbox_int_val, mbox->mbox_int_reg); + + for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + + val64 = readq(oct->mbox[q_no]->mbox_write_reg); + + if (val64 && (val64 != OCTEON_PFVFACK)) { + if (octeon_mbox_read(oct->mbox[q_no])) + octeon_mbox_process_message( + oct->mbox[q_no]); + } + } + + schedule_delayed_work(&wk->work, msecs_to_jiffies(10)); + } else { + octeon_mbox_process_message(mbox); + } +} + +static int cn23xx_setup_pf_mbox(struct octeon_device *oct) +{ + struct octeon_mbox *mbox = NULL; + u16 mac_no = oct->pcie_port; + u16 pf_num = oct->pf_num; + u32 q_no, i; + + if (!oct->sriov_info.max_vfs) + return 0; + + for (i = 0; i < oct->sriov_info.max_vfs; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + + mbox = vmalloc(sizeof(*mbox)); + if (!mbox) + goto free_mbox; + + memset(mbox, 0, sizeof(struct octeon_mbox)); + + spin_lock_init(&mbox->lock); + + mbox->oct_dev = oct; + + mbox->q_no = q_no; + + mbox->state = OCTEON_MBOX_STATE_IDLE; + + /* PF mbox interrupt reg */ + mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num); + + /* PF writes into SIG0 reg */ + mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0); + + /* PF reads from SIG1 reg */ + mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1); + + /*Mail Box Thread creation*/ + INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, + cn23xx_pf_mbox_thread); + mbox->mbox_poll_wk.ctxptr = (void *)mbox; + + oct->mbox[q_no] = mbox; + + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + } + + if (oct->rev_id < OCTEON_CN23XX_REV_1_1) + schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work, + msecs_to_jiffies(0)); + + return 0; + +free_mbox: + while (i) { + i--; + vfree(oct->mbox[i]); + } + + return 1; +} + +static int cn23xx_free_pf_mbox(struct octeon_device *oct) +{ + u32 q_no, i; + + if (!oct->sriov_info.max_vfs) + return 0; + + for (i = 0; i < oct->sriov_info.max_vfs; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + cancel_delayed_work_sync( + &oct->mbox[q_no]->mbox_poll_wk.work); + vfree(oct->mbox[q_no]); + } + + return 0; +} + +static int cn23xx_enable_io_queues(struct octeon_device *oct) +{ + u64 reg_val; + u32 srn, ern, q_no; + u32 loop = 1000; + + srn = oct->sriov_info.pf_srn; + ern = srn + oct->num_iqs; + + for (q_no = srn; q_no < ern; q_no++) { + /* set the corresponding IQ IS_64B bit */ + if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) { + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B; + octeon_write_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); + } + 
+ /* set the corresponding IQ ENB bit */ + if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) { + /* IOQs are in reset by default in PEM2 mode, + * clearing reset bit + */ + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + + if (reg_val & CN23XX_PKT_INPUT_CTL_RST) { + while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && + !(reg_val & + CN23XX_PKT_INPUT_CTL_QUIET) && + --loop) { + reg_val = octeon_read_csr64( + oct, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + } + if (!loop) { + dev_err(&oct->pci_dev->dev, + "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", + q_no); + return -1; + } + reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST; + octeon_write_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + if (reg_val & CN23XX_PKT_INPUT_CTL_RST) { + dev_err(&oct->pci_dev->dev, + "clearing the reset failed for qno: %u\n", + q_no); + return -1; + } + } + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB; + octeon_write_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val); + } + } + for (q_no = srn; q_no < ern; q_no++) { + u32 reg_val; + /* set the corresponding OQ ENB bit */ + if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) { + reg_val = octeon_read_csr( + oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no)); + reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB; + octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), + reg_val); + } + } + return 0; +} + +static void cn23xx_disable_io_queues(struct octeon_device *oct) +{ + int q_no, loop; + u64 d64; + u32 d32; + u32 srn, ern; + + srn = oct->sriov_info.pf_srn; + ern = srn + oct->num_iqs; + + /*** Disable Input Queues. ***/ + for (q_no = srn; q_no < ern; q_no++) { + loop = HZ; + + /* start the Reset for a particular ring */ + WRITE_ONCE(d64, octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no))); + WRITE_ONCE(d64, READ_ONCE(d64) & + (~(CN23XX_PKT_INPUT_CTL_RING_ENB))); + WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST); + octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + READ_ONCE(d64)); + + /* Wait until hardware indicates that the particular IQ + * is out of reset. + */ + WRITE_ONCE(d64, octeon_read_csr64( + oct, CN23XX_SLI_PKT_IOQ_RING_RST)); + while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) { + WRITE_ONCE(d64, octeon_read_csr64( + oct, CN23XX_SLI_PKT_IOQ_RING_RST)); + schedule_timeout_uninterruptible(1); + } + + /* Reset the doorbell register for this Input Queue. */ + octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF); + while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) && + loop--) { + schedule_timeout_uninterruptible(1); + } + } + + /*** Disable Output Queues. ***/ + for (q_no = srn; q_no < ern; q_no++) { + loop = HZ; + + /* Wait until hardware indicates that the particular IQ + * is out of reset.It given that SLI_PKT_RING_RST is + * common for both IQs and OQs + */ + WRITE_ONCE(d64, octeon_read_csr64( + oct, CN23XX_SLI_PKT_IOQ_RING_RST)); + while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) { + WRITE_ONCE(d64, octeon_read_csr64( + oct, CN23XX_SLI_PKT_IOQ_RING_RST)); + schedule_timeout_uninterruptible(1); + } + + /* Reset the doorbell register for this Output Queue. 
*/ + octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no), + 0xFFFFFFFF); + while (octeon_read_csr64(oct, + CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) && + loop--) { + schedule_timeout_uninterruptible(1); + } + + /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */ + WRITE_ONCE(d32, octeon_read_csr( + oct, CN23XX_SLI_OQ_PKTS_SENT(q_no))); + octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no), + READ_ONCE(d32)); + } +} + +static u64 cn23xx_pf_msix_interrupt_handler(void *dev) +{ + struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; + struct octeon_device *oct = ioq_vector->oct_dev; + u64 pkts_sent; + u64 ret = 0; + struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; + + dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct); + + if (!droq) { + dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n", + oct->pf_num, ioq_vector->ioq_num); + return 0; + } + + pkts_sent = readq(droq->pkts_sent_reg); + + /* If our device has interrupted, then proceed. Also check + * for all f's if interrupt was triggered on an error + * and the PCI read fails. + */ + if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL)) + return ret; + + /* Write count reg in sli_pkt_cnts to clear these int.*/ + if ((pkts_sent & CN23XX_INTR_PO_INT) || + (pkts_sent & CN23XX_INTR_PI_INT)) { + if (pkts_sent & CN23XX_INTR_PO_INT) + ret |= MSIX_PO_INT; + } + + if (pkts_sent & CN23XX_INTR_PI_INT) + /* We will clear the count when we update the read_index. */ + ret |= MSIX_PI_INT; + + /* Never need to handle msix mbox intr for pf. They arrive on the last + * msix + */ + return ret; +} + +static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct) +{ + struct delayed_work *work; + u64 mbox_int_val; + u32 i, q_no; + + mbox_int_val = readq(oct->mbox[0]->mbox_int_reg); + + for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) { + q_no = i * oct->sriov_info.rings_per_vf; + + if (mbox_int_val & BIT_ULL(q_no)) { + writeq(BIT_ULL(q_no), + oct->mbox[0]->mbox_int_reg); + if (octeon_mbox_read(oct->mbox[q_no])) { + work = &oct->mbox[q_no]->mbox_poll_wk.work; + schedule_delayed_work(work, + msecs_to_jiffies(0)); + } + } + } +} + +static irqreturn_t cn23xx_interrupt_handler(void *dev) +{ + struct octeon_device *oct = (struct octeon_device *)dev; + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + u64 intr64; + + dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct); + intr64 = readq(cn23xx->intr_sum_reg64); + + oct->int_status = 0; + + if (intr64 & CN23XX_INTR_ERR) + dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n", + oct->octeon_id, CVM_CAST64(intr64)); + + /* When VFs write into MBOX_SIG2 reg,these intr is set in PF */ + if (intr64 & CN23XX_INTR_VF_MBOX) + cn23xx_handle_pf_mbox_intr(oct); + + if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) { + if (intr64 & CN23XX_INTR_PKT_DATA) + oct->int_status |= OCT_DEV_INTR_PKT_DATA; + } + + if (intr64 & (CN23XX_INTR_DMA0_FORCE)) + oct->int_status |= OCT_DEV_INTR_DMA0_FORCE; + if (intr64 & (CN23XX_INTR_DMA1_FORCE)) + oct->int_status |= OCT_DEV_INTR_DMA1_FORCE; + + /* Clear the current interrupts */ + writeq(intr64, cn23xx->intr_sum_reg64); + + return IRQ_HANDLED; +} + +static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, + u32 idx, int valid) +{ + u64 bar1; + u64 reg_adr; + + if (!valid) { + reg_adr = lio_pci_readq( + oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); + WRITE_ONCE(bar1, reg_adr); + lio_pci_writeq(oct, (READ_ONCE(bar1) & 
0xFFFFFFFEULL), + CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); + reg_adr = lio_pci_readq( + oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); + WRITE_ONCE(bar1, reg_adr); + return; + } + + /* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores + * bits <41:22> of the Core Addr + */ + lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK), + CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); + + WRITE_ONCE(bar1, lio_pci_readq( + oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx))); +} + +static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask) +{ + lio_pci_writeq(oct, mask, + CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); +} + +static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx) +{ + return (u32)lio_pci_readq( + oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)); +} + +/* always call with lock held */ +static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq) +{ + u32 new_idx; + u32 last_done; + u32 pkt_in_done = readl(iq->inst_cnt_reg); + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + /* Modulo of the new index with the IQ size will give us + * the new index. The iq->reset_instr_cnt is always zero for + * cn23xx, so no extra adjustments are needed. + */ + new_idx = (iq->octeon_read_index + + (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) % + iq->max_count; + + return new_idx; +} + +static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag) +{ + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + u64 intr_val = 0; + + /* Divide the single write to multiple writes based on the flag. */ + /* Enable Interrupt */ + if (intr_flag == OCTEON_ALL_INTR) { + writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64); + } else if (intr_flag & OCTEON_OUTPUT_INTR) { + intr_val = readq(cn23xx->intr_enb_reg64); + intr_val |= CN23XX_INTR_PKT_DATA; + writeq(intr_val, cn23xx->intr_enb_reg64); + } else if ((intr_flag & OCTEON_MBOX_INTR) && + (oct->sriov_info.max_vfs > 0)) { + if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) { + intr_val = readq(cn23xx->intr_enb_reg64); + intr_val |= CN23XX_INTR_VF_MBOX; + writeq(intr_val, cn23xx->intr_enb_reg64); + } + } +} + +static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag) +{ + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + u64 intr_val = 0; + + /* Disable Interrupts */ + if (intr_flag == OCTEON_ALL_INTR) { + writeq(0, cn23xx->intr_enb_reg64); + } else if (intr_flag & OCTEON_OUTPUT_INTR) { + intr_val = readq(cn23xx->intr_enb_reg64); + intr_val &= ~CN23XX_INTR_PKT_DATA; + writeq(intr_val, cn23xx->intr_enb_reg64); + } else if ((intr_flag & OCTEON_MBOX_INTR) && + (oct->sriov_info.max_vfs > 0)) { + if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) { + intr_val = readq(cn23xx->intr_enb_reg64); + intr_val &= ~CN23XX_INTR_VF_MBOX; + writeq(intr_val, cn23xx->intr_enb_reg64); + } + } +} + +static void cn23xx_get_pcie_qlmport(struct octeon_device *oct) +{ + oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff; + + dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n", + oct->pcie_port); +} + +static int cn23xx_get_pf_num(struct octeon_device *oct) +{ + u32 fdl_bit = 0; + u64 pkt0_in_ctl, d64; + int pfnum, mac, trs, ret; + + ret = 0; + + /** Read Function Dependency Link reg to get the function number */ + if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL, + &fdl_bit) == 0) { + oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) & + 
CN23XX_PCIE_SRIOV_FDL_MASK); + } else { + ret = -EINVAL; + + /* Under some virtual environments, extended PCI regs are + * inaccessible, in which case the above read will have failed. + * In this case, read the PF number from the + * SLI_PKT0_INPUT_CONTROL reg (written by f/w) + */ + pkt0_in_ctl = octeon_read_csr64(oct, + CN23XX_SLI_IQ_PKT_CONTROL64(0)); + pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; + mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff; + + /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/ + d64 = octeon_read_csr64(oct, + CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum)); + trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff; + if (trs == 1) { + dev_err(&oct->pci_dev->dev, + "OCTEON: error reading PCI cfg space pfnum, re-read %u\n", + pfnum); + oct->pf_num = pfnum; + ret = 0; + } else { + dev_err(&oct->pci_dev->dev, + "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n"); + } + } + + return ret; +} + +static void cn23xx_setup_reg_address(struct octeon_device *oct) +{ + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + + oct->reg_list.pci_win_wr_addr_hi = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI); + oct->reg_list.pci_win_wr_addr_lo = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO); + oct->reg_list.pci_win_wr_addr = + (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64); + + oct->reg_list.pci_win_rd_addr_hi = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI); + oct->reg_list.pci_win_rd_addr_lo = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO); + oct->reg_list.pci_win_rd_addr = + (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64); + + oct->reg_list.pci_win_wr_data_hi = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI); + oct->reg_list.pci_win_wr_data_lo = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO); + oct->reg_list.pci_win_wr_data = + (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64); + + oct->reg_list.pci_win_rd_data_hi = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI); + oct->reg_list.pci_win_rd_data_lo = + (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO); + oct->reg_list.pci_win_rd_data = + (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64); + + cn23xx_get_pcie_qlmport(oct); + + cn23xx->intr_mask64 = CN23XX_INTR_MASK; + if (!oct->msix_on) + cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME; + if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) + cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX; + + cn23xx->intr_sum_reg64 = + bar0_pciaddr + + CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); + cn23xx->intr_enb_reg64 = + bar0_pciaddr + + CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); +} + +int cn23xx_sriov_config(struct octeon_device *oct) +{ + struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; + u32 max_rings, total_rings, max_vfs, rings_per_vf; + u32 pf_srn, num_pf_rings; + u32 max_possible_vfs; + + cn23xx->conf = + (struct octeon_config *)oct_get_config_info(oct, LIO_23XX); + switch (oct->rev_id) { + case OCTEON_CN23XX_REV_1_0: + max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0; + max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0; + break; + case OCTEON_CN23XX_REV_1_1: + max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1; + max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1; + break; + default: + max_rings = CN23XX_MAX_RINGS_PER_PF; + max_possible_vfs = CN23XX_MAX_VFS_PER_PF; + break; + } + + if 
(oct->sriov_info.num_pf_rings) + num_pf_rings = oct->sriov_info.num_pf_rings; + else + num_pf_rings = num_present_cpus(); + +#ifdef CONFIG_PCI_IOV + max_vfs = min_t(u32, + (max_rings - num_pf_rings), max_possible_vfs); + rings_per_vf = 1; +#else + max_vfs = 0; + rings_per_vf = 0; +#endif + + total_rings = num_pf_rings + max_vfs; + + /* the first ring of the pf */ + pf_srn = total_rings - num_pf_rings; + + oct->sriov_info.trs = total_rings; + oct->sriov_info.max_vfs = max_vfs; + oct->sriov_info.rings_per_vf = rings_per_vf; + oct->sriov_info.pf_srn = pf_srn; + oct->sriov_info.num_pf_rings = num_pf_rings; + dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n", + oct->sriov_info.trs, oct->sriov_info.max_vfs, + oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn, + oct->sriov_info.num_pf_rings); + + oct->sriov_info.sriov_enabled = 0; + + return 0; +} + +int setup_cn23xx_octeon_pf_device(struct octeon_device *oct) +{ + u32 data32; + u64 BAR0, BAR1; + + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32); + BAR0 = (u64)(data32 & ~0xf); + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32); + BAR0 |= ((u64)data32 << 32); + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32); + BAR1 = (u64)(data32 & ~0xf); + pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32); + BAR1 |= ((u64)data32 << 32); + + if (!BAR0 || !BAR1) { + if (!BAR0) + dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n"); + if (!BAR1) + dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n"); + return 1; + } + + if (octeon_map_pci_barx(oct, 0, 0)) + return 1; + + if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) { + dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + return 1; + } + + if (cn23xx_get_pf_num(oct) != 0) + return 1; + + if (cn23xx_sriov_config(oct)) { + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + return 1; + } + + octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL); + + oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs; + oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs; + oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox; + oct->fn_list.free_mbox = cn23xx_free_pf_mbox; + + oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler; + oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler; + + oct->fn_list.soft_reset = cn23xx_pf_soft_reset; + oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs; + oct->fn_list.update_iq_read_idx = cn23xx_update_read_index; + + oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup; + oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write; + oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read; + + oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt; + oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt; + + oct->fn_list.enable_io_queues = cn23xx_enable_io_queues; + oct->fn_list.disable_io_queues = cn23xx_disable_io_queues; + + cn23xx_setup_reg_address(oct); + + oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct); + + return 0; +} + +int validate_cn23xx_pf_config_info(struct octeon_device *oct, + struct octeon_config *conf23xx) +{ + if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) { + dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n", + __func__, CFG_GET_IQ_MAX_Q(conf23xx), + CN23XX_MAX_INPUT_QUEUES); + return 1; + } + + if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) { + dev_err(&oct->pci_dev->dev, "%s: Num OQ 
(%d) exceeds Max (%d)\n", + __func__, CFG_GET_OQ_MAX_Q(conf23xx), + CN23XX_MAX_OUTPUT_QUEUES); + return 1; + } + + if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR && + CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) { + dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n", + __func__); + return 1; + } + + if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) { + dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", + __func__); + return 1; + } + + if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) { + dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", + __func__); + return 1; + } + + return 0; +} + +int cn23xx_fw_loaded(struct octeon_device *oct) +{ + u64 val; + + /* If there's more than one active PF on this NIC, then that + * implies that the NIC firmware is loaded and running. This check + * prevents a rare false negative that might occur if we only relied + * on checking the SCR2_BIT_FW_LOADED flag. The false negative would + * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even + * though the firmware was already loaded but still booting and has yet + * to set SCR2_BIT_FW_LOADED. + */ + if (atomic_read(oct->adapter_refcount) > 1) + return 1; + + val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); + return (val >> SCR2_BIT_FW_LOADED) & 1ULL; +} + +void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, + u8 *mac) +{ + if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) { + struct octeon_mbox_cmd mbox_cmd; + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 0; + mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR; + mbox_cmd.msg.s.len = 1; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = NULL; + mbox_cmd.fn_arg = NULL; + ether_addr_copy(mbox_cmd.msg.s.params, mac); + mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf; + octeon_mbox_write(oct, &mbox_cmd); + } +} + +static void +cn23xx_get_vf_stats_callback(struct octeon_device *oct, + struct octeon_mbox_cmd *cmd, void *arg) +{ + struct oct_vf_stats_ctx *ctx = arg; + + memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats)); + atomic_set(&ctx->status, 1); +} + +int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx, + struct oct_vf_stats *stats) +{ + u32 timeout = HZ; // 1sec + struct octeon_mbox_cmd mbox_cmd; + struct oct_vf_stats_ctx ctx; + u32 count = 0, ret; + + if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx))) + return -1; + + if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data)) + return -1; + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 1; + mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS; + mbox_cmd.msg.s.len = 1; + mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback; + ctx.stats = stats; + atomic_set(&ctx.status, 0); + mbox_cmd.fn_arg = (void *)&ctx; + memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data)); + octeon_mbox_write(oct, &mbox_cmd); + + do { + schedule_timeout_uninterruptible(1); + } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout)); + + ret = atomic_read(&ctx.status); + if (ret == 0) { + octeon_mbox_cancel(oct, 0); + dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timedout\n", + vfidx); + return -1; + } + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h new file mode 100644 index 
000000000..e6f31d0d5 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h @@ -0,0 +1,73 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn23xx_device.h + * \brief Host Driver: Routines that perform CN23XX specific operations. + */ + +#ifndef __CN23XX_PF_DEVICE_H__ +#define __CN23XX_PF_DEVICE_H__ + +#include "cn23xx_pf_regs.h" + +/* Register address and configuration for a CN23XX devices. + * If device specific changes need to be made then add a struct to include + * device specific fields as shown in the commented section + */ +struct octeon_cn23xx_pf { + /** PCI interrupt summary register */ + u8 __iomem *intr_sum_reg64; + + /** PCI interrupt enable register */ + u8 __iomem *intr_enb_reg64; + + /** The PCI interrupt mask used by interrupt handler */ + u64 intr_mask64; + + struct octeon_config *conf; +}; + +#define CN23XX_SLI_DEF_BP 0x40 + +struct oct_vf_stats { + u64 rx_packets; + u64 tx_packets; + u64 rx_bytes; + u64 tx_bytes; + u64 broadcast; + u64 multicast; +}; + +int setup_cn23xx_octeon_pf_device(struct octeon_device *oct); + +int validate_cn23xx_pf_config_info(struct octeon_device *oct, + struct octeon_config *conf23xx); + +u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); + +void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct); + +int cn23xx_sriov_config(struct octeon_device *oct); + +int cn23xx_fw_loaded(struct octeon_device *oct); + +void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx, + u8 *mac); + +int cn23xx_get_vf_stats(struct octeon_device *oct, int ifidx, + struct oct_vf_stats *stats); +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h new file mode 100644 index 000000000..a0fd32476 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h @@ -0,0 +1,599 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn23xx_regs.h + * \brief Host Driver: Register Address and Register Mask values for + * Octeon CN23XX devices. 
+ */ + +#ifndef __CN23XX_PF_REGS_H__ +#define __CN23XX_PF_REGS_H__ + +#define CN23XX_CONFIG_VENDOR_ID 0x00 +#define CN23XX_CONFIG_DEVICE_ID 0x02 + +#define CN23XX_CONFIG_XPANSION_BAR 0x38 + +#define CN23XX_CONFIG_MSIX_CAP 0x50 +#define CN23XX_CONFIG_MSIX_LMSI 0x54 +#define CN23XX_CONFIG_MSIX_UMSI 0x58 +#define CN23XX_CONFIG_MSIX_MSIMD 0x5C +#define CN23XX_CONFIG_MSIX_MSIMM 0x60 +#define CN23XX_CONFIG_MSIX_MSIMP 0x64 + +#define CN23XX_CONFIG_PCIE_CAP 0x70 +#define CN23XX_CONFIG_PCIE_DEVCAP 0x74 +#define CN23XX_CONFIG_PCIE_DEVCTL 0x78 +#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C +#define CN23XX_CONFIG_PCIE_LINKCTL 0x80 +#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84 +#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88 +#define CN23XX_CONFIG_PCIE_DEVCTL2 0x98 +#define CN23XX_CONFIG_PCIE_LINKCTL2 0xA0 +#define CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK 0x108 +#define CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS 0x110 +#define CN23XX_CONFIG_PCIE_DEVCTL_MASK 0x00040000 + +#define CN23XX_PCIE_SRIOV_FDL 0x188 +#define CN23XX_PCIE_SRIOV_FDL_BIT_POS 0x10 +#define CN23XX_PCIE_SRIOV_FDL_MASK 0xFF + +#define CN23XX_CONFIG_PCIE_FLTMSK 0x720 + +#define CN23XX_CONFIG_SRIOV_VFDEVID 0x190 + +#define CN23XX_CONFIG_SRIOV_BAR_START 0x19C +#define CN23XX_CONFIG_SRIOV_BARX(i) \ + (CN23XX_CONFIG_SRIOV_BAR_START + ((i) * 4)) +#define CN23XX_CONFIG_SRIOV_BAR_PF 0x08 +#define CN23XX_CONFIG_SRIOV_BAR_64BIT 0x04 +#define CN23XX_CONFIG_SRIOV_BAR_IO 0x01 + +/* ############## BAR0 Registers ################ */ + +#define CN23XX_SLI_CTL_PORT_START 0x286E0 +#define CN23XX_PORT_OFFSET 0x10 + +#define CN23XX_SLI_CTL_PORT(p) \ + (CN23XX_SLI_CTL_PORT_START + ((p) * CN23XX_PORT_OFFSET)) + +/* 2 scatch registers (64-bit) */ +#define CN23XX_SLI_WINDOW_CTL 0x282E0 +#define CN23XX_SLI_SCRATCH1 0x283C0 +#define CN23XX_SLI_SCRATCH2 0x283D0 +#define CN23XX_SLI_WINDOW_CTL_DEFAULT 0x200000ULL + +/* 1 registers (64-bit) - SLI_CTL_STATUS */ +#define CN23XX_SLI_CTL_STATUS 0x28570 + +/* SLI Packet Input Jabber Register (64 bit register) + * <31:0> for Byte count for limiting sizes of packet sizes + * that are allowed for sli packet inbound packets. + * the default value is 0xFA00(=64000). + */ +#define CN23XX_SLI_PKT_IN_JABBER 0x29170 +/* The input jabber is used to determine the TSO max size. + * Due to H/W limitation, this needs to be reduced to 60000 + * in order to use H/W TSO and avoid the WQE malformation + * PKO_BUG_24989_WQE_LEN + */ +#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/ + +#define CN23XX_WIN_WR_ADDR_LO 0x20000 +#define CN23XX_WIN_WR_ADDR_HI 0x20004 +#define CN23XX_WIN_WR_ADDR64 CN23XX_WIN_WR_ADDR_LO + +#define CN23XX_WIN_RD_ADDR_LO 0x20010 +#define CN23XX_WIN_RD_ADDR_HI 0x20014 +#define CN23XX_WIN_RD_ADDR64 CN23XX_WIN_RD_ADDR_LO + +#define CN23XX_WIN_WR_DATA_LO 0x20020 +#define CN23XX_WIN_WR_DATA_HI 0x20024 +#define CN23XX_WIN_WR_DATA64 CN23XX_WIN_WR_DATA_LO + +#define CN23XX_WIN_RD_DATA_LO 0x20040 +#define CN23XX_WIN_RD_DATA_HI 0x20044 +#define CN23XX_WIN_RD_DATA64 CN23XX_WIN_RD_DATA_LO + +#define CN23XX_WIN_WR_MASK_LO 0x20030 +#define CN23XX_WIN_WR_MASK_HI 0x20034 +#define CN23XX_WIN_WR_MASK_REG CN23XX_WIN_WR_MASK_LO +#define CN23XX_SLI_MAC_CREDIT_CNT 0x23D70 + +/* 4 registers (64-bit) for mapping IOQs to MACs(PEMs)- + * SLI_PKT_MAC(0..3)_PF(0..1)_RINFO + */ +#define CN23XX_SLI_PKT_MAC_RINFO_START64 0x29030 + +/*1 register (64-bit) to determine whether IOQs are in reset. 
*/ +#define CN23XX_SLI_PKT_IOQ_RING_RST 0x291E0 + +/* Each Input Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_IQ_OFFSET 0x20000 + +#define CN23XX_MAC_RINFO_OFFSET 0x20 +#define CN23XX_PF_RINFO_OFFSET 0x10 + +#define CN23XX_SLI_PKT_MAC_RINFO64(mac, pf) \ + (CN23XX_SLI_PKT_MAC_RINFO_START64 + \ + ((mac) * CN23XX_MAC_RINFO_OFFSET) + \ + ((pf) * CN23XX_PF_RINFO_OFFSET)) + +/** mask for total rings, setting TRS to base */ +#define CN23XX_PKT_MAC_CTL_RINFO_TRS BIT_ULL(16) +/** mask for starting ring number: setting SRN <6:0> = 0x7F */ +#define CN23XX_PKT_MAC_CTL_RINFO_SRN (0x7F) + +/* Starting bit of the TRS field in CN23XX_SLI_PKT_MAC_RINFO64 register */ +#define CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS 16 +/* Starting bit of SRN field in CN23XX_SLI_PKT_MAC_RINFO64 register */ +#define CN23XX_PKT_MAC_CTL_RINFO_SRN_BIT_POS 0 +/* Starting bit of RPVF field in CN23XX_SLI_PKT_MAC_RINFO64 register */ +#define CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS 32 +/* Starting bit of NVFS field in CN23XX_SLI_PKT_MAC_RINFO64 register */ +#define CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS 48 + +/*###################### REQUEST QUEUE #########################*/ + +/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */ +#define CN23XX_SLI_IQ_INSTR_COUNT_START64 0x10040 + +/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */ +#define CN23XX_SLI_IQ_BASE_ADDR_START64 0x10010 + +/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */ +#define CN23XX_SLI_IQ_DOORBELL_START 0x10020 + +/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */ +#define CN23XX_SLI_IQ_SIZE_START 0x10030 + +/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data & + * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL. + */ +#define CN23XX_SLI_IQ_PKT_CONTROL_START64 0x10000 + +/*------- Request Queue Macros ---------*/ +#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \ + (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \ + (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_SIZE(iq) \ + (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_DOORBELL(iq) \ + (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET)) + +#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \ + (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET)) + +/*------------------ Masks ----------------*/ +#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29) +/* Number of instructions to be read in one MAC read request. 
+ * setting to Max value(4) + */ +#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25) +#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24) +#define CN23XX_PKT_INPUT_CTL_RST BIT(23) +#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28) +#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22) +#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8) +#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6) +#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5) +#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4) +#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3) +#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2) +#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1) + +/** Rings per Virtual Function **/ +#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F) +#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48) +/** These bits[47:44] select the Physical function number within the MAC */ +#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7) +#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45) +/** These bits[43:32] select the function number within the PF */ +#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF) +#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29) +#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL) +#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32) +#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL) + +#ifdef __LITTLE_ENDIAN_BITFIELD +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE | \ + CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \ + CN23XX_PKT_INPUT_CTL_USE_CSR) +#else +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE | \ + CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \ + CN23XX_PKT_INPUT_CTL_USE_CSR | \ + CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP) +#endif + +/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */ +#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62) +#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48) + +/*############################ OUTPUT QUEUE #########################*/ + +/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */ +#define CN23XX_SLI_OQ_PKT_CONTROL_START 0x10050 + +/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */ +#define CN23XX_SLI_OQ0_BUFF_INFO_SIZE 0x10060 + +/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */ +#define CN23XX_SLI_OQ_BASE_ADDR_START64 0x10070 + +/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */ +#define CN23XX_SLI_OQ_PKT_CREDITS_START 0x10080 + +/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */ +#define CN23XX_SLI_OQ_SIZE_START 0x10090 + +/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */ +#define CN23XX_SLI_OQ_PKT_SENT_START 0x100B0 + +/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */ +#define CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0 + +/* Each Output Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_OQ_OFFSET 0x20000 + +/* 1 (64-bit register) for Output Queue backpressure across all rings. 
*/ +#define CN23XX_SLI_OQ_WMARK 0x29180 + +/* Global pkt control register */ +#define CN23XX_SLI_GBL_CONTROL 0x29210 + +/* Backpressure enable register for PF0 */ +#define CN23XX_SLI_OUT_BP_EN_W1S 0x29260 + +/* Backpressure enable register for PF1 */ +#define CN23XX_SLI_OUT_BP_EN2_W1S 0x29270 + +/* Backpressure disable register for PF0 */ +#define CN23XX_SLI_OUT_BP_EN_W1C 0x29280 + +/* Backpressure disable register for PF1 */ +#define CN23XX_SLI_OUT_BP_EN2_W1C 0x29290 + +/*------- Output Queue Macros ---------*/ + +#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \ + (CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \ + (CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_SIZE(oq) \ + (CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \ + (CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_PKTS_SENT(oq) \ + (CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \ + (CN23XX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_PKT_INT_LEVELS(oq) \ + (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \ + ((oq) * CN23XX_OQ_OFFSET)) + +/*Macro's for accessing CNT and TIME separately from INT_LEVELS*/ +#define CN23XX_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \ + (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \ + ((oq) * CN23XX_OQ_OFFSET)) + +#define CN23XX_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \ + (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \ + ((oq) * CN23XX_OQ_OFFSET) + 4) + +/*------------------ Masks ----------------*/ +#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13) +#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12) +#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11) +#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9) +#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8) +#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7) +#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6) +#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5) +#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3) +#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2) +#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1) +#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0) + +/*######################### Mailbox Reg Macros ########################*/ +#define CN23XX_SLI_PKT_MBOX_INT_START 0x10210 +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200 +#define CN23XX_SLI_MAC_PF_MBOX_INT_START 0x27380 + +#define CN23XX_SLI_MBOX_OFFSET 0x20000 +#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8 + +#define CN23XX_SLI_PKT_MBOX_INT(q) \ + (CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET)) + +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \ + (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \ + ((q) * CN23XX_SLI_MBOX_OFFSET + \ + (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET)) + +#define CN23XX_SLI_MAC_PF_MBOX_INT(mac, pf) \ + (CN23XX_SLI_MAC_PF_MBOX_INT_START + \ + ((mac) * CN23XX_MAC_INT_OFFSET + \ + (pf) * CN23XX_PF_INT_OFFSET)) + +/*######################### DMA Counters #########################*/ + +/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. 
*/ +#define CN23XX_DMA_CNT_START 0x28400 + +/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values */ +/* SLI_DMA_0_TIM */ +#define CN23XX_DMA_TIM_START 0x28420 + +/* 2 registers (64-bit) - DMA count & Time Interrupt threshold - + * SLI_DMA_0_INT_LEVEL + */ +#define CN23XX_DMA_INT_LEVEL_START 0x283E0 + +/* Each DMA register is at a 16-byte Offset in BAR0 */ +#define CN23XX_DMA_OFFSET 0x10 + +/*---------- DMA Counter Macros ---------*/ +#define CN23XX_DMA_CNT(dq) \ + (CN23XX_DMA_CNT_START + ((dq) * CN23XX_DMA_OFFSET)) + +#define CN23XX_DMA_INT_LEVEL(dq) \ + (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET)) + +#define CN23XX_DMA_PKT_INT_LEVEL(dq) \ + (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET)) + +#define CN23XX_DMA_TIME_INT_LEVEL(dq) \ + (CN23XX_DMA_INT_LEVEL_START + 4 + ((dq) * CN23XX_DMA_OFFSET)) + +#define CN23XX_DMA_TIM(dq) \ + (CN23XX_DMA_TIM_START + ((dq) * CN23XX_DMA_OFFSET)) + +/*######################## MSIX TABLE #########################*/ + +#define CN23XX_MSIX_TABLE_ADDR_START 0x0 +#define CN23XX_MSIX_TABLE_DATA_START 0x8 + +#define CN23XX_MSIX_TABLE_SIZE 0x10 +#define CN23XX_MSIX_TABLE_ENTRIES 0x41 + +#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32) + +#define CN23XX_MSIX_TABLE_ADDR(idx) \ + (CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE)) + +#define CN23XX_MSIX_TABLE_DATA(idx) \ + (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE)) + +/*######################## INTERRUPTS #########################*/ +#define CN23XX_MAC_INT_OFFSET 0x20 +#define CN23XX_PF_INT_OFFSET 0x10 + +/* 1 register (64-bit) for Interrupt Summary */ +#define CN23XX_SLI_INT_SUM64 0x27000 + +/* 4 registers (64-bit) for Interrupt Enable for each Port */ +#define CN23XX_SLI_INT_ENB64 0x27080 + +#define CN23XX_SLI_MAC_PF_INT_SUM64(mac, pf) \ + (CN23XX_SLI_INT_SUM64 + \ + ((mac) * CN23XX_MAC_INT_OFFSET) + \ + ((pf) * CN23XX_PF_INT_OFFSET)) + +#define CN23XX_SLI_MAC_PF_INT_ENB64(mac, pf) \ + (CN23XX_SLI_INT_ENB64 + \ + ((mac) * CN23XX_MAC_INT_OFFSET) + \ + ((pf) * CN23XX_PF_INT_OFFSET)) + +/* 1 register (64-bit) to indicate which Output Queue reached pkt threshold */ +#define CN23XX_SLI_PKT_CNT_INT 0x29130 + +/* 1 register (64-bit) to indicate which Output Queue reached time threshold */ +#define CN23XX_SLI_PKT_TIME_INT 0x29140 + +/*------------------ Interrupt Masks ----------------*/ + +#define CN23XX_INTR_PO_INT BIT_ULL(63) +#define CN23XX_INTR_PI_INT BIT_ULL(62) +#define CN23XX_INTR_MBOX_INT BIT_ULL(61) +#define CN23XX_INTR_RESEND BIT_ULL(60) + +#define CN23XX_INTR_CINT_ENB BIT_ULL(48) +#define CN23XX_INTR_MBOX_ENB BIT(0) + +#define CN23XX_INTR_RML_TIMEOUT_ERR (1) + +#define CN23XX_INTR_MIO_INT BIT(1) + +#define CN23XX_INTR_RESERVED1 (3 << 2) + +#define CN23XX_INTR_PKT_COUNT BIT(4) +#define CN23XX_INTR_PKT_TIME BIT(5) + +#define CN23XX_INTR_RESERVED2 (3 << 6) + +#define CN23XX_INTR_M0UPB0_ERR BIT(8) +#define CN23XX_INTR_M0UPWI_ERR BIT(9) +#define CN23XX_INTR_M0UNB0_ERR BIT(10) +#define CN23XX_INTR_M0UNWI_ERR BIT(11) + +#define CN23XX_INTR_RESERVED3 (0xFFFFFULL << 12) + +#define CN23XX_INTR_DMA0_FORCE BIT_ULL(32) +#define CN23XX_INTR_DMA1_FORCE BIT_ULL(33) + +#define CN23XX_INTR_DMA0_COUNT BIT_ULL(34) +#define CN23XX_INTR_DMA1_COUNT BIT_ULL(35) + +#define CN23XX_INTR_DMA0_TIME BIT_ULL(36) +#define CN23XX_INTR_DMA1_TIME BIT_ULL(37) + +#define CN23XX_INTR_RESERVED4 (0x7FFFFULL << 38) + +#define CN23XX_INTR_VF_MBOX BIT_ULL(57) +#define CN23XX_INTR_DMAVF_ERR BIT_ULL(58) +#define CN23XX_INTR_DMAPF_ERR BIT_ULL(59) + +#define CN23XX_INTR_PKTVF_ERR 
BIT_ULL(60) +#define CN23XX_INTR_PKTPF_ERR BIT_ULL(61) +#define CN23XX_INTR_PPVF_ERR BIT_ULL(62) +#define CN23XX_INTR_PPPF_ERR BIT_ULL(63) + +#define CN23XX_INTR_DMA0_DATA (CN23XX_INTR_DMA0_TIME) +#define CN23XX_INTR_DMA1_DATA (CN23XX_INTR_DMA1_TIME) + +#define CN23XX_INTR_DMA_DATA \ + (CN23XX_INTR_DMA0_DATA | CN23XX_INTR_DMA1_DATA) + +/* By fault only TIME based */ +#define CN23XX_INTR_PKT_DATA (CN23XX_INTR_PKT_TIME) +/* For both COUNT and TIME based */ +/* #define CN23XX_INTR_PKT_DATA \ + * (CN23XX_INTR_PKT_COUNT | CN23XX_INTR_PKT_TIME) + */ + +/* Sum of interrupts for all PCI-Express Data Interrupts */ +#define CN23XX_INTR_PCIE_DATA \ + (CN23XX_INTR_DMA_DATA | CN23XX_INTR_PKT_DAT) + +/* Sum of interrupts for error events */ +#define CN23XX_INTR_ERR \ + (CN23XX_INTR_M0UPB0_ERR | \ + CN23XX_INTR_M0UPWI_ERR | \ + CN23XX_INTR_M0UNB0_ERR | \ + CN23XX_INTR_M0UNWI_ERR | \ + CN23XX_INTR_DMAVF_ERR | \ + CN23XX_INTR_DMAPF_ERR | \ + CN23XX_INTR_PKTPF_ERR | \ + CN23XX_INTR_PPPF_ERR | \ + CN23XX_INTR_PPVF_ERR) + +/* Programmed Mask for Interrupt Sum */ +#define CN23XX_INTR_MASK \ + (CN23XX_INTR_DMA_DATA | \ + CN23XX_INTR_DMA0_FORCE | \ + CN23XX_INTR_DMA1_FORCE | \ + CN23XX_INTR_MIO_INT | \ + CN23XX_INTR_ERR) + +/* 4 Registers (64 - bit) */ +#define CN23XX_SLI_S2M_PORT_CTL_START 0x23D80 +#define CN23XX_SLI_S2M_PORTX_CTL(port) \ + (CN23XX_SLI_S2M_PORT_CTL_START + ((port) * 0x10)) + +#define CN23XX_SLI_MAC_NUMBER 0x20050 + +/** PEM(0..3)_BAR1_INDEX(0..15)address is defined as + * addr = (0x00011800C0000100 |port <<24 |idx <<3 ) + * Here, port is PEM(0..3) & idx is INDEX(0..15) + */ +#define CN23XX_PEM_BAR1_INDEX_START 0x00011800C0000100ULL +#define CN23XX_PEM_OFFSET 24 +#define CN23XX_BAR1_INDEX_OFFSET 3 + +#define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \ + (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \ + ((idx) << CN23XX_BAR1_INDEX_OFFSET)) + +/*############################ DPI #########################*/ + +/* 1 register (64-bit) - provides DMA Enable */ +#define CN23XX_DPI_CTL 0x0001df0000000040ULL + +/* 1 register (64-bit) - Controls the DMA IO Operation */ +#define CN23XX_DPI_DMA_CONTROL 0x0001df0000000048ULL + +/* 1 register (64-bit) - Provides DMA Instr'n Queue Enable */ +#define CN23XX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL + +/* 1 register (64-bit) - DPI_REQ_ERR_RSP + * Indicates which Instr'n Queue received error response from the IO sub-system + */ +#define CN23XX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL + +/* 1 register (64-bit) - DPI_REQ_ERR_RST + * Indicates which Instr'n Queue dropped an Instr'n + */ +#define CN23XX_DPI_REQ_ERR_RST 0x0001df0000000060ULL + +/* 6 register (64-bit) - DPI_DMA_ENG(0..5)_EN + * Provides DMA Engine Queue Enable + */ +#define CN23XX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL +#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + ((eng) * 8)) + +/* 8 register (64-bit) - DPI_DMA(0..7)_REQQ_CTL + * Provides control bits for transaction on 8 Queues + */ +#define CN23XX_DPI_DMA_REQQ0_CTL 0x0001df0000000180ULL +#define CN23XX_DPI_DMA_REQQ_CTL(q_no) \ + (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8)) + +/* 6 register (64-bit) - DPI_ENG(0..5)_BUF + * Provides DMA Engine FIFO (Queue) Size + */ +#define CN23XX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL +#define CN23XX_DPI_DMA_ENG_BUF(eng) \ + (CN23XX_DPI_DMA_ENG0_BUF + ((eng) * 8)) + +/* 4 Registers (64-bit) */ +#define CN23XX_DPI_SLI_PRT_CFG_START 0x0001df0000000900ULL +#define CN23XX_DPI_SLI_PRTX_CFG(port) \ + (CN23XX_DPI_SLI_PRT_CFG_START + ((port) * 0x8)) + +/* Masks for DPI_DMA_CONTROL Register */ 
+#define CN23XX_DPI_DMA_COMMIT_MODE BIT_ULL(58) +#define CN23XX_DPI_DMA_PKT_EN BIT_ULL(56) +#define CN23XX_DPI_DMA_ENB (0x0FULL << 48) +/* Set the DMA Control, to update packet count not byte count sent by DMA, + * when we use Interrupt Coalescing (CA mode) + */ +#define CN23XX_DPI_DMA_O_ADD1 BIT(19) +/*selecting 64-bit Byte Swap Mode */ +#define CN23XX_DPI_DMA_O_ES BIT(15) +#define CN23XX_DPI_DMA_O_MODE BIT(14) + +#define CN23XX_DPI_DMA_CTL_MASK \ + (CN23XX_DPI_DMA_COMMIT_MODE | \ + CN23XX_DPI_DMA_PKT_EN | \ + CN23XX_DPI_DMA_O_ES | \ + CN23XX_DPI_DMA_O_MODE) + +/*############################ RST #########################*/ + +#define CN23XX_RST_BOOT 0x0001180006001600ULL +#define CN23XX_RST_SOFT_RST 0x0001180006001680ULL + +#define CN23XX_LMC0_RESET_CTL 0x0001180088000180ULL +#define CN23XX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c new file mode 100644 index 000000000..fda494049 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c @@ -0,0 +1,682 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "cn23xx_vf_device.h" +#include "octeon_main.h" +#include "octeon_mailbox.h" + +u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us) +{ + /* This gives the SLI clock per microsec */ + u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us; + + /* This gives the clock cycles per millisecond */ + oqticks_per_us *= 1000; + + /* This gives the oq ticks (1024 core clock cycles) per millisecond */ + oqticks_per_us /= 1024; + + /* time_intr is in microseconds. The next 2 steps gives the oq ticks + * corressponding to time_intr. + */ + oqticks_per_us *= time_intr_in_us; + oqticks_per_us /= 1000; + + return oqticks_per_us; +} + +static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues) +{ + u32 loop = BUSY_READING_REG_VF_LOOP_COUNT; + int ret_val = 0; + u32 q_no; + u64 d64; + + for (q_no = 0; q_no < num_queues; q_no++) { + /* set RST bit to 1. 
This bit applies to both IQ and OQ */ + d64 = octeon_read_csr64(oct, + CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + d64 |= CN23XX_PKT_INPUT_CTL_RST; + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), + d64); + } + + /* wait until the RST bit is clear or the RST and QUIET bits are set */ + for (q_no = 0; q_no < num_queues; q_no++) { + u64 reg_val = octeon_read_csr64(oct, + CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) && + !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) && + loop) { + WRITE_ONCE(reg_val, octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); + loop--; + } + if (!loop) { + dev_err(&oct->pci_dev->dev, + "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", + q_no); + return -1; + } + WRITE_ONCE(reg_val, READ_ONCE(reg_val) & + ~CN23XX_PKT_INPUT_CTL_RST); + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), + READ_ONCE(reg_val)); + + WRITE_ONCE(reg_val, octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); + if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) { + dev_err(&oct->pci_dev->dev, + "clearing the reset failed for qno: %u\n", + q_no); + ret_val = -1; + } + } + + return ret_val; +} + +static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct) +{ + struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; + struct octeon_instr_queue *iq; + u64 q_no, intr_threshold; + u64 d64; + + if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf)) + return -1; + + for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) { + void __iomem *inst_cnt_reg; + + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no), + 0xFFFFFFFF); + iq = oct->instr_queue[q_no]; + + if (iq) + inst_cnt_reg = iq->inst_cnt_reg; + else + inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr + + CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no); + + d64 = octeon_read_csr64(oct, + CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)); + + d64 &= 0xEFFFFFFFFFFFFFFFL; + + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no), + d64); + + /* Select ES, RO, NS, RDSIZE,DPTR Fomat#0 for + * the Input Queues + */ + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), + CN23XX_PKT_INPUT_CTL_MASK); + + /* set the wmark level to trigger PI_INT */ + intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) & + CN23XX_PKT_IN_DONE_WMARK_MASK; + + writeq((readq(inst_cnt_reg) & + ~(CN23XX_PKT_IN_DONE_WMARK_MASK << + CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) | + (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS), + inst_cnt_reg); + } + return 0; +} + +static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct) +{ + u32 reg_val; + u32 q_no; + + for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) { + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no), + 0xFFFFFFFF); + + reg_val = + octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no)); + + reg_val &= 0xEFFFFFFFFFFFFFFFL; + + reg_val = + octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + + /* clear IPTR */ + reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR; + + /* set DPTR */ + reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR; + + /* reset BMODE */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE); + + /* No Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue ScatterList reset ROR_P, NSR_P + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P); + +#ifdef __LITTLE_ENDIAN_BITFIELD + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P); +#else + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P); +#endif + /* No 
Relaxed Ordering, No Snoop, 64-bit Byte swap + * for Output Queue Data reset ROR, NSR + */ + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR); + reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR); + /* set the ES bit */ + reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES); + + /* write all the selected settings */ + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), + reg_val); + } +} + +static int cn23xx_setup_vf_device_regs(struct octeon_device *oct) +{ + if (cn23xx_vf_setup_global_input_regs(oct)) + return -1; + + cn23xx_vf_setup_global_output_regs(oct); + + return 0; +} + +static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + u64 pkt_in_done; + + /* Write the start of the input queue's ring and its size */ + octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no), + iq->base_addr_dma); + octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count); + + /* Remember the doorbell & instruction count register addr + * for this queue + */ + iq->doorbell_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no); + iq->inst_cnt_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no); + dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + /* Store the current instruction counter (used in flush_iq + * calculation) + */ + pkt_in_done = readq(iq->inst_cnt_reg); + + if (oct->msix_on) { + /* Set CINT_ENB to enable IQ interrupt */ + writeq((pkt_in_done | CN23XX_INTR_CINT_ENB), + iq->inst_cnt_reg); + } + iq->reset_instr_cnt = 0; +} + +static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no) +{ + struct octeon_droq *droq = oct->droq[oq_no]; + + octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no), + droq->desc_ring_dma); + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count); + + octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no), + droq->buffer_size); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + droq->pkts_sent_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no); + droq->pkts_credit_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no); +} + +static void cn23xx_vf_mbox_thread(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr; + + octeon_mbox_process_message(mbox); +} + +static int cn23xx_free_vf_mbox(struct octeon_device *oct) +{ + cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work); + vfree(oct->mbox[0]); + return 0; +} + +static int cn23xx_setup_vf_mbox(struct octeon_device *oct) +{ + struct octeon_mbox *mbox = NULL; + + mbox = vmalloc(sizeof(*mbox)); + if (!mbox) + return 1; + + memset(mbox, 0, sizeof(struct octeon_mbox)); + + spin_lock_init(&mbox->lock); + + mbox->oct_dev = oct; + + mbox->q_no = 0; + + mbox->state = OCTEON_MBOX_STATE_IDLE; + + /* VF mbox interrupt reg */ + mbox->mbox_int_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0); + /* VF reads from SIG0 reg */ + mbox->mbox_read_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0); + /* VF writes into SIG1 reg */ + mbox->mbox_write_reg = + (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1); + + INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work, + cn23xx_vf_mbox_thread); + + mbox->mbox_poll_wk.ctxptr = mbox; + + oct->mbox[0] = mbox; + + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + + return 0; +} + +static int 
cn23xx_enable_vf_io_queues(struct octeon_device *oct) +{ + u32 q_no; + + for (q_no = 0; q_no < oct->num_iqs; q_no++) { + u64 reg_val; + + /* set the corresponding IQ IS_64B bit */ + if (oct->io_qmask.iq64B & BIT_ULL(q_no)) { + reg_val = octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B; + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val); + } + + /* set the corresponding IQ ENB bit */ + if (oct->io_qmask.iq & BIT_ULL(q_no)) { + reg_val = octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB; + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val); + } + } + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + u32 reg_val; + + /* set the corresponding OQ ENB bit */ + if (oct->io_qmask.oq & BIT_ULL(q_no)) { + reg_val = octeon_read_csr( + oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no)); + reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB; + octeon_write_csr( + oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val); + } + } + + return 0; +} + +static void cn23xx_disable_vf_io_queues(struct octeon_device *oct) +{ + u32 num_queues = oct->num_iqs; + + /* per HRM, rings can only be disabled via reset operation, + * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB] + */ + if (num_queues < oct->num_oqs) + num_queues = oct->num_oqs; + + cn23xx_vf_reset_io_queues(oct, num_queues); +} + +void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct) +{ + struct octeon_mbox_cmd mbox_cmd; + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 0; + mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST; + mbox_cmd.msg.s.len = 1; + mbox_cmd.q_no = 0; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = NULL; + mbox_cmd.fn_arg = NULL; + + octeon_mbox_write(oct, &mbox_cmd); +} + +static void octeon_pfvf_hs_callback(struct octeon_device *oct, + struct octeon_mbox_cmd *cmd, + void *arg) +{ + u32 major = 0; + + memcpy((uint8_t *)&oct->pfvf_hsword, cmd->msg.s.params, + CN23XX_MAILBOX_MSGPARAM_SIZE); + if (cmd->recv_len > 1) { + major = ((struct lio_version *)(cmd->data))->major; + major = major << 16; + } + + atomic_set((atomic_t *)arg, major | 1); +} + +int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct) +{ + struct octeon_mbox_cmd mbox_cmd; + u32 q_no, count = 0; + atomic_t status; + u32 pfmajor; + u32 vfmajor; + u32 ret; + + /* Sending VF_ACTIVE indication to the PF driver */ + dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n"); + + mbox_cmd.msg.u64 = 0; + mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST; + mbox_cmd.msg.s.resp_needed = 1; + mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE; + mbox_cmd.msg.s.len = 2; + mbox_cmd.data[0] = 0; + ((struct lio_version *)&mbox_cmd.data[0])->major = + LIQUIDIO_BASE_MAJOR_VERSION; + ((struct lio_version *)&mbox_cmd.data[0])->minor = + LIQUIDIO_BASE_MINOR_VERSION; + ((struct lio_version *)&mbox_cmd.data[0])->micro = + LIQUIDIO_BASE_MICRO_VERSION; + mbox_cmd.q_no = 0; + mbox_cmd.recv_len = 0; + mbox_cmd.recv_status = 0; + mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback; + mbox_cmd.fn_arg = &status; + + octeon_mbox_write(oct, &mbox_cmd); + + atomic_set(&status, 0); + + do { + schedule_timeout_uninterruptible(1); + } while ((!atomic_read(&status)) && (count++ < 100000)); + + ret = atomic_read(&status); + if (!ret) { + dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n"); + return 1; + } + + for (q_no = 0 ; q_no < oct->num_iqs ; q_no++) + oct->instr_queue[q_no]->txpciq.s.pkind = 
oct->pfvf_hsword.pkind; + + vfmajor = LIQUIDIO_BASE_MAJOR_VERSION; + pfmajor = ret >> 16; + if (pfmajor != vfmajor) { + dev_err(&oct->pci_dev->dev, + "VF Liquidio driver (major version %d) is not compatible with Liquidio PF driver (major version %d)\n", + vfmajor, pfmajor); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, + "VF Liquidio driver (major version %d), Liquidio PF driver (major version %d)\n", + vfmajor, pfmajor); + + dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n", + oct->pfvf_hsword.pkind); + + return 0; +} + +static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector) +{ + struct octeon_device *oct = ioq_vector->oct_dev; + u64 mbox_int_val; + + if (!ioq_vector->droq_index) { + /* read and clear by writing 1 */ + mbox_int_val = readq(oct->mbox[0]->mbox_int_reg); + writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg); + if (octeon_mbox_read(oct->mbox[0])) + schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work, + msecs_to_jiffies(0)); + } +} + +static u64 cn23xx_vf_msix_interrupt_handler(void *dev) +{ + struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; + struct octeon_device *oct = ioq_vector->oct_dev; + struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; + u64 pkts_sent; + u64 ret = 0; + + dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct); + pkts_sent = readq(droq->pkts_sent_reg); + + /* If our device has interrupted, then proceed. Also check + * for all f's if interrupt was triggered on an error + * and the PCI read fails. + */ + if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL)) + return ret; + + /* Write count reg in sli_pkt_cnts to clear these int. */ + if ((pkts_sent & CN23XX_INTR_PO_INT) || + (pkts_sent & CN23XX_INTR_PI_INT)) { + if (pkts_sent & CN23XX_INTR_PO_INT) + ret |= MSIX_PO_INT; + } + + if (pkts_sent & CN23XX_INTR_PI_INT) + /* We will clear the count when we update the read_index. */ + ret |= MSIX_PI_INT; + + if (pkts_sent & CN23XX_INTR_MBOX_INT) { + cn23xx_handle_vf_mbox_intr(ioq_vector); + ret |= MSIX_MBOX_INT; + } + + return ret; +} + +static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq) +{ + u32 pkt_in_done = readl(iq->inst_cnt_reg); + u32 last_done; + u32 new_idx; + + last_done = pkt_in_done - iq->pkt_in_done; + iq->pkt_in_done = pkt_in_done; + + /* Modulo of the new index with the IQ size will give us + * the new index. The iq->reset_instr_cnt is always zero for + * cn23xx, so no extra adjustments are needed. 
+ */ + new_idx = (iq->octeon_read_index + + (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) % + iq->max_count; + + return new_idx; +} + +static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag) +{ + struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; + u32 q_no, time_threshold; + + if (intr_flag & OCTEON_OUTPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + /* Set up interrupt packet and time thresholds + * for all the OQs + */ + time_threshold = cn23xx_vf_get_oq_ticks( + oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf)); + + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + (CFG_GET_OQ_INTR_PKT(cn23xx->conf) | + ((u64)time_threshold << 32))); + } + } + + if (intr_flag & OCTEON_INPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + /* Set CINT_ENB to enable IQ interrupt */ + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no), + ((octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) & + ~CN23XX_PKT_IN_DONE_CNT_MASK) | + CN23XX_INTR_CINT_ENB)); + } + } + + /* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */ + if (intr_flag & OCTEON_MBOX_INTR) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_PKT_MBOX_INT(0), + (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) | + CN23XX_INTR_MBOX_ENB)); + } +} + +static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag) +{ + u32 q_no; + + if (intr_flag & OCTEON_OUTPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + /* Write all 1's in INT_LEVEL reg to disable PO_INT */ + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + 0x3fffffffffffff); + } + } + if (intr_flag & OCTEON_INPUT_INTR) { + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no), + (octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) & + ~(CN23XX_INTR_CINT_ENB | + CN23XX_PKT_IN_DONE_CNT_MASK))); + } + } + + if (intr_flag & OCTEON_MBOX_INTR) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_PKT_MBOX_INT(0), + (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) & + ~CN23XX_INTR_MBOX_ENB)); + } +} + +int cn23xx_setup_octeon_vf_device(struct octeon_device *oct) +{ + struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip; + u32 rings_per_vf; + u64 reg_val; + + if (octeon_map_pci_barx(oct, 0, 0)) + return 1; + + /* INPUT_CONTROL[RPVF] gives the VF IOq count */ + reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0)); + + oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_PF_NUM_MASK; + oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) & + CN23XX_PKT_INPUT_CTL_VF_NUM_MASK; + + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + + rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + + cn23xx->conf = oct_get_config_info(oct, LIO_23XX); + if (!cn23xx->conf) { + dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + return 1; + } + + if (oct->sriov_info.rings_per_vf > rings_per_vf) { + dev_warn(&oct->pci_dev->dev, + "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n", + oct->sriov_info.rings_per_vf, rings_per_vf, + rings_per_vf); + oct->sriov_info.rings_per_vf = rings_per_vf; + } else { + if (rings_per_vf > num_present_cpus()) { + dev_warn(&oct->pci_dev->dev, + "PF configured rings_per_vf:%d greater than num_cpu:%d. 
Using rings_per_vf:%d equal to num cpus\n", + rings_per_vf, + num_present_cpus(), + num_present_cpus()); + oct->sriov_info.rings_per_vf = + num_present_cpus(); + } else { + oct->sriov_info.rings_per_vf = rings_per_vf; + } + } + + oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs; + oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs; + oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox; + oct->fn_list.free_mbox = cn23xx_free_vf_mbox; + + oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler; + + oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs; + oct->fn_list.update_iq_read_idx = cn23xx_update_read_index; + + oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt; + oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt; + + oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues; + oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues; + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h new file mode 100644 index 000000000..2d06097d3 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h @@ -0,0 +1,48 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn23xx_device.h + * \brief Host Driver: Routines that perform CN23XX specific operations. + */ + +#ifndef __CN23XX_VF_DEVICE_H__ +#define __CN23XX_VF_DEVICE_H__ + +#include "cn23xx_vf_regs.h" + +/* Register address and configuration for a CN23XX devices. + * If device specific changes need to be made then add a struct to include + * device specific fields as shown in the commented section + */ +struct octeon_cn23xx_vf { + struct octeon_config *conf; +}; + +#define BUSY_READING_REG_VF_LOOP_COUNT 10000 + +#define CN23XX_MAILBOX_MSGPARAM_SIZE 6 + +void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct); + +int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct); + +int cn23xx_setup_octeon_vf_device(struct octeon_device *oct); + +u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); + +void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct); +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h new file mode 100644 index 000000000..e95610941 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h @@ -0,0 +1,274 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn23xx_vf_regs.h + * \brief Host Driver: Register Address and Register Mask values for + * Octeon CN23XX vf functions. + */ + +#ifndef __CN23XX_VF_REGS_H__ +#define __CN23XX_VF_REGS_H__ + +#define CN23XX_CONFIG_XPANSION_BAR 0x38 + +#define CN23XX_CONFIG_PCIE_CAP 0x70 +#define CN23XX_CONFIG_PCIE_DEVCAP 0x74 +#define CN23XX_CONFIG_PCIE_DEVCTL 0x78 +#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C +#define CN23XX_CONFIG_PCIE_LINKCTL 0x80 +#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84 +#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88 + +#define CN23XX_CONFIG_PCIE_FLTMSK 0x720 + +/* The input jabber is used to determine the TSO max size. + * Due to H/W limitation, this needs to be reduced to 60000 + * in order to use H/W TSO and avoid the WQE malformation + * PKO_BUG_24989_WQE_LEN + */ +#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/ + +/* ############## BAR0 Registers ################ */ + +/* Each Input Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_VF_IQ_OFFSET 0x20000 + +/*###################### REQUEST QUEUE #########################*/ + +/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */ +#define CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 0x10040 + +/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */ +#define CN23XX_VF_SLI_IQ_BASE_ADDR_START64 0x10010 + +/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */ +#define CN23XX_VF_SLI_IQ_DOORBELL_START 0x10020 + +/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */ +#define CN23XX_VF_SLI_IQ_SIZE_START 0x10030 + +/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data & + * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL. + */ +#define CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 0x10000 + +/*------- Request Queue Macros ---------*/ +#define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \ + (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \ + (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_SIZE(iq) \ + (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_DOORBELL(iq) \ + (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET)) + +#define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \ + (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET)) + +/*------------------ Masks ----------------*/ +#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29) +/* Number of instructions to be read in one MAC read request. 
+ * setting to Max value(4) + */ +#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25) +#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24) +#define CN23XX_PKT_INPUT_CTL_RST BIT(23) +#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28) +#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22) +#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8) +#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6) +#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5) +#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4) +#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3) +#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2) +#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1) + +/** Rings per Virtual Function [RO] **/ +#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F) +#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48) +/* These bits[47:44][RO] give the Physical function number info within the MAC*/ +#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7) +#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45) +/** These bits[43:32][RO] give the virtual function number info within the PF*/ +#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF) +#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3) +#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29) +#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL) +#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32) +#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL) + +#ifdef __LITTLE_ENDIAN_BITFIELD +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE \ + | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN23XX_PKT_INPUT_CTL_USE_CSR) +#else +#define CN23XX_PKT_INPUT_CTL_MASK \ + (CN23XX_PKT_INPUT_CTL_RDSIZE \ + | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN23XX_PKT_INPUT_CTL_USE_CSR \ + | CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP) +#endif + +/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */ +#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62) +#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48) + +/*############################ OUTPUT QUEUE #########################*/ + +/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */ +#define CN23XX_VF_SLI_OQ_PKT_CONTROL_START 0x10050 + +/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */ +#define CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE 0x10060 + +/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */ +#define CN23XX_VF_SLI_OQ_BASE_ADDR_START64 0x10070 + +/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */ +#define CN23XX_VF_SLI_OQ_PKT_CREDITS_START 0x10080 + +/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */ +#define CN23XX_VF_SLI_OQ_SIZE_START 0x10090 + +/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */ +#define CN23XX_VF_SLI_OQ_PKT_SENT_START 0x100B0 + +/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */ +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0 + +/* Each Output Queue register is at a 16-byte Offset in BAR0 */ +#define CN23XX_VF_OQ_OFFSET 0x20000 + +/*------- Output Queue Macros ---------*/ + +#define CN23XX_VF_SLI_OQ_PKT_CONTROL(oq) \ + (CN23XX_VF_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_BASE_ADDR64(oq) \ + (CN23XX_VF_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_SIZE(oq) \ + (CN23XX_VF_SLI_OQ_SIZE_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq) \ + (CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_PKTS_SENT(oq) \ + (CN23XX_VF_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + 
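The per-ring register macros in this header all expand to the same pattern: a fixed BAR0 start offset plus the ring index times the 0x20000 per-ring stride. A minimal standalone sketch (illustrative only; it uses local copies of the constants shown above rather than the kernel header) of how the packets-sent counter offset for VF output queue 3 is derived:

#include <stdint.h>
#include <stdio.h>

/* Local copies of constants defined above, for illustration only. */
#define VF_OQ_PKT_SENT_START	0x100B0u	/* CN23XX_VF_SLI_OQ_PKT_SENT_START */
#define VF_OQ_OFFSET		0x20000u	/* CN23XX_VF_OQ_OFFSET (per-ring stride) */

/* Mirrors CN23XX_VF_SLI_OQ_PKTS_SENT(oq): start offset + ring index * stride. */
static uint32_t vf_oq_pkts_sent(uint32_t oq)
{
	return VF_OQ_PKT_SENT_START + oq * VF_OQ_OFFSET;
}

int main(void)
{
	/* Queue 3: 0x100B0 + 3 * 0x20000 = 0x700B0 within BAR0. */
	printf("OQ3 packets-sent register offset: 0x%x\n", vf_oq_pkts_sent(3));
	return 0;
}

The VF code earlier in this diff resolves the same offsets against the BAR0 mapping, e.g. cn23xx_setup_vf_oq_regs() storing oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no) as droq->pkts_sent_reg.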
+#define CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq) \ + (CN23XX_VF_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(oq) \ + (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET)) + +/* Macro's for accessing CNT and TIME separately from INT_LEVELS */ +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \ + (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET)) + +#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \ + (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + \ + ((oq) * CN23XX_VF_OQ_OFFSET) + 4) + +/*------------------ Masks ----------------*/ +#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13) +#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12) +#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11) +#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9) +#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8) +#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7) +#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6) +#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5) +#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3) +#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2) +#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1) +#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0) + +/*######################### Mailbox Reg Macros ########################*/ +#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210 +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200 + +#define CN23XX_SLI_MBOX_OFFSET 0x20000 +#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8 + +#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \ + (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET)) + +#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \ + (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \ + ((q) * CN23XX_SLI_MBOX_OFFSET + \ + (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET)) + +/*######################## INTERRUPTS #########################*/ + +#define CN23XX_VF_SLI_INT_SUM_START 0x100D0 + +#define CN23XX_VF_SLI_INT_SUM(q) \ + (CN23XX_VF_SLI_INT_SUM_START + ((q) * CN23XX_VF_IQ_OFFSET)) + +/*------------------ Interrupt Masks ----------------*/ + +#define CN23XX_INTR_PO_INT BIT_ULL(63) +#define CN23XX_INTR_PI_INT BIT_ULL(62) +#define CN23XX_INTR_MBOX_INT BIT_ULL(61) +#define CN23XX_INTR_RESEND BIT_ULL(60) + +#define CN23XX_INTR_CINT_ENB BIT_ULL(48) +#define CN23XX_INTR_MBOX_ENB BIT(0) + +/*############################ MIO #########################*/ +#define CN23XX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL +#define CN23XX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL +#define CN23XX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL +#define CN23XX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL +#define CN23XX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL +#define CN23XX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL +#define CN23XX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL +#define CN23XX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL +#define CN23XX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL +#define CN23XX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL +#define CN23XX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL +#define CN23XX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL +#define CN23XX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL +#define CN23XX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL + +/*############################ RST #########################*/ +#define CN23XX_RST_BOOT 0x0001180006001600ULL + +/*######################## MSIX TABLE #########################*/ + +#define CN23XX_MSIX_TABLE_ADDR_START 0x0 +#define CN23XX_MSIX_TABLE_DATA_START 0x8 + +#define CN23XX_MSIX_TABLE_SIZE 0x10 +#define CN23XX_MSIX_TABLE_ENTRIES 0x41 + +#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32) + +#define CN23XX_MSIX_TABLE_ADDR(idx) \ + 
(CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE)) + +#define CN23XX_MSIX_TABLE_DATA(idx) \ + (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE)) + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c new file mode 100644 index 000000000..39643be8c --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -0,0 +1,737 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_main.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" + +int lio_cn6xxx_soft_reset(struct octeon_device *oct) +{ + octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF); + + dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n"); + + lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST); + octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL); + + lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST); + lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST); + + /* Wait for 10ms as Octeon resets. */ + mdelay(100); + + if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) { + dev_err(&oct->pci_dev->dev, "Soft reset failed\n"); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, "Reset completed\n"); + octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF); + + return 0; +} + +void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct) +{ + u32 val; + + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); + if (val & 0x000c0000) { + dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n", + val & 0x000c0000); + } + + val |= 0xf; /* Enable Link error reporting */ + + dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n"); + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); +} + +void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct, + enum octeon_pcie_mps mps) +{ + u32 val; + u64 r64; + + /* Read config register for MPS */ + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); + + if (mps == PCIE_MPS_DEFAULT) { + mps = ((val & (0x7 << 5)) >> 5); + } else { + val &= ~(0x7 << 5); /* Turn off any MPS bits */ + val |= (mps << 5); /* Set MPS */ + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); + } + + /* Set MPS in DPI_SLI_PRT0_CFG to the same value. 
*/ + r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); + r64 |= (mps << 4); + lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); +} + +void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct, + enum octeon_pcie_mrrs mrrs) +{ + u32 val; + u64 r64; + + /* Read config register for MRRS */ + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); + + if (mrrs == PCIE_MRRS_DEFAULT) { + mrrs = ((val & (0x7 << 12)) >> 12); + } else { + val &= ~(0x7 << 12); /* Turn off any MRRS bits */ + val |= (mrrs << 12); /* Set MRRS */ + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); + } + + /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */ + r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port)); + r64 |= mrrs; + octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64); + + /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */ + r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); + r64 |= mrrs; + lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); +} + +u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct) +{ + /* Bits 29:24 of MIO_RST_BOOT holds the ref. clock multiplier + * for SLI. + */ + return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50; +} + +u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, + u32 time_intr_in_us) +{ + /* This gives the SLI clock per microsec */ + u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct); + + /* core clock per us / oq ticks will be fractional. TO avoid that + * we use the method below. + */ + + /* This gives the clock cycles per millisecond */ + oqticks_per_us *= 1000; + + /* This gives the oq ticks (1024 core clock cycles) per millisecond */ + oqticks_per_us /= 1024; + + /* time_intr is in microseconds. The next 2 steps gives the oq ticks + * corressponding to time_intr. + */ + oqticks_per_us *= time_intr_in_us; + oqticks_per_us /= 1000; + + return oqticks_per_us; +} + +void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct) +{ + /* Select Round-Robin Arb, ES, RO, NS for Input Queues */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL, + CN6XXX_INPUT_CTL_MASK); + + /* Instruction Read Size - Max 4 instructions per PCIE Read */ + octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE, + 0xFFFFFFFFFFFFFFFFULL); + + /* Select PCIE Port for all Input rings. */ + octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT, + (oct->pcie_port * 0x5555555555555555ULL)); +} + +static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct) +{ + u64 pktctl; + + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + + pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL); + + /* 66XX SPECIFIC */ + if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4) + /* Disable RING_EN if only upto 4 rings are used. */ + pktctl &= ~(1 << 4); + else + pktctl |= (1 << 4); + + if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) + pktctl |= 0xF; + else + /* Disable per-port backpressure. 
*/ + pktctl &= ~0xF; + octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl); +} + +void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct) +{ + u32 time_threshold; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + + /* / Select PCI-E Port for all Output queues */ + octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64, + (oct->pcie_port * 0x5555555555555555ULL)); + + if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) { + octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32); + } else { + /* / Set Output queue watermark to 0 to disable backpressure */ + octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0); + } + + /* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0); + + /* Select ES, RO, NS setting from register for Output Queue Packet + * Address + */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF); + + /* No Relaxed Ordering, No Snoop, 64-bit swap for Output + * Queue ScatterList + */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0); + octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0); + + /* / ENDIAN_SPECIFIC CHANGES - 0 works for LE. */ +#ifdef __BIG_ENDIAN_BITFIELD + octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, + 0x5555555555555555ULL); +#else + octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL); +#endif + + /* / No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0); + octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0); + octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64, + 0x5555555555555555ULL); + + /* / Set up interrupt packet and time threshold */ + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, + (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf)); + time_threshold = + lio_cn6xxx_get_oq_ticks(oct, (u32) + CFG_GET_OQ_INTR_TIME(cn6xxx->conf)); + + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold); +} + +static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct) +{ + lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT); + lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B); + lio_cn6xxx_enable_error_reporting(oct); + + lio_cn6xxx_setup_global_input_regs(oct); + lio_cn66xx_setup_pkt_ctl_regs(oct); + lio_cn6xxx_setup_global_output_regs(oct); + + /* Default error timeout value should be 0x200000 to avoid host hang + * when reads invalid register + */ + octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL); + return 0; +} + +void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + + octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0); + + /* Write the start of the input queue's ring and its size */ + octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no), + iq->base_addr_dma); + octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count); + + /* Remember the doorbell & instruction count register addr for this + * queue + */ + iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no); + iq->inst_cnt_reg = oct->mmio[0].hw_addr + + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no); + dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + /* Store the current instruction counter + * (used in flush_iq calculation) + */ + iq->reset_instr_cnt = readl(iq->inst_cnt_reg); +} + +static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no) +{ + lio_cn6xxx_setup_iq_regs(oct, iq_no); + + /* Backpressure for this queue - WMARK set to 
all F's. This effectively + * disables the backpressure mechanism. + */ + octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no), + (0xFFFFFFFFULL << 32)); +} + +void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no) +{ + u32 intr; + struct octeon_droq *droq = oct->droq[oq_no]; + + octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no), + droq->desc_ring_dma); + octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count); + + octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no), + droq->buffer_size); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + droq->pkts_sent_reg = + oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no); + droq->pkts_credit_reg = + oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no); + + /* Enable this output queue to generate Packet Timer Interrupt */ + intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB); + intr |= (1 << oq_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr); + + /* Enable this output queue to generate Packet Timer Interrupt */ + intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); + intr |= (1 << oq_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr); +} + +int lio_cn6xxx_enable_io_queues(struct octeon_device *oct) +{ + u32 mask; + + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE); + mask |= oct->io_qmask.iq64B; + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask); + + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB); + mask |= oct->io_qmask.iq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); + + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); + mask |= oct->io_qmask.oq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask); + + return 0; +} + +void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) +{ + int i; + u32 mask, loop = HZ; + u32 d32; + + /* Reset the Enable bits for Input Queues. */ + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB); + mask ^= oct->io_qmask.iq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); + + /* Wait until hardware indicates that the queues are out of reset. */ + mask = (u32)oct->io_qmask.iq; + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); + while (((d32 & mask) != mask) && loop--) { + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); + schedule_timeout_uninterruptible(1); + } + + /* Reset the doorbell register for each Input queue. */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); + d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); + } + + /* Reset the Enable bits for Output Queues. */ + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); + mask ^= oct->io_qmask.oq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask); + + /* Wait until hardware indicates that the queues are out of reset. */ + loop = HZ; + mask = (u32)oct->io_qmask.oq; + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); + while (((d32 & mask) != mask) && loop--) { + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); + schedule_timeout_uninterruptible(1); + } + ; + + /* Reset the doorbell register for each Output queue. 
*/ + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); + d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); + + d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i)); + octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32); + } + + d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT); + if (d32) + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32); + + d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT); + if (d32) + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32); +} + +void +lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, + u64 core_addr, + u32 idx, + int valid) +{ + u64 bar1; + + if (valid == 0) { + bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); + lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL), + CN6XXX_BAR1_REG(idx, oct->pcie_port)); + bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); + return; + } + + /* Bits 17:4 of the PCI_BAR1_INDEXx stores bits 35:22 of + * the Core Addr + */ + lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK), + CN6XXX_BAR1_REG(idx, oct->pcie_port)); + + bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); +} + +void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, + u32 idx, + u32 mask) +{ + lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port)); +} + +u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx) +{ + return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); +} + +u32 +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq) +{ + u32 new_idx = readl(iq->inst_cnt_reg); + + /* The new instr cnt reg is a 32-bit counter that can roll over. We have + * noted the counter's initial value at init time into + * reset_instr_cnt + */ + if (iq->reset_instr_cnt < new_idx) + new_idx -= iq->reset_instr_cnt; + else + new_idx += (0xffffffff - iq->reset_instr_cnt) + 1; + + /* Modulo of the new index with the IQ size will give us + * the new index. 
+ */ + new_idx %= iq->max_count; + + return new_idx; +} + +void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, + u8 unused __attribute__((unused))) +{ + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE; + + /* Enable Interrupt */ + writeq(mask, cn6xxx->intr_enb_reg64); +} + +void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, + u8 unused __attribute__((unused))) +{ + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + + /* Disable Interrupts */ + writeq(0, cn6xxx->intr_enb_reg64); +} + +static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct) +{ + /* CN63xx Pass2 and newer parts implements the SLI_MAC_NUMBER register + * to determine the PCIE port # + */ + oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff; + + dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port); +} + +static void +lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64) +{ + dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n", + CVM_CAST64(intr64)); +} + +static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) +{ + struct octeon_droq *droq; + int oq_no; + u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb; + u32 droq_cnt_enb, droq_cnt_mask; + + droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); + droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT); + droq_mask = droq_cnt_mask & droq_cnt_enb; + + droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT); + droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB); + droq_mask |= (droq_time_mask & droq_int_enb); + + droq_mask &= oct->io_qmask.oq; + + oct->droq_intr = 0; + + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) { + if (!(droq_mask & BIT_ULL(oq_no))) + continue; + + droq = oct->droq[oq_no]; + pkt_count = octeon_droq_check_hw_for_pkts(droq); + if (pkt_count) { + oct->droq_intr |= BIT_ULL(oq_no); + if (droq->ops.poll_mode) { + u32 value; + u32 reg; + + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + /* disable interrupts for this droq */ + spin_lock + (&cn6xxx->lock_for_droq_int_enb_reg); + reg = CN6XXX_SLI_PKT_TIME_INT_ENB; + value = octeon_read_csr(oct, reg); + value &= ~(1 << oq_no); + octeon_write_csr(oct, reg, value); + reg = CN6XXX_SLI_PKT_CNT_INT_ENB; + value = octeon_read_csr(oct, reg); + value &= ~(1 << oq_no); + octeon_write_csr(oct, reg, value); + + spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg); + } + } + } + + droq_time_mask &= oct->io_qmask.oq; + droq_cnt_mask &= oct->io_qmask.oq; + + /* Reset the PKT_CNT/TIME_INT registers. */ + if (droq_time_mask) + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask); + + if (droq_cnt_mask) /* reset PKT_CNT register:66xx */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask); + + return 0; +} + +irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev) +{ + struct octeon_device *oct = (struct octeon_device *)dev; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + u64 intr64; + + intr64 = readq(cn6xxx->intr_sum_reg64); + + /* If our device has interrupted, then proceed. + * Also check for all f's if interrupt was triggered on an error + * and the PCI read fails. 
+ */ + if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL)) + return IRQ_NONE; + + oct->int_status = 0; + + if (intr64 & CN6XXX_INTR_ERR) + lio_cn6xxx_process_pcie_error_intr(oct, intr64); + + if (intr64 & CN6XXX_INTR_PKT_DATA) { + lio_cn6xxx_process_droq_intr_regs(oct); + oct->int_status |= OCT_DEV_INTR_PKT_DATA; + } + + if (intr64 & CN6XXX_INTR_DMA0_FORCE) + oct->int_status |= OCT_DEV_INTR_DMA0_FORCE; + + if (intr64 & CN6XXX_INTR_DMA1_FORCE) + oct->int_status |= OCT_DEV_INTR_DMA1_FORCE; + + /* Clear the current interrupts */ + writeq(intr64, cn6xxx->intr_sum_reg64); + + return IRQ_HANDLED; +} + +void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, + void *chip, + struct octeon_reg_list *reg_list) +{ + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip; + + reg_list->pci_win_wr_addr_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI); + reg_list->pci_win_wr_addr_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO); + reg_list->pci_win_wr_addr = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64); + + reg_list->pci_win_rd_addr_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI); + reg_list->pci_win_rd_addr_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO); + reg_list->pci_win_rd_addr = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64); + + reg_list->pci_win_wr_data_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI); + reg_list->pci_win_wr_data_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO); + reg_list->pci_win_wr_data = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64); + + reg_list->pci_win_rd_data_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI); + reg_list->pci_win_rd_data_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO); + reg_list->pci_win_rd_data = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64); + + lio_cn6xxx_get_pcie_qlmport(oct); + + cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64; + cn6xxx->intr_mask64 = CN6XXX_INTR_MASK; + cn6xxx->intr_enb_reg64 = + bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port); +} + +int lio_setup_cn66xx_octeon_device(struct octeon_device *oct) +{ + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + + if (octeon_map_pci_barx(oct, 0, 0)) + return 1; + + if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) { + dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + return 1; + } + + spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg); + + oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs; + oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs; + + oct->fn_list.soft_reset = lio_cn6xxx_soft_reset; + oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs; + oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index; + + oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup; + oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write; + oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read; + + oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs; + oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt; + oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt; + + oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues; + oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues; + + lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list); + + cn6xxx->conf = (struct octeon_config *) + oct_get_config_info(oct, LIO_210SV); + if (!cn6xxx->conf) { + 
dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + return 1; + } + + oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct); + + return 0; +} + +int lio_validate_cn6xxx_config_info(struct octeon_device *oct, + struct octeon_config *conf6xxx) +{ + if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) { + dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n", + __func__, CFG_GET_IQ_MAX_Q(conf6xxx), + CN6XXX_MAX_INPUT_QUEUES); + return 1; + } + + if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) { + dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n", + __func__, CFG_GET_OQ_MAX_Q(conf6xxx), + CN6XXX_MAX_OUTPUT_QUEUES); + return 1; + } + + if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR && + CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) { + dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n", + __func__); + return 1; + } + if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) { + dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", + __func__); + return 1; + } + + if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) { + dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n", + __func__); + return 1; + } + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h new file mode 100644 index 000000000..8ed57134e --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h @@ -0,0 +1,98 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn66xx_device.h + * \brief Host Driver: Routines that perform CN66XX specific operations. + */ + +#ifndef __CN66XX_DEVICE_H__ +#define __CN66XX_DEVICE_H__ + +/* Register address and configuration for a CN6XXX devices. 
+ * If device specific changes need to be made then add a struct to include + * device specific fields as shown in the commented section + */ +struct octeon_cn6xxx { + /** PCI interrupt summary register */ + u8 __iomem *intr_sum_reg64; + + /** PCI interrupt enable register */ + u8 __iomem *intr_enb_reg64; + + /** The PCI interrupt mask used by interrupt handler */ + u64 intr_mask64; + + struct octeon_config *conf; + + /* Example additional fields - not used currently + * struct { + * }cn6xyz; + */ + + /* For the purpose of atomic access to interrupt enable reg */ + spinlock_t lock_for_droq_int_enb_reg; + +}; + +enum octeon_pcie_mps { + PCIE_MPS_DEFAULT = -1, /* Use the default setup by BIOS */ + PCIE_MPS_128B = 0, + PCIE_MPS_256B = 1 +}; + +enum octeon_pcie_mrrs { + PCIE_MRRS_DEFAULT = -1, /* Use the default setup by BIOS */ + PCIE_MRRS_128B = 0, + PCIE_MRRS_256B = 1, + PCIE_MRRS_512B = 2, + PCIE_MRRS_1024B = 3, + PCIE_MRRS_2048B = 4, + PCIE_MRRS_4096B = 5 +}; + +/* Common functions for 66xx and 68xx */ +int lio_cn6xxx_soft_reset(struct octeon_device *oct); +void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct); +void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct, + enum octeon_pcie_mps mps); +void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct, + enum octeon_pcie_mrrs mrrs); +void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct); +void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct); +void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no); +void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no); +int lio_cn6xxx_enable_io_queues(struct octeon_device *oct); +void lio_cn6xxx_disable_io_queues(struct octeon_device *oct); +irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev); +void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, + u32 idx, int valid); +void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask); +u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx); +u32 +lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq); +void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused); +void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused); +void cn6xxx_get_pcie_qlmport(struct octeon_device *oct); +void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip, + struct octeon_reg_list *reg_list); +u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct); +u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); +int lio_setup_cn66xx_octeon_device(struct octeon_device *oct); +int lio_validate_cn6xxx_config_info(struct octeon_device *oct, + struct octeon_config *conf6xxx); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h new file mode 100644 index 000000000..7aad40b2a --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -0,0 +1,530 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn66xx_regs.h + * \brief Host Driver: Register Address and Register Mask values for + * Octeon CN66XX devices. + */ + +#ifndef __CN66XX_REGS_H__ +#define __CN66XX_REGS_H__ + +#define CN6XXX_XPANSION_BAR 0x30 + +#define CN6XXX_MSI_CAP 0x50 +#define CN6XXX_MSI_ADDR_LO 0x54 +#define CN6XXX_MSI_ADDR_HI 0x58 +#define CN6XXX_MSI_DATA 0x5C + +#define CN6XXX_PCIE_CAP 0x70 +#define CN6XXX_PCIE_DEVCAP 0x74 +#define CN6XXX_PCIE_DEVCTL 0x78 +#define CN6XXX_PCIE_LINKCAP 0x7C +#define CN6XXX_PCIE_LINKCTL 0x80 +#define CN6XXX_PCIE_SLOTCAP 0x84 +#define CN6XXX_PCIE_SLOTCTL 0x88 + +#define CN6XXX_PCIE_ENH_CAP 0x100 +#define CN6XXX_PCIE_UNCORR_ERR_STATUS 0x104 +#define CN6XXX_PCIE_UNCORR_ERR_MASK 0x108 +#define CN6XXX_PCIE_UNCORR_ERR 0x10C +#define CN6XXX_PCIE_CORR_ERR_STATUS 0x110 +#define CN6XXX_PCIE_CORR_ERR_MASK 0x114 +#define CN6XXX_PCIE_ADV_ERR_CAP 0x118 + +#define CN6XXX_PCIE_ACK_REPLAY_TIMER 0x700 +#define CN6XXX_PCIE_OTHER_MSG 0x704 +#define CN6XXX_PCIE_PORT_FORCE_LINK 0x708 +#define CN6XXX_PCIE_ACK_FREQ 0x70C +#define CN6XXX_PCIE_PORT_LINK_CTL 0x710 +#define CN6XXX_PCIE_LANE_SKEW 0x714 +#define CN6XXX_PCIE_SYM_NUM 0x718 +#define CN6XXX_PCIE_FLTMSK 0x720 + +/* ############## BAR0 Registers ################ */ + +#define CN6XXX_SLI_CTL_PORT0 0x0050 +#define CN6XXX_SLI_CTL_PORT1 0x0060 + +#define CN6XXX_SLI_WINDOW_CTL 0x02E0 +#define CN6XXX_SLI_DBG_DATA 0x0310 +#define CN6XXX_SLI_SCRATCH1 0x03C0 +#define CN6XXX_SLI_SCRATCH2 0x03D0 +#define CN6XXX_SLI_CTL_STATUS 0x0570 + +#define CN6XXX_WIN_WR_ADDR_LO 0x0000 +#define CN6XXX_WIN_WR_ADDR_HI 0x0004 +#define CN6XXX_WIN_WR_ADDR64 CN6XXX_WIN_WR_ADDR_LO + +#define CN6XXX_WIN_RD_ADDR_LO 0x0010 +#define CN6XXX_WIN_RD_ADDR_HI 0x0014 +#define CN6XXX_WIN_RD_ADDR64 CN6XXX_WIN_RD_ADDR_LO + +#define CN6XXX_WIN_WR_DATA_LO 0x0020 +#define CN6XXX_WIN_WR_DATA_HI 0x0024 +#define CN6XXX_WIN_WR_DATA64 CN6XXX_WIN_WR_DATA_LO + +#define CN6XXX_WIN_RD_DATA_LO 0x0040 +#define CN6XXX_WIN_RD_DATA_HI 0x0044 +#define CN6XXX_WIN_RD_DATA64 CN6XXX_WIN_RD_DATA_LO + +#define CN6XXX_WIN_WR_MASK_LO 0x0030 +#define CN6XXX_WIN_WR_MASK_HI 0x0034 +#define CN6XXX_WIN_WR_MASK_REG CN6XXX_WIN_WR_MASK_LO + +/* 1 register (32-bit) to enable Input queues */ +#define CN6XXX_SLI_PKT_INSTR_ENB 0x1000 + +/* 1 register (32-bit) to enable Output queues */ +#define CN6XXX_SLI_PKT_OUT_ENB 0x1010 + +/* 1 register (32-bit) to determine whether Output queues are in reset. */ +#define CN6XXX_SLI_PORT_IN_RST_OQ 0x11F0 + +/* 1 register (32-bit) to determine whether Input queues are in reset. */ +#define CN6XXX_SLI_PORT_IN_RST_IQ 0x11F4 + +/*###################### REQUEST QUEUE #########################*/ + +/* 1 register (32-bit) - instr. size of each input queue. 
*/ +#define CN6XXX_SLI_PKT_INSTR_SIZE 0x1020 + +/* 32 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */ +#define CN6XXX_SLI_IQ_INSTR_COUNT_START 0x2000 + +/* 32 registers for Input Queue Start Addr - SLI_PKT0_INSTR_BADDR */ +#define CN6XXX_SLI_IQ_BASE_ADDR_START64 0x2800 + +/* 32 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */ +#define CN6XXX_SLI_IQ_DOORBELL_START 0x2C00 + +/* 32 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */ +#define CN6XXX_SLI_IQ_SIZE_START 0x3000 + +/* 32 registers for Instruction Header Options - SLI_PKT0_INSTR_HEADER */ +#define CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 0x3400 + +/* 1 register (64-bit) - Back Pressure for each input queue - SLI_PKT0_IN_BP */ +#define CN66XX_SLI_INPUT_BP_START64 0x3800 + +/* Each Input Queue register is at a 16-byte Offset in BAR0 */ +#define CN6XXX_IQ_OFFSET 0x10 + +/* 1 register (32-bit) - ES, RO, NS, Arbitration for Input Queue Data & + * gather list fetches. SLI_PKT_INPUT_CONTROL. + */ +#define CN6XXX_SLI_PKT_INPUT_CONTROL 0x1170 + +/* 1 register (64-bit) - Number of instructions to read at one time + * - 2 bits for each input ring. SLI_PKT_INSTR_RD_SIZE. + */ +#define CN6XXX_SLI_PKT_INSTR_RD_SIZE 0x11A0 + +/* 1 register (64-bit) - Assign Input ring to MAC port + * - 2 bits for each input ring. SLI_PKT_IN_PCIE_PORT. + */ +#define CN6XXX_SLI_IN_PCIE_PORT 0x11B0 + +/*------- Request Queue Macros ---------*/ +#define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \ + (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_SIZE(iq) \ + (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \ + (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_DOORBELL(iq) \ + (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \ + (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN66XX_SLI_IQ_BP64(iq) \ + (CN66XX_SLI_INPUT_BP_START64 + ((iq) * CN6XXX_IQ_OFFSET)) + +/*------------------ Masks ----------------*/ +#define CN6XXX_INPUT_CTL_ROUND_ROBIN_ARB BIT(22) +#define CN6XXX_INPUT_CTL_DATA_NS BIT(8) +#define CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP BIT(6) +#define CN6XXX_INPUT_CTL_DATA_RO BIT(5) +#define CN6XXX_INPUT_CTL_USE_CSR BIT(4) +#define CN6XXX_INPUT_CTL_GATHER_NS BIT(3) +#define CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP BIT(2) +#define CN6XXX_INPUT_CTL_GATHER_RO BIT(1) + +#ifdef __BIG_ENDIAN_BITFIELD +#define CN6XXX_INPUT_CTL_MASK \ + (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN6XXX_INPUT_CTL_USE_CSR \ + | CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP) +#else +#define CN6XXX_INPUT_CTL_MASK \ + (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN6XXX_INPUT_CTL_USE_CSR) +#endif + +/*############################ OUTPUT QUEUE #########################*/ + +/* 32 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */ +#define CN6XXX_SLI_OQ0_BUFF_INFO_SIZE 0x0C00 + +/* 32 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */ +#define CN6XXX_SLI_OQ_BASE_ADDR_START64 0x1400 + +/* 32 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */ +#define CN6XXX_SLI_OQ_PKT_CREDITS_START 0x1800 + +/* 32 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */ +#define CN6XXX_SLI_OQ_SIZE_START 0x1C00 + +/* 32 registers for Output Queue Packet Count - SLI_PKT0_CNTS */ +#define CN6XXX_SLI_OQ_PKT_SENT_START 0x2400 + +/* Each Output Queue register is at a 16-byte Offset in BAR0 */ +#define CN6XXX_OQ_OFFSET 0x10 + +/* 1 register (32-bit) - 1 bit 
for each output queue + * - Relaxed Ordering setting for reading Output Queues descriptors + * - SLI_PKT_SLIST_ROR + */ +#define CN6XXX_SLI_PKT_SLIST_ROR 0x1030 + +/* 1 register (32-bit) - 1 bit for each output queue + * - No Snoop mode for reading Output Queues descriptors + * - SLI_PKT_SLIST_NS + */ +#define CN6XXX_SLI_PKT_SLIST_NS 0x1040 + +/* 1 register (64-bit) - 2 bits for each output queue + * - Endian-Swap mode for reading Output Queue descriptors + * - SLI_PKT_SLIST_ES + */ +#define CN6XXX_SLI_PKT_SLIST_ES64 0x1050 + +/* 1 register (32-bit) - 1 bit for each output queue + * - InfoPtr mode for Output Queues. + * - SLI_PKT_IPTR + */ +#define CN6XXX_SLI_PKT_IPTR 0x1070 + +/* 1 register (32-bit) - 1 bit for each output queue + * - DPTR format selector for Output queues. + * - SLI_PKT_DPADDR + */ +#define CN6XXX_SLI_PKT_DPADDR 0x1080 + +/* 1 register (32-bit) - 1 bit for each output queue + * - Relaxed Ordering setting for reading Output Queues data + * - SLI_PKT_DATA_OUT_ROR + */ +#define CN6XXX_SLI_PKT_DATA_OUT_ROR 0x1090 + +/* 1 register (32-bit) - 1 bit for each output queue + * - No Snoop mode for reading Output Queues data + * - SLI_PKT_DATA_OUT_NS + */ +#define CN6XXX_SLI_PKT_DATA_OUT_NS 0x10A0 + +/* 1 register (64-bit) - 2 bits for each output queue + * - Endian-Swap mode for reading Output Queue data + * - SLI_PKT_DATA_OUT_ES + */ +#define CN6XXX_SLI_PKT_DATA_OUT_ES64 0x10B0 + +/* 1 register (32-bit) - 1 bit for each output queue + * - Controls whether SLI_PKTn_CNTS is incremented for bytes or for packets. + * - SLI_PKT_OUT_BMODE + */ +#define CN6XXX_SLI_PKT_OUT_BMODE 0x10D0 + +/* 1 register (64-bit) - 2 bits for each output queue + * - Assign PCIE port for Output queues + * - SLI_PKT_PCIE_PORT. + */ +#define CN6XXX_SLI_PKT_PCIE_PORT64 0x10E0 + +/* 1 (64-bit) register for Output Queue Packet Count Interrupt Threshold + * & Time Threshold. The same setting applies to all 32 queues. + * The register is defined as a 64-bit registers, but we use the + * 32-bit offsets to define distinct addresses. + */ +#define CN6XXX_SLI_OQ_INT_LEVEL_PKTS 0x1120 +#define CN6XXX_SLI_OQ_INT_LEVEL_TIME 0x1124 + +/* 1 (64-bit register) for Output Queue backpressure across all rings. */ +#define CN6XXX_SLI_OQ_WMARK 0x1180 + +/* 1 register to control output queue global backpressure & ring enable. */ +#define CN6XXX_SLI_PKT_CTL 0x1220 + +/*------- Output Queue Macros ---------*/ +#define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \ + (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_SIZE(oq) \ + (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \ + (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_PKTS_SENT(oq) \ + (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \ + (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET)) + +/*######################### DMA Counters #########################*/ + +/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. 
*/ +#define CN6XXX_DMA_CNT_START 0x0400 + +/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values + * SLI_DMA_0_TIM + */ +#define CN6XXX_DMA_TIM_START 0x0420 + +/* 2 registers (64-bit) - DMA count & Time Interrupt threshold - + * SLI_DMA_0_INT_LEVEL + */ +#define CN6XXX_DMA_INT_LEVEL_START 0x03E0 + +/* Each DMA register is at a 16-byte Offset in BAR0 */ +#define CN6XXX_DMA_OFFSET 0x10 + +/*---------- DMA Counter Macros ---------*/ +#define CN6XXX_DMA_CNT(dq) \ + (CN6XXX_DMA_CNT_START + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_INT_LEVEL(dq) \ + (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_PKT_INT_LEVEL(dq) \ + (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_TIME_INT_LEVEL(dq) \ + (CN6XXX_DMA_INT_LEVEL_START + 4 + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_TIM(dq) \ + (CN6XXX_DMA_TIM_START + ((dq) * CN6XXX_DMA_OFFSET)) + +/*######################## INTERRUPTS #########################*/ + +/* 1 register (64-bit) for Interrupt Summary */ +#define CN6XXX_SLI_INT_SUM64 0x0330 + +/* 1 register (64-bit) for Interrupt Enable */ +#define CN6XXX_SLI_INT_ENB64_PORT0 0x0340 +#define CN6XXX_SLI_INT_ENB64_PORT1 0x0350 + +/* 1 register (32-bit) to enable Output Queue Packet/Byte Count Interrupt */ +#define CN6XXX_SLI_PKT_CNT_INT_ENB 0x1150 + +/* 1 register (32-bit) to enable Output Queue Packet Timer Interrupt */ +#define CN6XXX_SLI_PKT_TIME_INT_ENB 0x1160 + +/* 1 register (32-bit) to indicate which Output Queue reached pkt threshold */ +#define CN6XXX_SLI_PKT_CNT_INT 0x1130 + +/* 1 register (32-bit) to indicate which Output Queue reached time threshold */ +#define CN6XXX_SLI_PKT_TIME_INT 0x1140 + +/*------------------ Interrupt Masks ----------------*/ + +#define CN6XXX_INTR_RML_TIMEOUT_ERR BIT(1) +#define CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR BIT(2) +#define CN6XXX_INTR_IO2BIG_ERR BIT(3) +#define CN6XXX_INTR_PKT_COUNT BIT(4) +#define CN6XXX_INTR_PKT_TIME BIT(5) +#define CN6XXX_INTR_M0UPB0_ERR BIT(8) +#define CN6XXX_INTR_M0UPWI_ERR BIT(9) +#define CN6XXX_INTR_M0UNB0_ERR BIT(10) +#define CN6XXX_INTR_M0UNWI_ERR BIT(11) +#define CN6XXX_INTR_M1UPB0_ERR BIT(12) +#define CN6XXX_INTR_M1UPWI_ERR BIT(13) +#define CN6XXX_INTR_M1UNB0_ERR BIT(14) +#define CN6XXX_INTR_M1UNWI_ERR BIT(15) +#define CN6XXX_INTR_MIO_INT0 BIT(16) +#define CN6XXX_INTR_MIO_INT1 BIT(17) +#define CN6XXX_INTR_MAC_INT0 BIT(18) +#define CN6XXX_INTR_MAC_INT1 BIT(19) + +#define CN6XXX_INTR_DMA0_FORCE BIT_ULL(32) +#define CN6XXX_INTR_DMA1_FORCE BIT_ULL(33) +#define CN6XXX_INTR_DMA0_COUNT BIT_ULL(34) +#define CN6XXX_INTR_DMA1_COUNT BIT_ULL(35) +#define CN6XXX_INTR_DMA0_TIME BIT_ULL(36) +#define CN6XXX_INTR_DMA1_TIME BIT_ULL(37) +#define CN6XXX_INTR_INSTR_DB_OF_ERR BIT_ULL(48) +#define CN6XXX_INTR_SLIST_DB_OF_ERR BIT_ULL(49) +#define CN6XXX_INTR_POUT_ERR BIT_ULL(50) +#define CN6XXX_INTR_PIN_BP_ERR BIT_ULL(51) +#define CN6XXX_INTR_PGL_ERR BIT_ULL(52) +#define CN6XXX_INTR_PDI_ERR BIT_ULL(53) +#define CN6XXX_INTR_POP_ERR BIT_ULL(54) +#define CN6XXX_INTR_PINS_ERR BIT_ULL(55) +#define CN6XXX_INTR_SPRT0_ERR BIT_ULL(56) +#define CN6XXX_INTR_SPRT1_ERR BIT_ULL(57) +#define CN6XXX_INTR_ILL_PAD_ERR BIT_ULL(60) + +#define CN6XXX_INTR_DMA0_DATA (CN6XXX_INTR_DMA0_TIME) + +#define CN6XXX_INTR_DMA1_DATA (CN6XXX_INTR_DMA1_TIME) + +#define CN6XXX_INTR_DMA_DATA \ + (CN6XXX_INTR_DMA0_DATA | CN6XXX_INTR_DMA1_DATA) + +#define CN6XXX_INTR_PKT_DATA (CN6XXX_INTR_PKT_TIME | \ + CN6XXX_INTR_PKT_COUNT) + +/* Sum of interrupts for all PCI-Express Data Interrupts */ +#define 
CN6XXX_INTR_PCIE_DATA \ + (CN6XXX_INTR_DMA_DATA | CN6XXX_INTR_PKT_DATA) + +#define CN6XXX_INTR_MIO \ + (CN6XXX_INTR_MIO_INT0 | CN6XXX_INTR_MIO_INT1) + +#define CN6XXX_INTR_MAC \ + (CN6XXX_INTR_MAC_INT0 | CN6XXX_INTR_MAC_INT1) + +/* Sum of interrupts for error events */ +#define CN6XXX_INTR_ERR \ + (CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR \ + | CN6XXX_INTR_IO2BIG_ERR \ + | CN6XXX_INTR_M0UPB0_ERR \ + | CN6XXX_INTR_M0UPWI_ERR \ + | CN6XXX_INTR_M0UNB0_ERR \ + | CN6XXX_INTR_M0UNWI_ERR \ + | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UPWI_ERR \ + | CN6XXX_INTR_M1UNB0_ERR \ + | CN6XXX_INTR_M1UNWI_ERR \ + | CN6XXX_INTR_INSTR_DB_OF_ERR \ + | CN6XXX_INTR_SLIST_DB_OF_ERR \ + | CN6XXX_INTR_POUT_ERR \ + | CN6XXX_INTR_PIN_BP_ERR \ + | CN6XXX_INTR_PGL_ERR \ + | CN6XXX_INTR_PDI_ERR \ + | CN6XXX_INTR_POP_ERR \ + | CN6XXX_INTR_PINS_ERR \ + | CN6XXX_INTR_SPRT0_ERR \ + | CN6XXX_INTR_SPRT1_ERR \ + | CN6XXX_INTR_ILL_PAD_ERR) + +/* Programmed Mask for Interrupt Sum */ +#define CN6XXX_INTR_MASK \ + (CN6XXX_INTR_PCIE_DATA \ + | CN6XXX_INTR_DMA0_FORCE \ + | CN6XXX_INTR_DMA1_FORCE \ + | CN6XXX_INTR_MIO \ + | CN6XXX_INTR_MAC \ + | CN6XXX_INTR_ERR) + +#define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80 +#define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90 +#define CN6XXX_SLI_S2M_PORTX_CTL(port) \ + (CN6XXX_SLI_S2M_PORT0_CTL + ((port) * 0x10)) + +#define CN6XXX_SLI_INT_ENB64(port) \ + (CN6XXX_SLI_INT_ENB64_PORT0 + ((port) * 0x10)) + +#define CN6XXX_SLI_MAC_NUMBER 0x3E00 + +/* CN6XXX BAR1 Index registers. */ +#define CN6XXX_PEM_BAR1_INDEX000 0x00011800C00000A8ULL +#define CN6XXX_PEM_OFFSET 0x0000000001000000ULL + +#define CN6XXX_BAR1_INDEX_START CN6XXX_PEM_BAR1_INDEX000 +#define CN6XXX_PCI_BAR1_OFFSET 0x8 + +#define CN6XXX_BAR1_REG(idx, port) \ + (CN6XXX_BAR1_INDEX_START + ((port) * CN6XXX_PEM_OFFSET) + \ + (CN6XXX_PCI_BAR1_OFFSET * (idx))) + +/*############################ DPI #########################*/ + +#define CN6XXX_DPI_CTL 0x0001df0000000040ULL + +#define CN6XXX_DPI_DMA_CONTROL 0x0001df0000000048ULL + +#define CN6XXX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL + +#define CN6XXX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL + +#define CN6XXX_DPI_REQ_ERR_RST 0x0001df0000000060ULL + +#define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL + +#define CN6XXX_DPI_DMA_ENG_ENB(q_no) \ + (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8)) + +#define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL + +#define CN6XXX_DPI_DMA_ENG_BUF(q_no) \ + (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8)) + +#define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL +#define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL +#define CN6XXX_DPI_SLI_PRTX_CFG(port) \ + (CN6XXX_DPI_SLI_PRT0_CFG + ((port) * 0x10)) + +#define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58) +#define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57) +#define CN6XXX_DPI_DMA_PKT_EN BIT_ULL(56) +#define CN6XXX_DPI_DMA_O_ES BIT_ULL(15) +#define CN6XXX_DPI_DMA_O_MODE BIT_ULL(14) + +#define CN6XXX_DPI_DMA_CTL_MASK \ + (CN6XXX_DPI_DMA_COMMIT_MODE | \ + CN6XXX_DPI_DMA_PKT_HP | \ + CN6XXX_DPI_DMA_PKT_EN | \ + CN6XXX_DPI_DMA_O_ES | \ + CN6XXX_DPI_DMA_O_MODE) + +/*############################ CIU #########################*/ + +#define CN6XXX_CIU_SOFT_BIST 0x0001070000000738ULL +#define CN6XXX_CIU_SOFT_RST 0x0001070000000740ULL + +/*############################ MIO #########################*/ +#define CN6XXX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL +#define CN6XXX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL +#define CN6XXX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL +#define CN6XXX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL +#define CN6XXX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL +#define 
CN6XXX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL +#define CN6XXX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL +#define CN6XXX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL +#define CN6XXX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL +#define CN6XXX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL +#define CN6XXX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL +#define CN6XXX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL +#define CN6XXX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL +#define CN6XXX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL + +#define CN6XXX_MIO_QLM4_CFG 0x00011800000015B0ULL +#define CN6XXX_MIO_RST_BOOT 0x0001180000001600ULL + +#define CN6XXX_MIO_QLM_CFG_MASK 0x7 + +/*############################ LMC #########################*/ + +#define CN6XXX_LMC0_RESET_CTL 0x0001180088000180ULL +#define CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c new file mode 100644 index 000000000..30254e4cf --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c @@ -0,0 +1,183 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_main.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_device.h" +#include "cn68xx_regs.h" + +static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct) +{ + u32 i; + u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 }; + + lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL); + dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n", + lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL)); + + for (i = 0; i < 6; i++) { + /* Prevent service of instruction queue for all DMA engines + * Engine 5 will remain 0. Engines 0 - 4 will be setup by + * core. + */ + lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i)); + lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i)); + dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i, + lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i))); + } + + /* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set + * separately. 
+ */ + + lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL); + dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n", + lio_pci_readq(oct, CN6XXX_DPI_CTL)); +} + +static int lio_cn68xx_soft_reset(struct octeon_device *oct) +{ + lio_cn6xxx_soft_reset(oct); + lio_cn68xx_set_dpi_regs(oct); + + return 0; +} + +static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct) +{ + struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip; + u64 pktctl, tx_pipe, max_oqs; + + pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL); + + /* 68XX specific */ + max_oqs = CFG_GET_OQ_MAX_Q(CHIP_CONF(oct, cn6xxx)); + tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE); + tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */ + tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */ + octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe); + + if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf)) + pktctl |= 0xF; + else + /* Disable per-port backpressure. */ + pktctl &= ~0xF; + octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl); +} + +static int lio_cn68xx_setup_device_regs(struct octeon_device *oct) +{ + lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT); + lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B); + lio_cn6xxx_enable_error_reporting(oct); + + lio_cn6xxx_setup_global_input_regs(oct); + lio_cn68xx_setup_pkt_ctl_regs(oct); + lio_cn6xxx_setup_global_output_regs(oct); + + /* Default error timeout value should be 0x200000 to avoid host hang + * when reads invalid register + */ + octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL); + + return 0; +} + +static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct) +{ + u32 val = 0; + + /* Set M_VEND1_DRP and M_VEND0_DRP bits */ + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val); + val |= 0x3; + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val); +} + +static int lio_is_210nv(struct octeon_device *oct) +{ + u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG); + + return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0); +} + +int lio_setup_cn68xx_octeon_device(struct octeon_device *oct) +{ + struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip; + u16 card_type = LIO_410NV; + + if (octeon_map_pci_barx(oct, 0, 0)) + return 1; + + if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) { + dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + return 1; + } + + spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg); + + oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs; + oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs; + + oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs; + oct->fn_list.soft_reset = lio_cn68xx_soft_reset; + oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs; + oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index; + + oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup; + oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write; + oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read; + + oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt; + oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt; + + oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues; + oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues; + + lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list); + + /* Determine variant of card */ + if (lio_is_210nv(oct)) + card_type = LIO_210NV; + + cn68xx->conf = (struct octeon_config *) + oct_get_config_info(oct, card_type); + if (!cn68xx->conf) { 
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n", + __func__, + (card_type == LIO_410NV) ? LIO_410NV_NAME : + LIO_210NV_NAME); + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + return 1; + } + + oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct); + + lio_cn68xx_vendor_message_fix(oct); + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h new file mode 100644 index 000000000..66b8d6bf5 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h @@ -0,0 +1,27 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn68xx_device.h + * \brief Host Driver: Routines that perform CN68XX specific operations. + */ + +#ifndef __CN68XX_DEVICE_H__ +#define __CN68XX_DEVICE_H__ + +int lio_setup_cn68xx_octeon_device(struct octeon_device *oct); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h new file mode 100644 index 000000000..0b742f09e --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h @@ -0,0 +1,45 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file cn68xx_regs.h + * \brief Host Driver: Register Address and Register Mask values for + * Octeon CN68XX devices. The register map for CN66XX is the same + * for most registers. This file has the other registers that are + * 68XX-specific. + */ + +#ifndef __CN68XX_REGS_H__ +#define __CN68XX_REGS_H__ + +/*###################### REQUEST QUEUE #########################*/ + +#define CN68XX_SLI_IQ_PORT0_PKIND 0x0800 + +#define CN68XX_SLI_IQ_PORT_PKIND(iq) \ + (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET)) + +/*############################ OUTPUT QUEUE #########################*/ + +/* Starting pipe number and number of pipes used by the SLI packet output. 
*/ +#define CN68XX_SLI_TX_PIPE 0x1230 + +/*######################## INTERRUPTS #########################*/ + +/*------------------ Interrupt Masks ----------------*/ +#define CN68XX_INTR_PIPE_ERR BIT_ULL(61) + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c new file mode 100644 index 000000000..882b2be06 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c @@ -0,0 +1,1814 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#include <linux/pci.h> +#include <linux/if_vlan.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" + +/* OOM task polling interval */ +#define LIO_OOM_POLL_INTERVAL_MS 250 + +#define OCTNIC_MAX_SG MAX_SKB_FRAGS + +/** + * lio_delete_glists - Delete gather lists + * @lio: per-network private data + */ +void lio_delete_glists(struct lio *lio) +{ + struct octnic_gather *g; + int i; + + kfree(lio->glist_lock); + lio->glist_lock = NULL; + + if (!lio->glist) + return; + + for (i = 0; i < lio->oct_dev->num_iqs; i++) { + do { + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[i]); + kfree(g); + } while (g); + + if (lio->glists_virt_base && lio->glists_virt_base[i] && + lio->glists_dma_base && lio->glists_dma_base[i]) { + lio_dma_free(lio->oct_dev, + lio->glist_entry_size * lio->tx_qsize, + lio->glists_virt_base[i], + lio->glists_dma_base[i]); + } + } + + kfree(lio->glists_virt_base); + lio->glists_virt_base = NULL; + + kfree(lio->glists_dma_base); + lio->glists_dma_base = NULL; + + kfree(lio->glist); + lio->glist = NULL; +} + +/** + * lio_setup_glists - Setup gather lists + * @oct: octeon_device + * @lio: per-network private data + * @num_iqs: count of iqs to allocate + */ +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) +{ + struct octnic_gather *g; + int i, j; + + lio->glist_lock = + kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL); + if (!lio->glist_lock) + return -ENOMEM; + + lio->glist = + kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL); + if (!lio->glist) { + kfree(lio->glist_lock); + lio->glist_lock = NULL; + return -ENOMEM; + } + + lio->glist_entry_size = + ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + + /* allocate memory to store virtual and dma base address of + * per glist consistent memory + */ + lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), + GFP_KERNEL); + lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), + GFP_KERNEL); + + if (!lio->glists_virt_base || !lio->glists_dma_base) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (i = 0; i < num_iqs; i++) { + int numa_node = 
dev_to_node(&oct->pci_dev->dev); + + spin_lock_init(&lio->glist_lock[i]); + + INIT_LIST_HEAD(&lio->glist[i]); + + lio->glists_virt_base[i] = + lio_dma_alloc(oct, + lio->glist_entry_size * lio->tx_qsize, + &lio->glists_dma_base[i]); + + if (!lio->glists_virt_base[i]) { + lio_delete_glists(lio); + return -ENOMEM; + } + + for (j = 0; j < lio->tx_qsize; j++) { + g = kzalloc_node(sizeof(*g), GFP_KERNEL, + numa_node); + if (!g) + g = kzalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + + g->sg = lio->glists_virt_base[i] + + (j * lio->glist_entry_size); + + g->sg_dma_ptr = lio->glists_dma_base[i] + + (j * lio->glist_entry_size); + + list_add_tail(&g->list, &lio->glist[i]); + } + + if (j != lio->tx_qsize) { + lio_delete_glists(lio); + return -ENOMEM; + } + } + + return 0; +} + +int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = cmd; + nctrl.ncmd.s.param1 = param1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + return ret; +} + +void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, + unsigned int bytes_compl) +{ + struct netdev_queue *netdev_queue = txq; + + netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl); +} + +void octeon_update_tx_completion_counters(void *buf, int reqtype, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + struct octnet_buf_free_info *finfo; + struct sk_buff *skb = NULL; + struct octeon_soft_command *sc; + + switch (reqtype) { + case REQTYPE_NORESP_NET: + case REQTYPE_NORESP_NET_SG: + finfo = buf; + skb = finfo->skb; + break; + + case REQTYPE_RESP_NET_SG: + case REQTYPE_RESP_NET: + sc = buf; + skb = sc->callback_arg; + break; + + default: + return; + } + + (*pkts_compl)++; + *bytes_compl += skb->len; +} + +int octeon_report_sent_bytes_to_bql(void *buf, int reqtype) +{ + struct octnet_buf_free_info *finfo; + struct sk_buff *skb; + struct octeon_soft_command *sc; + struct netdev_queue *txq; + + switch (reqtype) { + case REQTYPE_NORESP_NET: + case REQTYPE_NORESP_NET_SG: + finfo = buf; + skb = finfo->skb; + break; + + case REQTYPE_RESP_NET_SG: + case REQTYPE_RESP_NET: + sc = buf; + skb = sc->callback_arg; + break; + + default: + return 0; + } + + txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb)); + netdev_tx_sent_queue(txq, skb->len); + + return netif_xmit_stopped(txq); +} + +void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) +{ + struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr; + struct net_device *netdev = (struct net_device *)nctrl->netpndev; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + u8 *mac; + + if (nctrl->sc_status) + return; + + switch (nctrl->ncmd.s.cmd) { + case OCTNET_CMD_CHANGE_DEVFLAGS: + case OCTNET_CMD_SET_MULTI_LIST: + case OCTNET_CMD_SET_UC_LIST: + break; + + case OCTNET_CMD_CHANGE_MACADDR: + mac = ((u8 *)&nctrl->udd[0]) + 2; + if (nctrl->ncmd.s.param1) { + /* vfidx is 0 based, but vf_num (param1) is 1 based */ + int vfidx = nctrl->ncmd.s.param1 - 1; + bool mac_is_admin_assigned = nctrl->ncmd.s.param2; + + if 
(mac_is_admin_assigned) + netif_info(lio, probe, lio->netdev, + "MAC Address %pM is configured for VF %d\n", + mac, vfidx); + } else { + netif_info(lio, probe, lio->netdev, + " MACAddr changed to %pM\n", + mac); + } + break; + + case OCTNET_CMD_GPIO_ACCESS: + netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n"); + + break; + + case OCTNET_CMD_ID_ACTIVE: + netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n"); + + break; + + case OCTNET_CMD_LRO_ENABLE: + dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name); + break; + + case OCTNET_CMD_LRO_DISABLE: + dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n", + netdev->name); + break; + + case OCTNET_CMD_VERBOSE_ENABLE: + dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n", + netdev->name); + break; + + case OCTNET_CMD_VERBOSE_DISABLE: + dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n", + netdev->name); + break; + + case OCTNET_CMD_VLAN_FILTER_CTL: + if (nctrl->ncmd.s.param1) + dev_info(&oct->pci_dev->dev, + "%s VLAN filter enabled\n", netdev->name); + else + dev_info(&oct->pci_dev->dev, + "%s VLAN filter disabled\n", netdev->name); + break; + + case OCTNET_CMD_ADD_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n", + netdev->name, nctrl->ncmd.s.param1); + break; + + case OCTNET_CMD_DEL_VLAN_FILTER: + dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n", + netdev->name, nctrl->ncmd.s.param1); + break; + + case OCTNET_CMD_SET_SETTINGS: + dev_info(&oct->pci_dev->dev, "%s settings changed\n", + netdev->name); + + break; + + /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL" + * Command passed by NIC driver + */ + case OCTNET_CMD_TNL_RX_CSUM_CTL: + if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) { + netif_info(lio, probe, lio->netdev, + "RX Checksum Offload Enabled\n"); + } else if (nctrl->ncmd.s.param1 == + OCTNET_CMD_RXCSUM_DISABLE) { + netif_info(lio, probe, lio->netdev, + "RX Checksum Offload Disabled\n"); + } + break; + + /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL" + * Command passed by NIC driver + */ + case OCTNET_CMD_TNL_TX_CSUM_CTL: + if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) { + netif_info(lio, probe, lio->netdev, + "TX Checksum Offload Enabled\n"); + } else if (nctrl->ncmd.s.param1 == + OCTNET_CMD_TXCSUM_DISABLE) { + netif_info(lio, probe, lio->netdev, + "TX Checksum Offload Disabled\n"); + } + break; + + /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG" + * Command passed by NIC driver + */ + case OCTNET_CMD_VXLAN_PORT_CONFIG: + if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) { + netif_info(lio, probe, lio->netdev, + "VxLAN Destination UDP PORT:%d ADDED\n", + nctrl->ncmd.s.param1); + } else if (nctrl->ncmd.s.more == + OCTNET_CMD_VXLAN_PORT_DEL) { + netif_info(lio, probe, lio->netdev, + "VxLAN Destination UDP PORT:%d DELETED\n", + nctrl->ncmd.s.param1); + } + break; + + case OCTNET_CMD_SET_FLOW_CTL: + netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n"); + break; + + case OCTNET_CMD_QUEUE_COUNT_CTL: + netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n", + nctrl->ncmd.s.param1); + break; + + default: + dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__, + nctrl->ncmd.s.cmd); + } +} + +void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac) +{ + bool macaddr_changed = false; + struct net_device *netdev; + struct lio *lio; + + rtnl_lock(); + + netdev = oct->props[0].netdev; + lio = GET_LIO(netdev); + + lio->linfo.macaddr_is_admin_asgnd = true; + + if 
(!ether_addr_equal(netdev->dev_addr, mac)) { + macaddr_changed = true; + eth_hw_addr_set(netdev, mac); + ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac); + call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev); + } + + rtnl_unlock(); + + if (macaddr_changed) + dev_info(&oct->pci_dev->dev, + "PF changed VF's MAC address to %pM\n", mac); + + /* no need to notify the firmware of the macaddr change because + * the PF did that already + */ +} + +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq) +{ + struct net_device *netdev = oct->props[0].netdev; + struct lio *lio = GET_LIO(netdev); + struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no]; + + queue_delayed_work(wq->wq, &wq->wk.work, + msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS)); +} + +static void octnet_poll_check_rxq_oom_status(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + struct octeon_device *oct = lio->oct_dev; + int q_no = wk->ctxul; + struct octeon_droq *droq = oct->droq[q_no]; + + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq) + return; + + if (octeon_retry_droq_refill(droq)) + octeon_schedule_rxq_oom_work(oct, droq); +} + +int setup_rx_oom_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q, q_no; + + for (q = 0; q < oct->num_oqs; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + wq = &lio->rxq_status_wq[q_no]; + wq->wq = alloc_workqueue("rxq-oom-status", + WQ_MEM_RECLAIM, 0); + if (!wq->wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&wq->wk.work, + octnet_poll_check_rxq_oom_status); + wq->wk.ctxptr = lio; + wq->wk.ctxul = q_no; + } + + return 0; +} + +void cleanup_rx_oom_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct cavium_wq *wq; + int q_no; + + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + wq = &lio->rxq_status_wq[q_no]; + if (wq->wq) { + cancel_delayed_work_sync(&wq->wk.work); + destroy_workqueue(wq->wq); + wq->wq = NULL; + } + } +} + +/* Runs in interrupt context. */ +static void lio_update_txq_status(struct octeon_device *oct, int iq_num) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; + struct net_device *netdev; + struct lio *lio; + + netdev = oct->props[iq->ifidx].netdev; + + /* This is needed because the first IQ does not have + * a netdev associated with it. + */ + if (!netdev) + return; + + lio = GET_LIO(netdev); + if (__netif_subqueue_stopped(netdev, iq->q_index) && + lio->linfo.link.s.link_up && + (!octnet_iq_is_full(oct, iq_num))) { + netif_wake_subqueue(netdev, iq->q_index); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num, + tx_restart, 1); + } +} + +/** + * octeon_setup_droq - Setup output queue + * @oct: octeon device + * @q_no: which queue + * @num_descs: how many descriptors + * @desc_size: size of each descriptor + * @app_ctx: application context + */ +static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, + int desc_size, void *app_ctx) +{ + int ret_val; + + dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); + /* droq creation and local register settings. 
*/ + ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); + if (ret_val < 0) + return ret_val; + + if (ret_val == 1) { + dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no); + return 0; + } + + /* Enable the droq queues */ + octeon_set_droq_pkt_op(oct, q_no, 1); + + /* Send Credit for Octeon Output queues. Credits are always + * sent after the output queue is enabled. + */ + writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg); + + return ret_val; +} + +/** + * liquidio_push_packet - Routine to push packets arriving on Octeon interface upto network layer. + * @octeon_id:octeon device id. + * @skbuff: skbuff struct to be passed to network layer. + * @len: size of total data received. + * @rh: Control header associated with the packet + * @param: additional control data with the packet + * @arg: farg registered in droq_ops + */ +static void +liquidio_push_packet(u32 __maybe_unused octeon_id, + void *skbuff, + u32 len, + union octeon_rh *rh, + void *param, + void *arg) +{ + struct net_device *netdev = (struct net_device *)arg; + struct octeon_droq *droq = + container_of(param, struct octeon_droq, napi); + struct sk_buff *skb = (struct sk_buff *)skbuff; + struct skb_shared_hwtstamps *shhwtstamps; + struct napi_struct *napi = param; + u16 vtag = 0; + u32 r_dh_off; + u64 ns; + + if (netdev) { + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + /* Do not proceed if the interface is not in RUNNING state. */ + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { + recv_buffer_free(skb); + droq->stats.rx_dropped++; + return; + } + + skb->dev = netdev; + + skb_record_rx_queue(skb, droq->q_no); + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + /* For Paged allocation use the frags */ + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + skb_copy_to_linear_data(skb, page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } + + r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT; + + if (oct->ptp_enable) { + if (rh->r_dh.has_hwtstamp) { + /* timestamp is included from the hardware at + * the beginning of the packet. + */ + if (ifstate_check + (lio, + LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { + /* Nanoseconds are in the first 64-bits + * of the packet. 
+ */ + memcpy(&ns, (skb->data + r_dh_off), + sizeof(ns)); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(ns + + lio->ptp_adjust); + } + } + } + + if (rh->r_dh.has_hash) { + __be32 *hash_be = (__be32 *)(skb->data + r_dh_off); + u32 hash = be32_to_cpu(*hash_be); + + skb_set_hash(skb, hash, PKT_HASH_TYPE_L4); + r_dh_off -= BYTES_PER_DHLEN_UNIT; + } + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); + + if ((netdev->features & NETIF_F_RXCSUM) && + (((rh->r_dh.encap_on) && + (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) || + (!(rh->r_dh.encap_on) && + ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) == + CNNIC_CSUM_VERIFIED)))) + /* checksum has already been verified */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + /* Setting Encapsulation field on basis of status received + * from the firmware + */ + if (rh->r_dh.encap_on) { + skb->encapsulation = 1; + skb->csum_level = 1; + droq->stats.rx_vxlan++; + } + + /* inbound VLAN tag */ + if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && + rh->r_dh.vlan) { + u16 priority = rh->r_dh.priority; + u16 vid = rh->r_dh.vlan; + + vtag = (priority << VLAN_PRIO_SHIFT) | vid; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag); + } + + napi_gro_receive(napi, skb); + + droq->stats.rx_bytes_received += len - + rh->r_dh.len * BYTES_PER_DHLEN_UNIT; + droq->stats.rx_pkts_received++; + } else { + recv_buffer_free(skb); + } +} + +/** + * napi_schedule_wrapper - wrapper for calling napi_schedule + * @param: parameters to pass to napi_schedule + * + * Used when scheduling on different CPUs + */ +static void napi_schedule_wrapper(void *param) +{ + struct napi_struct *napi = param; + + napi_schedule(napi); +} + +/** + * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode + * @arg: pointer to octeon output queue + */ +static void liquidio_napi_drv_callback(void *arg) +{ + struct octeon_device *oct; + struct octeon_droq *droq = arg; + int this_cpu = smp_processor_id(); + + oct = droq->oct_dev; + + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) || + droq->cpu_id == this_cpu) { + napi_schedule_irqoff(&droq->napi); + } else { + INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi); + smp_call_function_single_async(droq->cpu_id, &droq->csd); + } +} + +/** + * liquidio_napi_poll - Entry point for NAPI polling + * @napi: NAPI structure + * @budget: maximum number of items to process + */ +static int liquidio_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_instr_queue *iq; + struct octeon_device *oct; + struct octeon_droq *droq; + int tx_done = 0, iq_no; + int work_done; + + droq = container_of(napi, struct octeon_droq, napi); + oct = droq->oct_dev; + iq_no = droq->q_no; + + /* Handle Droq descriptors */ + work_done = octeon_droq_process_poll_pkts(oct, droq, budget); + + /* Flush the instruction queue */ + iq = oct->instr_queue[iq_no]; + if (iq) { + /* TODO: move this check to inside octeon_flush_iq, + * once check_db_timeout is removed + */ + if (atomic_read(&iq->instr_pending)) + /* Process iq buffers with in the budget limits */ + tx_done = octeon_flush_iq(oct, iq, budget); + else + tx_done = 1; + /* Update iq read-index rather than waiting for next interrupt. + * Return back if tx_done is false. 
+ */ + /* sub-queue status update */ + lio_update_txq_status(oct, iq_no); + } else { + dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n", + __func__, iq_no); + } + +#define MAX_REG_CNT 2000000U + /* force enable interrupt if reg cnts are high to avoid wraparound */ + if ((work_done < budget && tx_done) || + (iq && iq->pkt_in_done >= MAX_REG_CNT) || + (droq->pkt_count >= MAX_REG_CNT)) { + napi_complete_done(napi, work_done); + + octeon_enable_irq(droq->oct_dev, droq->q_no); + return 0; + } + + return (!tx_done) ? (budget) : (work_done); +} + +/** + * liquidio_setup_io_queues - Setup input and output queues + * @octeon_dev: octeon device + * @ifidx: Interface index + * @num_iqs: input io queue count + * @num_oqs: output io queue count + * + * Note: Queues are with respect to the octeon device. Thus + * an input queue is for egress packets, and output queues + * are for ingress packets. + */ +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, + u32 num_iqs, u32 num_oqs) +{ + struct octeon_droq_ops droq_ops; + struct net_device *netdev; + struct octeon_droq *droq; + struct napi_struct *napi; + int cpu_id_modulus; + int num_tx_descs; + struct lio *lio; + int retval = 0; + int q, q_no; + int cpu_id; + + netdev = octeon_dev->props[ifidx].netdev; + + lio = GET_LIO(netdev); + + memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + + droq_ops.fptr = liquidio_push_packet; + droq_ops.farg = netdev; + + droq_ops.poll_mode = 1; + droq_ops.napi_fn = liquidio_napi_drv_callback; + cpu_id = 0; + cpu_id_modulus = num_present_cpus(); + + /* set up DROQs. */ + for (q = 0; q < num_oqs; q++) { + q_no = lio->linfo.rxpciq[q].s.q_no; + dev_dbg(&octeon_dev->pci_dev->dev, + "%s index:%d linfo.rxpciq.s.q_no:%d\n", + __func__, q, q_no); + retval = octeon_setup_droq( + octeon_dev, q_no, + CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev), + lio->ifidx), + NULL); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "%s : Runtime DROQ(RxQ) creation failed.\n", + __func__); + return 1; + } + + droq = octeon_dev->droq[q_no]; + napi = &droq->napi; + dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n", + (u64)netdev, (u64)octeon_dev); + netif_napi_add(netdev, napi, liquidio_napi_poll); + + /* designate a CPU for this droq */ + droq->cpu_id = cpu_id; + cpu_id++; + if (cpu_id >= cpu_id_modulus) + cpu_id = 0; + + octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); + } + + if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) { + /* 23XX PF/VF can send/recv control messages (via the first + * PF/VF-owned droq) from the firmware even if the ethX + * interface is down, so that's why poll_mode must be off + * for the first droq. + */ + octeon_dev->droq[0]->ops.poll_mode = 0; + } + + /* set up IQs. 
*/ + for (q = 0; q < num_iqs; q++) { + num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF( + octeon_get_conf(octeon_dev), lio->ifidx); + retval = octeon_setup_iq(octeon_dev, ifidx, q, + lio->linfo.txpciq[q], num_tx_descs, + netdev_get_tx_queue(netdev, q)); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + " %s : Runtime IQ(TxQ) creation failed.\n", + __func__); + return 1; + } + + /* XPS */ + if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on && + octeon_dev->ioq_vector) { + struct octeon_ioq_vector *ioq_vector; + + ioq_vector = &octeon_dev->ioq_vector[q]; + netif_set_xps_queue(netdev, + &ioq_vector->affinity_mask, + ioq_vector->iq_index); + } + } + + return 0; +} + +static +int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret) +{ + struct octeon_device *oct = droq->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + + if (droq->ops.poll_mode) { + droq->ops.napi_fn(droq); + } else { + if (ret & MSIX_PO_INT) { + if (OCTEON_CN23XX_VF(oct)) + dev_err(&oct->pci_dev->dev, + "should not come here should not get rx when poll mode = 0 for vf\n"); + tasklet_schedule(&oct_priv->droq_tasklet); + return 1; + } + /* this will be flushed periodically by check iq db */ + if (ret & MSIX_PI_INT) + return 0; + } + + return 0; +} + +irqreturn_t +liquidio_msix_intr_handler(int __maybe_unused irq, void *dev) +{ + struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev; + struct octeon_device *oct = ioq_vector->oct_dev; + struct octeon_droq *droq = oct->droq[ioq_vector->droq_index]; + u64 ret; + + ret = oct->fn_list.msix_interrupt_handler(ioq_vector); + + if (ret & MSIX_PO_INT || ret & MSIX_PI_INT) + liquidio_schedule_msix_droq_pkt_handler(droq, ret); + + return IRQ_HANDLED; +} + +/** + * liquidio_schedule_droq_pkt_handlers - Droq packet processor sceduler + * @oct: octeon device + */ +static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct octeon_droq *droq; + u64 oq_no; + + if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); + oq_no++) { + if (!(oct->droq_intr & BIT_ULL(oq_no))) + continue; + + droq = oct->droq[oq_no]; + + if (droq->ops.poll_mode) { + droq->ops.napi_fn(droq); + oct_priv->napi_mask |= BIT_ULL(oq_no); + } else { + tasklet_schedule(&oct_priv->droq_tasklet); + } + } + } +} + +/** + * liquidio_legacy_intr_handler - Interrupt handler for octeon + * @irq: unused + * @dev: octeon device + */ +static +irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev) +{ + struct octeon_device *oct = (struct octeon_device *)dev; + irqreturn_t ret; + + /* Disable our interrupts for the duration of ISR */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + ret = oct->fn_list.process_interrupt_regs(oct); + + if (ret == IRQ_HANDLED) + liquidio_schedule_droq_pkt_handlers(oct); + + /* Re-enable our interrupts */ + if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + return ret; +} + +/** + * octeon_setup_interrupt - Setup interrupt for octeon device + * @oct: octeon device + * @num_ioqs: number of queues + * + * Enable interrupt in Octeon device as given in the PCI interrupt mask. 
+ */ +int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs) +{ + struct msix_entry *msix_entries; + char *queue_irq_names = NULL; + int i, num_interrupts = 0; + int num_alloc_ioq_vectors; + char *aux_irq_name = NULL; + int num_ioq_vectors; + int irqret, err; + + if (oct->msix_on) { + oct->num_msix_irqs = num_ioqs; + if (OCTEON_CN23XX_PF(oct)) { + num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1; + + /* one non ioq interrupt for handling + * sli_mac_pf_int_sum + */ + oct->num_msix_irqs += 1; + } else if (OCTEON_CN23XX_VF(oct)) { + num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF; + } + + /* allocate storage for the names assigned to each irq */ + oct->irq_name_storage = + kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL); + if (!oct->irq_name_storage) { + dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n"); + return -ENOMEM; + } + + queue_irq_names = oct->irq_name_storage; + + if (OCTEON_CN23XX_PF(oct)) + aux_irq_name = &queue_irq_names + [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)]; + + oct->msix_entries = kcalloc(oct->num_msix_irqs, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!oct->msix_entries) { + dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n"); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return -ENOMEM; + } + + msix_entries = (struct msix_entry *)oct->msix_entries; + + /*Assumption is that pf msix vectors start from pf srn to pf to + * trs and not from 0. if not change this code + */ + if (OCTEON_CN23XX_PF(oct)) { + for (i = 0; i < oct->num_msix_irqs - 1; i++) + msix_entries[i].entry = + oct->sriov_info.pf_srn + i; + + msix_entries[oct->num_msix_irqs - 1].entry = + oct->sriov_info.trs; + } else if (OCTEON_CN23XX_VF(oct)) { + for (i = 0; i < oct->num_msix_irqs; i++) + msix_entries[i].entry = i; + } + num_alloc_ioq_vectors = pci_enable_msix_range( + oct->pci_dev, msix_entries, + oct->num_msix_irqs, + oct->num_msix_irqs); + if (num_alloc_ioq_vectors < 0) { + dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n"); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return num_alloc_ioq_vectors; + } + + dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n"); + + num_ioq_vectors = oct->num_msix_irqs; + /* For PF, there is one non-ioq interrupt handler */ + if (OCTEON_CN23XX_PF(oct)) { + num_ioq_vectors -= 1; + + snprintf(aux_irq_name, INTRNAMSIZ, + "LiquidIO%u-pf%u-aux", oct->octeon_id, + oct->pf_num); + irqret = request_irq( + msix_entries[num_ioq_vectors].vector, + liquidio_legacy_intr_handler, 0, + aux_irq_name, oct); + if (irqret) { + dev_err(&oct->pci_dev->dev, + "Request_irq failed for MSIX interrupt Error: %d\n", + irqret); + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + oct->msix_entries = NULL; + return irqret; + } + } + for (i = 0 ; i < num_ioq_vectors ; i++) { + if (OCTEON_CN23XX_PF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], + INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, i); + + if (OCTEON_CN23XX_VF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(i)], + INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u", + oct->octeon_id, oct->vf_num, i); + + irqret = request_irq(msix_entries[i].vector, + liquidio_msix_intr_handler, 0, + &queue_irq_names[IRQ_NAME_OFF(i)], + &oct->ioq_vector[i]); + + if (irqret) { + dev_err(&oct->pci_dev->dev, + "Request_irq failed for MSIX interrupt Error: %d\n", + irqret); + /* Freeing the non-ioq irq 
vector here . */ + free_irq(msix_entries[num_ioq_vectors].vector, + oct); + + while (i) { + i--; + /* clearing affinity mask. */ + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + } + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + oct->msix_entries = NULL; + return irqret; + } + oct->ioq_vector[i].vector = msix_entries[i].vector; + /* assign the cpu mask for this msix interrupt vector */ + irq_set_affinity_hint(msix_entries[i].vector, + &oct->ioq_vector[i].affinity_mask + ); + } + dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n", + oct->octeon_id); + } else { + err = pci_enable_msi(oct->pci_dev); + if (err) + dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", + err); + else + oct->flags |= LIO_FLAG_MSI_ENABLED; + + /* allocate storage for the names assigned to the irq */ + oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL); + if (!oct->irq_name_storage) + return -ENOMEM; + + queue_irq_names = oct->irq_name_storage; + + if (OCTEON_CN23XX_PF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, + "LiquidIO%u-pf%u-rxtx-%u", + oct->octeon_id, oct->pf_num, 0); + + if (OCTEON_CN23XX_VF(oct)) + snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ, + "LiquidIO%u-vf%u-rxtx-%u", + oct->octeon_id, oct->vf_num, 0); + + irqret = request_irq(oct->pci_dev->irq, + liquidio_legacy_intr_handler, + IRQF_SHARED, + &queue_irq_names[IRQ_NAME_OFF(0)], oct); + if (irqret) { + if (oct->flags & LIO_FLAG_MSI_ENABLED) + pci_disable_msi(oct->pci_dev); + dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", + irqret); + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + return irqret; + } + } + return 0; +} + +/** + * liquidio_change_mtu - Net device change_mtu + * @netdev: network device + * @new_mtu: the new max transmit unit size + */ +int liquidio_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + int ret = 0; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0); + if (!sc) { + netif_info(lio, rx_err, lio->netdev, + "Failed to allocate soft command\n"); + return -ENOMEM; + } + + ncmd = (union octnet_cmd *)sc->virtdptr; + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU; + ncmd->s.param1 = new_mtu; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_CMD, 0, 0, 0); + + ret = octeon_send_soft_command(oct, sc); + if (ret == IQ_SEND_FAILED) { + netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n"); + octeon_free_soft_command(oct, sc); + return -EINVAL; + } + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. 
+ */ + ret = wait_for_sc_completion_timeout(oct, sc, 0); + if (ret) + return ret; + + if (sc->sc_status) { + WRITE_ONCE(sc->caller_is_done, true); + return -EINVAL; + } + + netdev->mtu = new_mtu; + lio->mtu = new_mtu; + + WRITE_ONCE(sc->caller_is_done, true); + return 0; +} + +int lio_wait_for_clean_oq(struct octeon_device *oct) +{ + int retry = 100, pending_pkts = 0; + int idx; + + do { + pending_pkts = 0; + + for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) { + if (!(oct->io_qmask.oq & BIT_ULL(idx))) + continue; + pending_pkts += + atomic_read(&oct->droq[idx]->pkts_pending); + } + + if (pending_pkts > 0) + schedule_timeout_uninterruptible(1); + + } while (retry-- && pending_pkts); + + return pending_pkts; +} + +static void +octnet_nic_stats_callback(struct octeon_device *oct_dev, + u32 status, void *ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr; + struct oct_nic_stats_resp *resp = + (struct oct_nic_stats_resp *)sc->virtrptr; + struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire; + struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost; + struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire; + struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost; + + if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->stats, + (sizeof(struct oct_link_stats)) >> 3); + + /* RX link-level stats */ + rstats->total_rcvd = rsp_rstats->total_rcvd; + rstats->bytes_rcvd = rsp_rstats->bytes_rcvd; + rstats->total_bcst = rsp_rstats->total_bcst; + rstats->total_mcst = rsp_rstats->total_mcst; + rstats->runts = rsp_rstats->runts; + rstats->ctl_rcvd = rsp_rstats->ctl_rcvd; + /* Accounts for over/under-run of buffers */ + rstats->fifo_err = rsp_rstats->fifo_err; + rstats->dmac_drop = rsp_rstats->dmac_drop; + rstats->fcs_err = rsp_rstats->fcs_err; + rstats->jabber_err = rsp_rstats->jabber_err; + rstats->l2_err = rsp_rstats->l2_err; + rstats->frame_err = rsp_rstats->frame_err; + rstats->red_drops = rsp_rstats->red_drops; + + /* RX firmware stats */ + rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd; + rstats->fw_total_fwd = rsp_rstats->fw_total_fwd; + rstats->fw_total_mcast = rsp_rstats->fw_total_mcast; + rstats->fw_total_bcast = rsp_rstats->fw_total_bcast; + rstats->fw_err_pko = rsp_rstats->fw_err_pko; + rstats->fw_err_link = rsp_rstats->fw_err_link; + rstats->fw_err_drop = rsp_rstats->fw_err_drop; + rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan; + rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err; + + /* Number of packets that are LROed */ + rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts; + /* Number of octets that are LROed */ + rstats->fw_lro_octs = rsp_rstats->fw_lro_octs; + /* Number of LRO packets formed */ + rstats->fw_total_lro = rsp_rstats->fw_total_lro; + /* Number of times lRO of packet aborted */ + rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts; + rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port; + rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq; + rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval; + rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer; + /* intrmod: packet forward rate */ + rstats->fwd_rate = rsp_rstats->fwd_rate; + + /* TX link-level stats */ + tstats->total_pkts_sent = rsp_tstats->total_pkts_sent; + tstats->total_bytes_sent = rsp_tstats->total_bytes_sent; + tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent; + tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent; + tstats->ctl_sent = rsp_tstats->ctl_sent; + /* Packets sent after one 
collision*/ + tstats->one_collision_sent = rsp_tstats->one_collision_sent; + /* Packets sent after multiple collision*/ + tstats->multi_collision_sent = rsp_tstats->multi_collision_sent; + /* Packets not sent due to max collisions */ + tstats->max_collision_fail = rsp_tstats->max_collision_fail; + /* Packets not sent due to max deferrals */ + tstats->max_deferral_fail = rsp_tstats->max_deferral_fail; + /* Accounts for over/under-run of buffers */ + tstats->fifo_err = rsp_tstats->fifo_err; + tstats->runts = rsp_tstats->runts; + /* Total number of collisions detected */ + tstats->total_collisions = rsp_tstats->total_collisions; + + /* firmware stats */ + tstats->fw_total_sent = rsp_tstats->fw_total_sent; + tstats->fw_total_fwd = rsp_tstats->fw_total_fwd; + tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent; + tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent; + tstats->fw_err_pko = rsp_tstats->fw_err_pko; + tstats->fw_err_pki = rsp_tstats->fw_err_pki; + tstats->fw_err_link = rsp_tstats->fw_err_link; + tstats->fw_err_drop = rsp_tstats->fw_err_drop; + tstats->fw_tso = rsp_tstats->fw_tso; + tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd; + tstats->fw_err_tso = rsp_tstats->fw_err_tso; + tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan; + + resp->status = 1; + } else { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); + resp->status = -1; + } +} + +static int lio_fetch_vf_stats(struct lio *lio) +{ + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_nic_vf_stats_resp *resp; + + int retval; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_vf_stats_resp), + 0); + + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n"); + retval = -ENOMEM; + goto lio_fetch_vf_stats_exit; + } + + resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_VF_PORT_STATS, 0, 0, 0); + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + goto lio_fetch_vf_stats_exit; + } + + retval = + wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, + "sc OPCODE_NIC_VF_PORT_STATS command failed\n"); + goto lio_fetch_vf_stats_exit; + } + + if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) { + octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt, + (sizeof(u64)) >> 3); + + if (resp->spoofmac_cnt != 0) { + dev_warn(&oct_dev->pci_dev->dev, + "%llu Spoofed packets detected\n", + resp->spoofmac_cnt); + } + } + WRITE_ONCE(sc->caller_is_done, 1); + +lio_fetch_vf_stats_exit: + return retval; +} + +void lio_fetch_stats(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = wk->ctxptr; + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_nic_stats_resp *resp; + unsigned long time_in_jiffies; + int retval; + + if (OCTEON_CN23XX_PF(oct_dev)) { + /* report spoofchk every 2 seconds */ + if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) && + (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) && + oct_dev->sriov_info.num_vfs_alloced) { + lio_fetch_vf_stats(lio); + } + + 
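+	/* advance the poll counter: the VF spoof-check stats above are
+	 * fetched only once every LIO_VFSTATS_POLL runs of this work item
+	 */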
oct_dev->vfstats_poll++; + } + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_nic_stats_resp), + 0); + + if (!sc) { + dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n"); + goto lio_fetch_stats_exit; + } + + resp = (struct oct_nic_stats_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_stats_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_PORT_STATS, 0, 0, 0); + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + goto lio_fetch_stats_exit; + } + + retval = wait_for_sc_completion_timeout(oct_dev, sc, + (2 * LIO_SC_MAX_TMO_MS)); + if (retval) { + dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n"); + goto lio_fetch_stats_exit; + } + + octnet_nic_stats_callback(oct_dev, sc->sc_status, sc); + WRITE_ONCE(sc->caller_is_done, true); + +lio_fetch_stats_exit: + time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS); + if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) + schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies); + + return; +} + +int liquidio_set_speed(struct lio *lio, int speed) +{ + struct octeon_device *oct = lio->oct_dev; + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + int retval; + u32 var; + + if (oct->speed_setting == speed) + return 0; + + if (!OCTEON_CN23XX_PF(oct)) { + dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n", + __func__); + return -EOPNOTSUPP; + } + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), + 0); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_SPEED_SET; + ncmd->s.param1 = speed; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + retval = -EBUSY; + } else { + /* Wait for response or timeout */ + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + retval = resp->status; + + if (retval) { + dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n", + __func__, retval); + WRITE_ONCE(sc->caller_is_done, true); + + return -EIO; + } + + var = be32_to_cpu((__force __be32)resp->speed); + if (var != speed) { + dev_err(&oct->pci_dev->dev, + "%s: setting failed speed= %x, expect %x\n", + __func__, var, speed); + } + + oct->speed_setting = var; + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +int liquidio_get_speed(struct lio *lio) +{ + struct octeon_device *oct = lio->oct_dev; + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + int retval; + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), + 0); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + 
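+	/* the command completes asynchronously; arm the completion before it is queued */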
init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_SPEED_GET; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + retval = -EIO; + } else { + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + retval = resp->status; + if (retval) { + dev_err(&oct->pci_dev->dev, + "%s failed retval=%d\n", __func__, retval); + retval = -EIO; + } else { + u32 var; + + var = be32_to_cpu((__force __be32)resp->speed); + oct->speed_setting = var; + if (var == 0xffff) { + /* unable to access boot variables + * get the default value based on the NIC type + */ + if (oct->subsystem_id == + OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == + OCTEON_CN2360_25GB_SUBSYS_ID) { + oct->no_speed_setting = 1; + oct->speed_setting = 25; + } else { + oct->speed_setting = 10; + } + } + + } + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +int liquidio_set_fec(struct lio *lio, int on_off) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = lio->oct_dev; + + if (oct->props[lio->ifidx].fec == on_off) + return 0; + + if (!OCTEON_CN23XX_PF(oct)) { + dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n", + __func__); + return -1; + } + + if (oct->speed_boot != 25) { + dev_err(&oct->pci_dev->dev, + "Set FEC only when link speed is 25G during insmod\n"); + return -1; + } + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + if (!sc) { + dev_err(&oct->pci_dev->dev, + "Failed to allocate soft command\n"); + return -ENOMEM; + } + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_SET; + ncmd->s.param1 = on_off; + /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */ + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, "Failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (-EIO); + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (var != on_off) { + dev_err(&oct->pci_dev->dev, + "Setting failed fec= %x, expect %x\n", + var, on_off); + oct->props[lio->ifidx].fec = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + } + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? 
"on" : "off"); + } + + return retval; +} + +int liquidio_get_fec(struct lio *lio) +{ + struct oct_nic_seapi_resp *resp; + struct octeon_soft_command *sc; + struct octeon_device *oct; + union octnet_cmd *ncmd; + int retval; + u32 var; + + oct = lio->oct_dev; + + sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + sizeof(struct oct_nic_seapi_resp), 0); + if (!sc) + return -ENOMEM; + + ncmd = sc->virtdptr; + resp = sc->virtrptr; + memset(resp, 0, sizeof(struct oct_nic_seapi_resp)); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ncmd->u64 = 0; + ncmd->s.cmd = SEAPI_CMD_FEC_GET; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_UBOOT_CTL, 0, 0, 0); + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_info(&oct->pci_dev->dev, + "%s: Failed to send soft command\n", __func__); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + var = be32_to_cpu(resp->fec_setting); + resp->fec_setting = var; + if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS) + oct->props[lio->ifidx].fec = 1; + else + oct->props[lio->ifidx].fec = 0; + + WRITE_ONCE(sc->caller_is_done, true); + + if (oct->props[lio->ifidx].fec != + oct->props[lio->ifidx].fec_boot) { + dev_dbg(&oct->pci_dev->dev, + "Reload driver to change fec to %s\n", + oct->props[lio->ifidx].fec ? "on" : "off"); + } + + return retval; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c new file mode 100644 index 000000000..2c10ae3f7 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -0,0 +1,3182 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#include <linux/ethtool.h> +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/pci.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn23xx_pf_device.h" +#include "cn23xx_vf_device.h" + +static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs); + +struct oct_intrmod_resp { + u64 rh; + struct oct_intrmod_cfg intrmod; + u64 status; +}; + +struct oct_mdio_cmd_resp { + u64 rh; + struct oct_mdio_cmd resp; + u64 status; +}; + +#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp)) + +/* Octeon's interface mode of operation */ +enum { + INTERFACE_MODE_DISABLED, + INTERFACE_MODE_RGMII, + INTERFACE_MODE_GMII, + INTERFACE_MODE_SPI, + INTERFACE_MODE_PCIE, + INTERFACE_MODE_XAUI, + INTERFACE_MODE_SGMII, + INTERFACE_MODE_PICMG, + INTERFACE_MODE_NPI, + INTERFACE_MODE_LOOP, + INTERFACE_MODE_SRIO, + INTERFACE_MODE_ILK, + INTERFACE_MODE_RXAUI, + INTERFACE_MODE_QSGMII, + INTERFACE_MODE_AGL, + INTERFACE_MODE_XLAUI, + INTERFACE_MODE_XFI, + INTERFACE_MODE_10G_KR, + INTERFACE_MODE_40G_KR4, + INTERFACE_MODE_MIXED, +}; + +#define OCT_ETHTOOL_REGDUMP_LEN 4096 +#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11) +#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2) +#define OCT_ETHTOOL_REGSVER 1 + +/* statistics of PF */ +static const char oct_stats_strings[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", + "tx_errors", + "rx_dropped", + "tx_dropped", + + "tx_total_sent", + "tx_total_fwd", + "tx_err_pko", + "tx_err_pki", + "tx_err_link", + "tx_err_drop", + + "tx_tso", + "tx_tso_packets", + "tx_tso_err", + "tx_vxlan", + + "tx_mcast", + "tx_bcast", + + "mac_tx_total_pkts", + "mac_tx_total_bytes", + "mac_tx_mcast_pkts", + "mac_tx_bcast_pkts", + "mac_tx_ctl_packets", + "mac_tx_total_collisions", + "mac_tx_one_collision", + "mac_tx_multi_collision", + "mac_tx_max_collision_fail", + "mac_tx_max_deferral_fail", + "mac_tx_fifo_err", + "mac_tx_runts", + + "rx_total_rcvd", + "rx_total_fwd", + "rx_mcast", + "rx_bcast", + "rx_jabber_err", + "rx_l2_err", + "rx_frame_err", + "rx_err_pko", + "rx_err_link", + "rx_err_drop", + + "rx_vxlan", + "rx_vxlan_err", + + "rx_lro_pkts", + "rx_lro_bytes", + "rx_total_lro", + + "rx_lro_aborts", + "rx_lro_aborts_port", + "rx_lro_aborts_seq", + "rx_lro_aborts_tsval", + "rx_lro_aborts_timer", + "rx_fwd_rate", + + "mac_rx_total_rcvd", + "mac_rx_bytes", + "mac_rx_total_bcst", + "mac_rx_total_mcst", + "mac_rx_runts", + "mac_rx_ctl_packets", + "mac_rx_fifo_err", + "mac_rx_dma_drop", + "mac_rx_fcs_err", + + "link_state_changes", +}; + +/* statistics of VF */ +static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = { + "rx_packets", + "tx_packets", + "rx_bytes", + "tx_bytes", + "rx_errors", + "tx_errors", + "rx_dropped", + "tx_dropped", + "rx_mcast", + "tx_mcast", + "rx_bcast", + "tx_bcast", + "link_state_changes", +}; + +/* statistics of host tx queue */ +static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "bytes", + "dropped", + "iq_busy", + "sgentry_sent", + + "fw_instr_posted", + "fw_instr_processed", + "fw_instr_dropped", + "fw_bytes_sent", + + "tso", + "vxlan", + "txq_restart", +}; + +/* statistics of host rx queue */ +static const char 
oct_droq_stats_strings[][ETH_GSTRING_LEN] = { + "packets", + "bytes", + "dropped", + "dropped_nomem", + "dropped_toomany", + "fw_dropped", + "fw_pkts_received", + "fw_bytes_received", + "fw_dropped_nodispatch", + + "vxlan", + "buffer_alloc_failure", +}; + +/* LiquidIO driver private flags */ +static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = { +}; + +#define OCTNIC_NCMD_AUTONEG_ON 0x1 +#define OCTNIC_NCMD_PHY_ON 0x2 + +static int lio_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *ecmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct oct_link_info *linfo; + + linfo = &lio->linfo; + + ethtool_link_ksettings_zero_link_mode(ecmd, supported); + ethtool_link_ksettings_zero_link_mode(ecmd, advertising); + + switch (linfo->link.s.phy_type) { + case LIO_PHY_PORT_TP: + ecmd->base.port = PORT_TP; + ecmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(ecmd, supported, TP); + ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + + ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + + break; + + case LIO_PHY_PORT_FIBRE: + if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI || + linfo->link.s.if_mode == INTERFACE_MODE_RXAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XLAUI || + linfo->link.s.if_mode == INTERFACE_MODE_XFI) { + dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); + ecmd->base.transceiver = XCVR_EXTERNAL; + } else { + dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", + linfo->link.s.if_mode); + } + + ecmd->base.port = PORT_FIBRE; + ecmd->base.autoneg = AUTONEG_DISABLE; + ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE); + + ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause); + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (OCTEON_CN23XX_PF(oct)) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 25000baseCR_Full); + + if (oct->no_speed_setting == 0) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseCR_Full); + } + + if (oct->no_speed_setting == 0) { + liquidio_get_speed(lio); + liquidio_get_fec(lio); + } else { + oct->speed_setting = 25; + } + + if (oct->speed_setting == 10) { + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseCR_Full); + } + if (oct->speed_setting == 25) { + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseCR_Full); + } + + if (oct->no_speed_setting) + break; + + ethtool_link_ksettings_add_link_mode + (ecmd, supported, FEC_RS); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, 
FEC_NONE); + /*FEC_OFF*/ + if (oct->props[lio->ifidx].fec == 1) { + /* ETHTOOL_FEC_RS */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_RS); + } else { + /* ETHTOOL_FEC_OFF */ + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, FEC_NONE); + } + } else { /* VF */ + if (linfo->link.s.speed == 10000) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 10000baseCR_Full); + + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 10000baseCR_Full); + } + + if (linfo->link.s.speed == 25000) { + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, supported, + 25000baseCR_Full); + + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseKR_Full); + ethtool_link_ksettings_add_link_mode + (ecmd, advertising, + 25000baseCR_Full); + } + } + } else { + ethtool_link_ksettings_add_link_mode(ecmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(ecmd, advertising, + 10000baseT_Full); + } + break; + } + + if (linfo->link.s.link_up) { + ecmd->base.speed = linfo->link.s.speed; + ecmd->base.duplex = linfo->link.s.duplex; + } else { + ecmd->base.speed = SPEED_UNKNOWN; + ecmd->base.duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static int lio_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *ecmd) +{ + const int speed = ecmd->base.speed; + struct lio *lio = GET_LIO(netdev); + struct oct_link_info *linfo; + struct octeon_device *oct; + + oct = lio->oct_dev; + + linfo = &lio->linfo; + + if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID)) + return -EOPNOTSUPP; + + if (oct->no_speed_setting) { + dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n", + __func__); + return -EOPNOTSUPP; + } + + if ((ecmd->base.duplex != DUPLEX_UNKNOWN && + ecmd->base.duplex != linfo->link.s.duplex) || + ecmd->base.autoneg != AUTONEG_DISABLE || + (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 && + ecmd->base.speed != SPEED_UNKNOWN)) + return -EOPNOTSUPP; + + if ((oct->speed_boot == speed / 1000) && + oct->speed_boot == oct->speed_setting) + return 0; + + liquidio_set_speed(lio, speed / 1000); + + dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n", + oct->speed_setting); + + return 0; +} + +static void +lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct lio *lio; + struct octeon_device *oct; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); + strcpy(drvinfo->driver, "liquidio"); + strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + ETHTOOL_FWVERS_LEN); + strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); +} + +static void +lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct octeon_device *oct; + struct lio *lio; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); + 
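+	/* same as the PF drvinfo above; only the reported driver name differs */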
strcpy(drvinfo->driver, "liquidio_vf"); + strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + ETHTOOL_FWVERS_LEN); + strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); +} + +static int +lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL; + nctrl.ncmd.s.param1 = num_queues; + nctrl.ncmd.s.param2 = num_queues; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n", + ret); + return -1; + } + + return 0; +} + +static void +lio_ethtool_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; + u32 combined_count = 0, max_combined = 0; + + if (OCTEON_CN6XXX(oct)) { + struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); + + max_rx = CFG_GET_OQ_MAX_Q(conf6x); + max_tx = CFG_GET_IQ_MAX_Q(conf6x); + rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); + tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); + } else if (OCTEON_CN23XX_PF(oct)) { + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, cn23xx_pf); + + max_combined = CFG_GET_IQ_MAX_Q(conf23_pf); + } + combined_count = oct->num_iqs; + } else if (OCTEON_CN23XX_VF(oct)) { + u64 reg_val = 0ULL; + u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); + + reg_val = octeon_read_csr64(oct, ctrl); + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + combined_count = oct->num_iqs; + } + + channel->max_rx = max_rx; + channel->max_tx = max_tx; + channel->max_combined = max_combined; + channel->rx_count = rx_count; + channel->tx_count = tx_count; + channel->combined_count = combined_count; +} + +static int +lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs) +{ + struct msix_entry *msix_entries; + int num_msix_irqs = 0; + int i; + + if (!oct->msix_on) + return 0; + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon. 
+ */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + if (oct->msix_on) { + if (OCTEON_CN23XX_PF(oct)) + num_msix_irqs = oct->num_msix_irqs - 1; + else if (OCTEON_CN23XX_VF(oct)) + num_msix_irqs = oct->num_msix_irqs; + + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < num_msix_irqs; i++) { + if (oct->ioq_vector[i].vector) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint(msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } + } + + /* non-iov vector's argument is oct struct */ + if (OCTEON_CN23XX_PF(oct)) + free_irq(msix_entries[i].vector, oct); + + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + } + + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + + if (octeon_allocate_ioq_vector(oct, num_ioqs)) { + dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); + return -1; + } + + if (octeon_setup_interrupt(oct, num_ioqs)) { + dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n"); + return -1; + } + + /* Enable Octeon device interrupts */ + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + return 0; +} + +static int +lio_ethtool_set_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + u32 combined_count, max_combined; + struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + int stopped = 0; + + if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) { + dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n"); + return -EINVAL; + } + + if (!channel->combined_count || channel->other_count || + channel->rx_count || channel->tx_count) + return -EINVAL; + + combined_count = channel->combined_count; + + if (OCTEON_CN23XX_PF(oct)) { + if (oct->sriov_info.sriov_enabled) { + max_combined = lio->linfo.num_txpciq; + } else { + struct octeon_config *conf23_pf = + CHIP_CONF(oct, + cn23xx_pf); + + max_combined = + CFG_GET_IQ_MAX_Q(conf23_pf); + } + } else if (OCTEON_CN23XX_VF(oct)) { + u64 reg_val = 0ULL; + u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0); + + reg_val = octeon_read_csr64(oct, ctrl); + reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS; + max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK; + } else { + return -EINVAL; + } + + if (combined_count > max_combined || combined_count < 1) + return -EINVAL; + + if (combined_count == oct->num_iqs) + return 0; + + ifstate_set(lio, LIO_IFSTATE_RESETTING); + + if (netif_running(dev)) { + dev->netdev_ops->ndo_stop(dev); + stopped = 1; + } + + if (lio_reset_queues(dev, combined_count)) + return -EINVAL; + + if (stopped) + dev->netdev_ops->ndo_open(dev); + + ifstate_reset(lio, LIO_IFSTATE_RESETTING); + + return 0; +} + +static int lio_get_eeprom_len(struct net_device *netdev) +{ + u8 buf[192]; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_board_info *board_info; + int len; + + board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); + len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n", + board_info->name, board_info->serial_number, + board_info->major, board_info->minor); + + return len; +} + +static int +lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_board_info *board_info; + + if (eeprom->offset) + return -EINVAL; + + eeprom->magic = oct_dev->pci_dev->vendor; + 
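+	/* no device access here: the reported "eeprom" contents are formatted from the cached boardinfo */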
board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); + sprintf((char *)bytes, + "boardname:%s serialnum:%s maj:%lld min:%lld\n", + board_info->name, board_info->serial_number, + board_info->major, board_info->minor); + + return 0; +} + +static int octnet_gpio_access(struct net_device *netdev, int addr, int val) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS; + nctrl.ncmd.s.param1 = addr; + nctrl.ncmd.s.param2 = val; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); + return -EINVAL; + } + + return 0; +} + +static int octnet_id_active(struct net_device *netdev, int val) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE; + nctrl.ncmd.s.param1 = val; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to configure gpio value, ret=%d\n", ret); + return -EINVAL; + } + + return 0; +} + +/* This routine provides PHY access routines for + * mdio clause45 . + */ +static int +octnet_mdio45_access(struct lio *lio, int op, int loc, int *value) +{ + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_soft_command *sc; + struct oct_mdio_cmd_resp *mdio_cmd_rsp; + struct oct_mdio_cmd *mdio_cmd; + int retval = 0; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + sizeof(struct oct_mdio_cmd), + sizeof(struct oct_mdio_cmd_resp), 0); + + if (!sc) + return -ENOMEM; + + mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr; + mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr; + + mdio_cmd->op = op; + mdio_cmd->mdio_addr = loc; + if (op) + mdio_cmd->value1 = *value; + octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45, + 0, 0, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&oct_dev->pci_dev->dev, + "octnet_mdio45_access instruction failed status: %x\n", + retval); + octeon_free_soft_command(oct_dev, sc); + return -EBUSY; + } else { + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived + */ + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + + retval = mdio_cmd_rsp->status; + if (retval) { + dev_err(&oct_dev->pci_dev->dev, + "octnet mdio45 access failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EBUSY; + } + + octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp), + sizeof(struct oct_mdio_cmd) / 8); + + if (!op) + *value = mdio_cmd_rsp->resp.value1; + + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +static int lio_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state 
state) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct oct_link_info *linfo; + int value, ret; + u32 cur_ver; + + linfo = &lio->linfo; + cur_ver = OCT_FW_VER(oct->fw_info.ver.maj, + oct->fw_info.ver.min, + oct->fw_info.ver.rev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (oct->chip_id == OCTEON_CN66XX) { + octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, + VITESSE_PHY_GPIO_DRIVEON); + return 2; + + } else if (oct->chip_id == OCTEON_CN68XX) { + /* Save the current LED settings */ + ret = octnet_mdio45_access(lio, 0, + LIO68XX_LED_BEACON_ADDR, + &lio->phy_beacon_val); + if (ret) + return ret; + + ret = octnet_mdio45_access(lio, 0, + LIO68XX_LED_CTRL_ADDR, + &lio->led_ctrl_val); + if (ret) + return ret; + + /* Configure Beacon values */ + value = LIO68XX_LED_BEACON_CFGON; + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_BEACON_ADDR, + &value); + if (ret) + return ret; + + value = LIO68XX_LED_CTRL_CFGON; + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_CTRL_ADDR, + &value); + if (ret) + return ret; + } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { + octnet_id_active(netdev, LED_IDENTIFICATION_ON); + if (linfo->link.s.phy_type == LIO_PHY_PORT_TP && + cur_ver > OCT_FW_VER(1, 7, 2)) + return 2; + else + return 0; + } else { + return -EINVAL; + } + break; + + case ETHTOOL_ID_ON: + if (oct->chip_id == OCTEON_CN23XX_PF_VID && + linfo->link.s.phy_type == LIO_PHY_PORT_TP && + cur_ver > OCT_FW_VER(1, 7, 2)) + octnet_id_active(netdev, LED_IDENTIFICATION_ON); + else if (oct->chip_id == OCTEON_CN66XX) + octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, + VITESSE_PHY_GPIO_HIGH); + else + return -EINVAL; + + break; + + case ETHTOOL_ID_OFF: + if (oct->chip_id == OCTEON_CN23XX_PF_VID && + linfo->link.s.phy_type == LIO_PHY_PORT_TP && + cur_ver > OCT_FW_VER(1, 7, 2)) + octnet_id_active(netdev, LED_IDENTIFICATION_OFF); + else if (oct->chip_id == OCTEON_CN66XX) + octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, + VITESSE_PHY_GPIO_LOW); + else + return -EINVAL; + + break; + + case ETHTOOL_ID_INACTIVE: + if (oct->chip_id == OCTEON_CN66XX) { + octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG, + VITESSE_PHY_GPIO_DRIVEOFF); + } else if (oct->chip_id == OCTEON_CN68XX) { + /* Restore LED settings */ + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_CTRL_ADDR, + &lio->led_ctrl_val); + if (ret) + return ret; + + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_BEACON_ADDR, + &lio->phy_beacon_val); + if (ret) + return ret; + } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { + octnet_id_active(netdev, LED_IDENTIFICATION_OFF); + + return 0; + } else { + return -EINVAL; + } + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void +lio_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0, + rx_pending = 0; + + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + + if (OCTEON_CN6XXX(oct)) { + struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); + + tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS; + rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS; + rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx); + tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx); + } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { + tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS; + rx_max_pending = 
CN23XX_MAX_OQ_DESCRIPTORS; + rx_pending = oct->droq[0]->max_count; + tx_pending = oct->instr_queue[0]->max_count; + } + + ering->tx_pending = tx_pending; + ering->tx_max_pending = tx_max_pending; + ering->rx_pending = rx_pending; + ering->rx_max_pending = rx_max_pending; + ering->rx_mini_pending = 0; + ering->rx_jumbo_pending = 0; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; +} + +static int lio_23xx_reconfigure_queue_count(struct lio *lio) +{ + struct octeon_device *oct = lio->oct_dev; + u32 resp_size, data_size; + struct liquidio_if_cfg_resp *resp; + struct octeon_soft_command *sc; + union oct_nic_if_cfg if_cfg; + struct lio_version *vdata; + u32 ifidx_or_pfnum; + int retval; + int j; + + resp_size = sizeof(struct liquidio_if_cfg_resp); + data_size = sizeof(struct lio_version); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, data_size, + resp_size, 0); + if (!sc) { + dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n", + __func__); + return -1; + } + + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + vdata = (struct lio_version *)sc->virtdptr; + + vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); + vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); + vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); + + ifidx_or_pfnum = oct->pf_num; + + if_cfg.u64 = 0; + if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings; + if_cfg.s.base_queue = oct->sriov_info.pf_srn; + if_cfg.s.gmx_port_id = oct->pf_num; + + sc->iq_no = 0; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_QCOUNT_UPDATE, 0, + if_cfg.u64, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, + "Sending iq/oq config failed status: %x\n", + retval); + octeon_free_soft_command(oct, sc); + return -EIO; + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + retval = resp->status; + if (retval) { + dev_err(&oct->pci_dev->dev, + "iq/oq config failed: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -1; + } + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct liquidio_if_cfg_info)) >> 3); + + lio->ifidx = ifidx_or_pfnum; + lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask); + lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask); + for (j = 0; j < lio->linfo.num_rxpciq; j++) { + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; + } + + for (j = 0; j < lio->linfo.num_txpciq; j++) { + lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; + } + + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + + dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n", + lio->linfo.num_rxpciq); + + WRITE_ONCE(sc->caller_is_done, true); + + return 0; +} + +static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int i, queue_count_update = 0; + struct napi_struct *napi, *n; + int ret; + + schedule_timeout_uninterruptible(msecs_to_jiffies(100)); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending 
requests\n"); + + if (lio_wait_for_instr_fetch(oct)) + dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + + if (octeon_set_io_queues_off(oct)) { + dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n"); + return -1; + } + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon. + */ + oct->fn_list.disable_io_queues(oct); + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + + if (num_qs != oct->num_iqs) { + ret = netif_set_real_num_rx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number rx failed\n"); + return ret; + } + + ret = netif_set_real_num_tx_queues(netdev, num_qs); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Setting real number tx failed\n"); + return ret; + } + + /* The value of queue_count_update decides whether it is the + * queue count or the descriptor count that is being + * re-configured. + */ + queue_count_update = 1; + } + + /* Re-configuration of queues can happen in two scenarios, SRIOV enabled + * and SRIOV disabled. Few things like recreating queue zero, resetting + * glists and IRQs are required for both. For the latter, some more + * steps like updating sriov_info for the octeon device need to be done. + */ + if (queue_count_update) { + cleanup_rx_oom_poll_fn(netdev); + + lio_delete_glists(lio); + + /* Delete mbox for PF which is SRIOV disabled because sriov_info + * will be now changed. + */ + if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled) + oct->fn_list.free_mbox(oct); + } + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + octeon_delete_droq(oct, i); + } + + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + octeon_delete_instr_queue(oct, i); + } + + if (queue_count_update) { + /* For PF re-configure sriov related information */ + if ((OCTEON_CN23XX_PF(oct)) && + !oct->sriov_info.sriov_enabled) { + oct->sriov_info.num_pf_rings = num_qs; + if (cn23xx_sriov_config(oct)) { + dev_err(&oct->pci_dev->dev, + "Queue reset aborted: SRIOV config failed\n"); + return -1; + } + + num_qs = oct->sriov_info.num_pf_rings; + } + } + + if (oct->fn_list.setup_device_regs(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n"); + return -1; + } + + /* The following are needed in case of queue count re-configuration and + * not for descriptor count re-configuration. + */ + if (queue_count_update) { + if (octeon_setup_instr_queues(oct)) + return -1; + + if (octeon_setup_output_queues(oct)) + return -1; + + /* Recreating mbox for PF that is SRIOV disabled */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (oct->fn_list.setup_mbox(oct)) { + dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); + return -1; + } + } + + /* Deleting and recreating IRQs whether the interface is SRIOV + * enabled or disabled. + */ + if (lio_irq_reallocate_irqs(oct, num_qs)) { + dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n"); + return -1; + } + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n"); + return -1; + } + + for (i = 0; i < oct->num_oqs; i++) + writel(oct->droq[i]->max_count, + oct->droq[i]->pkts_credit_reg); + + /* Informing firmware about the new queue count. 
It is required + * for firmware to allocate more number of queues than those at + * load time. + */ + if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) { + if (lio_23xx_reconfigure_queue_count(lio)) + return -1; + } + } + + /* Once firmware is aware of the new value, queues can be recreated */ + if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) { + dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n"); + return -1; + } + + if (queue_count_update) { + if (lio_setup_glists(oct, lio, num_qs)) { + dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n"); + return -1; + } + + if (setup_rx_oom_poll_fn(netdev)) { + dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n"); + return 1; + } + + /* Send firmware the information about new number of queues + * if the interface is a VF or a PF that is SRIOV enabled. + */ + if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct)) + if (lio_send_queue_count_update(netdev, num_qs)) + return -1; + } + + return 0; +} + +static int +lio_ethtool_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering, + struct kernel_ethtool_ringparam *kernel_ering, + struct netlink_ext_ack *extack) +{ + u32 rx_count, tx_count, rx_count_old, tx_count_old; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int stopped = 0; + + if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct)) + return -EINVAL; + + if (ering->rx_mini_pending || ering->rx_jumbo_pending) + return -EINVAL; + + rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS, + CN23XX_MAX_OQ_DESCRIPTORS); + tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS, + CN23XX_MAX_IQ_DESCRIPTORS); + + rx_count_old = oct->droq[0]->max_count; + tx_count_old = oct->instr_queue[0]->max_count; + + if (rx_count == rx_count_old && tx_count == tx_count_old) + return 0; + + ifstate_set(lio, LIO_IFSTATE_RESETTING); + + if (netif_running(netdev)) { + netdev->netdev_ops->ndo_stop(netdev); + stopped = 1; + } + + /* Change RX/TX DESCS count */ + if (tx_count != tx_count_old) + CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + tx_count); + if (rx_count != rx_count_old) + CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + rx_count); + + if (lio_reset_queues(netdev, oct->num_iqs)) + goto err_lio_reset_queues; + + if (stopped) + netdev->netdev_ops->ndo_open(netdev); + + ifstate_reset(lio, LIO_IFSTATE_RESETTING); + + return 0; + +err_lio_reset_queues: + if (tx_count != tx_count_old) + CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + tx_count_old); + if (rx_count != rx_count_old) + CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx, + rx_count_old); + return -EINVAL; +} + +static u32 lio_get_msglevel(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + return lio->msg_enable; +} + +static void lio_set_msglevel(struct net_device *netdev, u32 msglvl) +{ + struct lio *lio = GET_LIO(netdev); + + if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) { + if (msglvl & NETIF_MSG_HW) + liquidio_set_feature(netdev, + OCTNET_CMD_VERBOSE_ENABLE, 0); + else + liquidio_set_feature(netdev, + OCTNET_CMD_VERBOSE_DISABLE, 0); + } + + lio->msg_enable = msglvl; +} + +static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl) +{ + struct lio *lio = GET_LIO(netdev); + + lio->msg_enable = msglvl; +} + +static void +lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + /* Notes: Not supporting any auto negotiation in these + * drivers. 
Just report pause frame support. + */ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + pause->autoneg = 0; + + pause->tx_pause = oct->tx_pause; + pause->rx_pause = oct->rx_pause; +} + +static int +lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + /* Notes: Not supporting any auto negotiation in these + * drivers. + */ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct oct_link_info *linfo = &lio->linfo; + + int ret = 0; + + if (oct->chip_id != OCTEON_CN23XX_PF_VID) + return -EINVAL; + + if (linfo->link.s.duplex == 0) { + /*no flow control for half duplex*/ + if (pause->rx_pause || pause->tx_pause) + return -EINVAL; + } + + /*do not support autoneg of link flow control*/ + if (pause->autoneg == AUTONEG_ENABLE) + return -EINVAL; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + if (pause->rx_pause) { + /*enable rx pause*/ + nctrl.ncmd.s.param1 = 1; + } else { + /*disable rx pause*/ + nctrl.ncmd.s.param1 = 0; + } + + if (pause->tx_pause) { + /*enable tx pause*/ + nctrl.ncmd.s.param2 = 1; + } else { + /*disable tx pause*/ + nctrl.ncmd.s.param2 = 0; + } + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Failed to set pause parameter, ret=%d\n", ret); + return -EINVAL; + } + + oct->rx_pause = pause->rx_pause; + oct->tx_pause = pause->tx_pause; + + return 0; +} + +static void +lio_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats __attribute__((unused)), + u64 *data) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + struct rtnl_link_stats64 lstats; + int i = 0, j; + + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + + netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); + /*sum of oct->droq[oq_no]->stats->rx_pkts_received */ + data[i++] = lstats.rx_packets; + /*sum of oct->instr_queue[iq_no]->stats.tx_done */ + data[i++] = lstats.tx_packets; + /*sum of oct->droq[oq_no]->stats->rx_bytes_received */ + data[i++] = lstats.rx_bytes; + /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ + data[i++] = lstats.tx_bytes; + data[i++] = lstats.rx_errors + + oct_dev->link_stats.fromwire.fcs_err + + oct_dev->link_stats.fromwire.jabber_err + + oct_dev->link_stats.fromwire.l2_err + + oct_dev->link_stats.fromwire.frame_err; + data[i++] = lstats.tx_errors; + /*sum of oct->droq[oq_no]->stats->rx_dropped + + *oct->droq[oq_no]->stats->dropped_nodispatch + + *oct->droq[oq_no]->stats->dropped_toomany + + *oct->droq[oq_no]->stats->dropped_nomem + */ + data[i++] = lstats.rx_dropped + + oct_dev->link_stats.fromwire.fifo_err + + oct_dev->link_stats.fromwire.dmac_drop + + oct_dev->link_stats.fromwire.red_drops + + oct_dev->link_stats.fromwire.fw_err_pko + + oct_dev->link_stats.fromwire.fw_err_link + + oct_dev->link_stats.fromwire.fw_err_drop; + /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */ + data[i++] = lstats.tx_dropped + + oct_dev->link_stats.fromhost.max_collision_fail + + oct_dev->link_stats.fromhost.max_deferral_fail + + oct_dev->link_stats.fromhost.total_collisions + + oct_dev->link_stats.fromhost.fw_err_pko + + oct_dev->link_stats.fromhost.fw_err_link + + oct_dev->link_stats.fromhost.fw_err_drop + + 
oct_dev->link_stats.fromhost.fw_err_pki; + + /* firmware tx stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx]. + *fromhost.fw_total_sent + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent); + /*per_core_stats[i].link_stats[port].fromwire.fw_total_fwd */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki); + /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop); + + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_tso_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_err_tso + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso); + /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost. + *fw_tx_vxlan + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan); + + /* Multicast packets sent by this port */ + data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; + data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; + + /* mac tx statistics */ + /*CVMX_BGXX_CMRX_TX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT4 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent); + /*CVMX_BGXX_CMRX_TX_STAT15 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT14 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent); + /*CVMX_BGXX_CMRX_TX_STAT17 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions); + /*CVMX_BGXX_CMRX_TX_STAT3 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT2 */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent); + /*CVMX_BGXX_CMRX_TX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail); + /*CVMX_BGXX_CMRX_TX_STAT1 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail); + /*CVMX_BGXX_CMRX_TX_STAT16 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err); + /*CVMX_BGXX_CMRX_TX_STAT6 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts); + + /* RX firmware stats */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_total_rcvd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
+ *fw_total_fwd + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd); + /* Multicast packets received on this port */ + data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; + data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; + /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err); + /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_err_pko + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko); + /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link); + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_err_drop + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop); + + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_rx_vxlan + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan); + /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx]. + *fromwire.fw_rx_vxlan_err + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err); + + /* LRO */ + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_pkts + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_octs + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs); + /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro); + /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_port + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_seq + */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. + *fw_lro_aborts_tsval + */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval); + /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire. 
+ *fw_lro_aborts_timer + */ + /* intrmod: packet forward rate */ + data[i++] = + CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer); + /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate); + + /* mac: link-level stats */ + /*CVMX_BGXX_CMRX_RX_STAT0 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd); + /*CVMX_BGXX_CMRX_RX_STAT1 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd); + /*CVMX_PKI_STATX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst); + /*CVMX_PKI_STATX_STAT5 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst); + /*wqe->word2.err_code or wqe->word2.err_level */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts); + /*CVMX_BGXX_CMRX_RX_STAT2 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd); + /*CVMX_BGXX_CMRX_RX_STAT6 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err); + /*CVMX_BGXX_CMRX_RX_STAT4 */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop); + /*wqe->word2.err_code or wqe->word2.err_level */ + data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err); + /*lio->link_changes*/ + data[i++] = CVM_CAST64(lio->link_changes); + + for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) { + if (!(oct_dev->io_qmask.iq & BIT_ULL(j))) + continue; + /*packets to network port*/ + /*# of packets tx to network */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); + /*# of bytes tx to network */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); + /*# of packets dropped */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); + /*# of tx fails due to queue full */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); + /*XXX gather entries sent */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); + + /*instruction to firmware: data and control */ + /*# of instructions to the queue */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); + /*# of instructions processed */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.instr_processed); + /*# of instructions could not be processed */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.instr_dropped); + /*bytes sent through the queue */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); + + /*tso request*/ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); + /*vxlan request*/ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); + /*txq restart*/ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart); + } + + /* RX */ + for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) { + if (!(oct_dev->io_qmask.oq & BIT_ULL(j))) + continue; + + /*packets send to TCP/IP network stack */ + /*# of packets to network stack */ + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); + /*# of bytes to network stack */ + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); + /*# of packets dropped */ + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + + oct_dev->droq[j]->stats.dropped_toomany + + oct_dev->droq[j]->stats.rx_dropped); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); + + /*control and data path*/ + data[i++] = + 
CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); + + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); + } +} + +static void lio_vf_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats + __attribute__((unused)), + u64 *data) +{ + struct rtnl_link_stats64 lstats; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + int i = 0, j, vj; + + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + + netdev->netdev_ops->ndo_get_stats64(netdev, &lstats); + /* sum of oct->droq[oq_no]->stats->rx_pkts_received */ + data[i++] = lstats.rx_packets; + /* sum of oct->instr_queue[iq_no]->stats.tx_done */ + data[i++] = lstats.tx_packets; + /* sum of oct->droq[oq_no]->stats->rx_bytes_received */ + data[i++] = lstats.rx_bytes; + /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */ + data[i++] = lstats.tx_bytes; + data[i++] = lstats.rx_errors; + data[i++] = lstats.tx_errors; + /* sum of oct->droq[oq_no]->stats->rx_dropped + + * oct->droq[oq_no]->stats->dropped_nodispatch + + * oct->droq[oq_no]->stats->dropped_toomany + + * oct->droq[oq_no]->stats->dropped_nomem + */ + data[i++] = lstats.rx_dropped; + /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */ + data[i++] = lstats.tx_dropped + + oct_dev->link_stats.fromhost.fw_err_drop; + + data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast; + data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent; + data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast; + data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent; + + /* lio->link_changes */ + data[i++] = CVM_CAST64(lio->link_changes); + + for (vj = 0; vj < oct_dev->num_iqs; vj++) { + j = lio->linfo.txpciq[vj].s.q_no; + + /* packets to network port */ + /* # of packets tx to network */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); + /* # of bytes tx to network */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_tot_bytes); + /* # of packets dropped */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_dropped); + /* # of tx fails due to queue full */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_iq_busy); + /* XXX gather entries sent */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.sgentry_sent); + + /* instruction to firmware: data and control */ + /* # of instructions to the queue */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.instr_posted); + /* # of instructions processed */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed); + /* # of instructions could not be processed */ + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped); + /* bytes sent through the queue */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.bytes_sent); + /* tso request */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso); + /* vxlan request */ + data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan); + /* txq restart */ + data[i++] = CVM_CAST64( + oct_dev->instr_queue[j]->stats.tx_restart); + } + + /* RX */ + for (vj = 0; vj < oct_dev->num_oqs; vj++) { + j = lio->linfo.rxpciq[vj].s.q_no; + + /* packets send to TCP/IP network stack */ + /* # of packets to network stack */ + data[i++] = CVM_CAST64( + oct_dev->droq[j]->stats.rx_pkts_received); + /* # of bytes to network stack */ + 
data[i++] = CVM_CAST64( + oct_dev->droq[j]->stats.rx_bytes_received); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem + + oct_dev->droq[j]->stats.dropped_toomany + + oct_dev->droq[j]->stats.rx_dropped); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); + + /* control and data path */ + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); + + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure); + } +} + +static void lio_get_priv_flags_strings(struct lio *lio, u8 *data) +{ + struct octeon_device *oct_dev = lio->oct_dev; + int i; + + switch (oct_dev->chip_id) { + case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: + for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) { + sprintf(data, "%s", oct_priv_flags_strings[i]); + data += ETH_GSTRING_LEN; + } + break; + case OCTEON_CN68XX: + case OCTEON_CN66XX: + break; + default: + netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); + break; + } +} + +static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + int num_iq_stats, num_oq_stats, i, j; + int num_stats; + + switch (stringset) { + case ETH_SS_STATS: + num_stats = ARRAY_SIZE(oct_stats_strings); + for (j = 0; j < num_stats; j++) { + sprintf(data, "%s", oct_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + + num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) + continue; + for (j = 0; j < num_iq_stats; j++) { + sprintf(data, "tx-%d-%s", i, + oct_iq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + + num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) + continue; + for (j = 0; j < num_oq_stats; j++) { + sprintf(data, "rx-%d-%s", i, + oct_droq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + break; + + case ETH_SS_PRIV_FLAGS: + lio_get_priv_flags_strings(lio, data); + break; + default: + netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); + break; + } +} + +static void lio_vf_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + int num_iq_stats, num_oq_stats, i, j; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + int num_stats; + + switch (stringset) { + case ETH_SS_STATS: + num_stats = ARRAY_SIZE(oct_vf_stats_strings); + for (j = 0; j < num_stats; j++) { + sprintf(data, "%s", oct_vf_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + + num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.iq & BIT_ULL(i))) + continue; + for (j = 0; j < num_iq_stats; j++) { + sprintf(data, "tx-%d-%s", i, + oct_iq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + + num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) { + if (!(oct_dev->io_qmask.oq & BIT_ULL(i))) + continue; + for (j = 0; j < num_oq_stats; j++) { + sprintf(data, "rx-%d-%s", i, + oct_droq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } 
+ } + break; + + case ETH_SS_PRIV_FLAGS: + lio_get_priv_flags_strings(lio, data); + break; + default: + netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n"); + break; + } +} + +static int lio_get_priv_flags_ss_count(struct lio *lio) +{ + struct octeon_device *oct_dev = lio->oct_dev; + + switch (oct_dev->chip_id) { + case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: + return ARRAY_SIZE(oct_priv_flags_strings); + case OCTEON_CN68XX: + case OCTEON_CN66XX: + return -EOPNOTSUPP; + default: + netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); + return -EOPNOTSUPP; + } +} + +static int lio_get_sset_count(struct net_device *netdev, int sset) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + + switch (sset) { + case ETH_SS_STATS: + return (ARRAY_SIZE(oct_stats_strings) + + ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + + ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + case ETH_SS_PRIV_FLAGS: + return lio_get_priv_flags_ss_count(lio); + default: + return -EOPNOTSUPP; + } +} + +static int lio_vf_get_sset_count(struct net_device *netdev, int sset) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + + switch (sset) { + case ETH_SS_STATS: + return (ARRAY_SIZE(oct_vf_stats_strings) + + ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs + + ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); + case ETH_SS_PRIV_FLAGS: + return lio_get_priv_flags_ss_count(lio); + default: + return -EOPNOTSUPP; + } +} + +/* get interrupt moderation parameters */ +static int octnet_get_intrmod_cfg(struct lio *lio, + struct oct_intrmod_cfg *intr_cfg) +{ + struct octeon_soft_command *sc; + struct oct_intrmod_resp *resp; + int retval; + struct octeon_device *oct_dev = lio->oct_dev; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + 0, + sizeof(struct oct_intrmod_resp), 0); + + if (!sc) + return -ENOMEM; + + resp = (struct oct_intrmod_resp *)sc->virtrptr; + memset(resp, 0, sizeof(struct oct_intrmod_resp)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. 
+ */ + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return -ENODEV; + + if (resp->status) { + dev_err(&oct_dev->pci_dev->dev, + "Get interrupt moderation parameters failed\n"); + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; + } + + octeon_swap_8B_data((u64 *)&resp->intrmod, + (sizeof(struct oct_intrmod_cfg)) / 8); + memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg)); + WRITE_ONCE(sc->caller_is_done, true); + + return 0; +} + +/* Configure interrupt moderation parameters */ +static int octnet_set_intrmod_cfg(struct lio *lio, + struct oct_intrmod_cfg *intr_cfg) +{ + struct octeon_soft_command *sc; + struct oct_intrmod_cfg *cfg; + int retval; + struct octeon_device *oct_dev = lio->oct_dev; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + sizeof(struct oct_intrmod_cfg), + 16, 0); + + if (!sc) + return -ENOMEM; + + cfg = (struct oct_intrmod_cfg *)sc->virtdptr; + + memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); + octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + retval = wait_for_sc_completion_timeout(oct_dev, sc, 0); + if (retval) + return retval; + + retval = sc->sc_status; + if (retval == 0) { + dev_info(&oct_dev->pci_dev->dev, + "Rx-Adaptive Interrupt moderation %s\n", + (intr_cfg->rx_enable) ? + "enabled" : "disabled"); + WRITE_ONCE(sc->caller_is_done, true); + return 0; + } + + dev_err(&oct_dev->pci_dev->dev, + "intrmod config failed. 
Status: %x\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -ENODEV; +} + +static int lio_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *intr_coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octeon_instr_queue *iq; + struct oct_intrmod_cfg intrmod_cfg; + + if (octnet_get_intrmod_cfg(lio, &intrmod_cfg)) + return -ENODEV; + + switch (oct->chip_id) { + case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: { + if (!intrmod_cfg.rx_enable) { + intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs; + intr_coal->rx_max_coalesced_frames = + oct->rx_max_coalesced_frames; + } + if (!intrmod_cfg.tx_enable) + intr_coal->tx_max_coalesced_frames = + oct->tx_max_coalesced_frames; + break; + } + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intrmod_cfg.rx_enable) { + intr_coal->rx_coalesce_usecs = + CFG_GET_OQ_INTR_TIME(cn6xxx->conf); + intr_coal->rx_max_coalesced_frames = + CFG_GET_OQ_INTR_PKT(cn6xxx->conf); + } + iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no]; + intr_coal->tx_max_coalesced_frames = iq->fill_threshold; + break; + } + default: + netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); + return -EINVAL; + } + if (intrmod_cfg.rx_enable) { + intr_coal->use_adaptive_rx_coalesce = + intrmod_cfg.rx_enable; + intr_coal->rate_sample_interval = + intrmod_cfg.check_intrvl; + intr_coal->pkt_rate_high = + intrmod_cfg.maxpkt_ratethr; + intr_coal->pkt_rate_low = + intrmod_cfg.minpkt_ratethr; + intr_coal->rx_max_coalesced_frames_high = + intrmod_cfg.rx_maxcnt_trigger; + intr_coal->rx_coalesce_usecs_high = + intrmod_cfg.rx_maxtmr_trigger; + intr_coal->rx_coalesce_usecs_low = + intrmod_cfg.rx_mintmr_trigger; + intr_coal->rx_max_coalesced_frames_low = + intrmod_cfg.rx_mincnt_trigger; + } + if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) && + (intrmod_cfg.tx_enable)) { + intr_coal->use_adaptive_tx_coalesce = + intrmod_cfg.tx_enable; + intr_coal->tx_max_coalesced_frames_high = + intrmod_cfg.tx_maxcnt_trigger; + intr_coal->tx_max_coalesced_frames_low = + intrmod_cfg.tx_mincnt_trigger; + } + return 0; +} + +/* Enable/Disable auto interrupt Moderation */ +static int oct_cfg_adaptive_intr(struct lio *lio, + struct oct_intrmod_cfg *intrmod_cfg, + struct ethtool_coalesce *intr_coal) +{ + int ret = 0; + + if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) { + intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval; + intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high; + intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low; + } + if (intrmod_cfg->rx_enable) { + intrmod_cfg->rx_maxcnt_trigger = + intr_coal->rx_max_coalesced_frames_high; + intrmod_cfg->rx_maxtmr_trigger = + intr_coal->rx_coalesce_usecs_high; + intrmod_cfg->rx_mintmr_trigger = + intr_coal->rx_coalesce_usecs_low; + intrmod_cfg->rx_mincnt_trigger = + intr_coal->rx_max_coalesced_frames_low; + } + if (intrmod_cfg->tx_enable) { + intrmod_cfg->tx_maxcnt_trigger = + intr_coal->tx_max_coalesced_frames_high; + intrmod_cfg->tx_mincnt_trigger = + intr_coal->tx_max_coalesced_frames_low; + } + + ret = octnet_set_intrmod_cfg(lio, intrmod_cfg); + + return ret; +} + +static int +oct_cfg_rx_intrcnt(struct lio *lio, + struct oct_intrmod_cfg *intrmod, + struct ethtool_coalesce *intr_coal) +{ + struct octeon_device *oct = lio->oct_dev; + u32 rx_max_coalesced_frames; + + /* Config Cnt based interrupt values */ 
+ switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; + else + rx_max_coalesced_frames = + intr_coal->rx_max_coalesced_frames; + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, + rx_max_coalesced_frames); + CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + break; + } + case OCTEON_CN23XX_PF_VID: { + int q_no; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = intrmod->rx_frames; + else + rx_max_coalesced_frames = + intr_coal->rx_max_coalesced_frames; + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + q_no += oct->sriov_info.pf_srn; + octeon_write_csr64( + oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), + (octeon_read_csr64( + oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) & + (0x3fffff00000000UL)) | + (rx_max_coalesced_frames - 1)); + /*consider setting resend bit*/ + } + intrmod->rx_frames = rx_max_coalesced_frames; + oct->rx_max_coalesced_frames = rx_max_coalesced_frames; + break; + } + case OCTEON_CN23XX_VF_VID: { + int q_no; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = intrmod->rx_frames; + else + rx_max_coalesced_frames = + intr_coal->rx_max_coalesced_frames; + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + (octeon_read_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) & + (0x3fffff00000000UL)) | + (rx_max_coalesced_frames - 1)); + /*consider writing to resend bit here*/ + } + intrmod->rx_frames = rx_max_coalesced_frames; + oct->rx_max_coalesced_frames = rx_max_coalesced_frames; + break; + } + default: + return -EINVAL; + } + return 0; +} + +static int oct_cfg_rx_intrtime(struct lio *lio, + struct oct_intrmod_cfg *intrmod, + struct ethtool_coalesce *intr_coal) +{ + struct octeon_device *oct = lio->oct_dev; + u32 time_threshold, rx_coalesce_usecs; + + /* Config Time based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + + time_threshold = lio_cn6xxx_get_oq_ticks(oct, + rx_coalesce_usecs); + octeon_write_csr(oct, + CN6XXX_SLI_OQ_INT_LEVEL_TIME, + time_threshold); + + CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + break; + } + case OCTEON_CN23XX_PF_VID: { + u64 time_threshold; + int q_no; + + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = intrmod->rx_usecs; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + time_threshold = + cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + q_no += oct->sriov_info.pf_srn; + octeon_write_csr64(oct, + CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no), + (intrmod->rx_frames | + ((u64)time_threshold << 32))); + /*consider writing to resend bit here*/ + } + intrmod->rx_usecs = rx_coalesce_usecs; + oct->rx_coalesce_usecs = rx_coalesce_usecs; + break; + } + case OCTEON_CN23XX_VF_VID: { + u64 time_threshold; + int q_no; + + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = intrmod->rx_usecs; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + + time_threshold = + cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs); + for (q_no = 0; q_no < oct->num_oqs; q_no++) { + octeon_write_csr64( + oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no), + 
(intrmod->rx_frames | + ((u64)time_threshold << 32))); + /*consider setting resend bit*/ + } + intrmod->rx_usecs = rx_coalesce_usecs; + oct->rx_coalesce_usecs = rx_coalesce_usecs; + break; + } + default: + return -EINVAL; + } + + return 0; +} + +static int +oct_cfg_tx_intrcnt(struct lio *lio, + struct oct_intrmod_cfg *intrmod, + struct ethtool_coalesce *intr_coal) +{ + struct octeon_device *oct = lio->oct_dev; + u32 iq_intr_pkt; + void __iomem *inst_cnt_reg; + u64 val; + + /* Config Cnt based interrupt values */ + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + break; + case OCTEON_CN23XX_VF_VID: + case OCTEON_CN23XX_PF_VID: { + int q_no; + + if (!intr_coal->tx_max_coalesced_frames) + iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD & + CN23XX_PKT_IN_DONE_WMARK_MASK; + else + iq_intr_pkt = intr_coal->tx_max_coalesced_frames & + CN23XX_PKT_IN_DONE_WMARK_MASK; + for (q_no = 0; q_no < oct->num_iqs; q_no++) { + inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg; + val = readq(inst_cnt_reg); + /*clear wmark and count.dont want to write count back*/ + val = (val & 0xFFFF000000000000ULL) | + ((u64)(iq_intr_pkt - 1) + << CN23XX_PKT_IN_DONE_WMARK_BIT_POS); + writeq(val, inst_cnt_reg); + /*consider setting resend bit*/ + } + intrmod->tx_frames = iq_intr_pkt; + oct->tx_max_coalesced_frames = iq_intr_pkt; + break; + } + default: + return -EINVAL; + } + return 0; +} + +static int lio_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *intr_coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct lio *lio = GET_LIO(netdev); + int ret; + struct octeon_device *oct = lio->oct_dev; + struct oct_intrmod_cfg intrmod = {0}; + u32 j, q_no; + int db_max, db_min; + + switch (oct->chip_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + db_min = CN6XXX_DB_MIN; + db_max = CN6XXX_DB_MAX; + if ((intr_coal->tx_max_coalesced_frames >= db_min) && + (intr_coal->tx_max_coalesced_frames <= db_max)) { + for (j = 0; j < lio->linfo.num_txpciq; j++) { + q_no = lio->linfo.txpciq[j].s.q_no; + oct->instr_queue[q_no]->fill_threshold = + intr_coal->tx_max_coalesced_frames; + } + } else { + dev_err(&oct->pci_dev->dev, + "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n", + intr_coal->tx_max_coalesced_frames, + db_min, db_max); + return -EINVAL; + } + break; + case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: + break; + default: + return -EINVAL; + } + + intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0; + intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 
1 : 0; + intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); + intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); + intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); + + ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal); + + if (!intr_coal->use_adaptive_rx_coalesce) { + ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal); + if (ret) + goto ret_intrmod; + + ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal); + if (ret) + goto ret_intrmod; + } else { + oct->rx_coalesce_usecs = + CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct)); + oct->rx_max_coalesced_frames = + CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct)); + } + + if (!intr_coal->use_adaptive_tx_coalesce) { + ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal); + if (ret) + goto ret_intrmod; + } else { + oct->tx_max_coalesced_frames = + CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct)); + } + + return 0; +ret_intrmod: + return ret; +} + +static int lio_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct lio *lio = GET_LIO(netdev); + + info->so_timestamping = +#ifdef PTP_HARDWARE_TIMESTAMPING + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | +#endif + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE; + + if (lio->ptp_clock) + info->phc_index = ptp_clock_index(lio->ptp_clock); + else + info->phc_index = -1; + +#ifdef PTP_HARDWARE_TIMESTAMPING + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); +#endif + + return 0; +} + +/* Return register dump len. */ +static int lio_get_regs_len(struct net_device *dev) +{ + struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + + switch (oct->chip_id) { + case OCTEON_CN23XX_PF_VID: + return OCT_ETHTOOL_REGDUMP_LEN_23XX; + case OCTEON_CN23XX_VF_VID: + return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF; + default: + return OCT_ETHTOOL_REGDUMP_LEN; + } +} + +static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct) +{ + u32 reg; + u8 pf_num = oct->pf_num; + int len = 0; + int i; + + /* PCI Window Registers */ + + len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); + + /*0x29030 or 0x29040*/ + reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n", + reg, oct->pcie_port, oct->pf_num, + (u64)octeon_read_csr64(oct, reg)); + + /*0x27080 or 0x27090*/ + reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num); + len += + sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n", + reg, oct->pcie_port, oct->pf_num, + (u64)octeon_read_csr64(oct, reg)); + + /*0x27000 or 0x27010*/ + reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num); + len += + sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n", + reg, oct->pcie_port, oct->pf_num, + (u64)octeon_read_csr64(oct, reg)); + + /*0x29120*/ + reg = 0x29120; + len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg, + (u64)octeon_read_csr64(oct, reg)); + + /*0x27300*/ + reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET + + (oct->pf_num) * CN23XX_PF_INT_OFFSET; + len += sprintf( + s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg, + oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg)); + + /*0x27200*/ + reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET 
+ + (oct->pf_num) * CN23XX_PF_INT_OFFSET; + len += sprintf(s + len, + "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n", + reg, oct->pcie_port, oct->pf_num, + (u64)octeon_read_csr64(oct, reg)); + + /*29130*/ + reg = CN23XX_SLI_PKT_CNT_INT; + len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg, + (u64)octeon_read_csr64(oct, reg)); + + /*0x29140*/ + reg = CN23XX_SLI_PKT_TIME_INT; + len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg, + (u64)octeon_read_csr64(oct, reg)); + + /*0x29160*/ + reg = 0x29160; + len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg, + (u64)octeon_read_csr64(oct, reg)); + + /*0x29180*/ + reg = CN23XX_SLI_OQ_WMARK; + len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n", + reg, (u64)octeon_read_csr64(oct, reg)); + + /*0x291E0*/ + reg = CN23XX_SLI_PKT_IOQ_RING_RST; + len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg, + (u64)octeon_read_csr64(oct, reg)); + + /*0x29210*/ + reg = CN23XX_SLI_GBL_CONTROL; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg, + (u64)octeon_read_csr64(oct, reg)); + + /*0x29220*/ + reg = 0x29220; + len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n", + reg, (u64)octeon_read_csr64(oct, reg)); + + /*PF only*/ + if (pf_num == 0) { + /*0x29260*/ + reg = CN23XX_SLI_OUT_BP_EN_W1S; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n", + reg, (u64)octeon_read_csr64(oct, reg)); + } else if (pf_num == 1) { + /*0x29270*/ + reg = CN23XX_SLI_OUT_BP_EN2_W1S; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n", + reg, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i); + len += + sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10040*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { + reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10080*/ + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = CN23XX_SLI_OQ_PKTS_CREDIT(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10090*/ + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = CN23XX_SLI_OQ_SIZE(i); + len += sprintf( + s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10050*/ + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = CN23XX_SLI_OQ_PKT_CONTROL(i); + len += sprintf( + s + len, + "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10070*/ + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = CN23XX_SLI_OQ_BASE_ADDR64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x100a0*/ + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x100b0*/ + for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = CN23XX_SLI_OQ_PKTS_SENT(i); + len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x100c0*/ + for (i = 0; i < 
CN23XX_MAX_OUTPUT_QUEUES; i++) { + reg = 0x100c0 + i * CN23XX_OQ_OFFSET; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + + /*0x10000*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { + reg = CN23XX_SLI_IQ_PKT_CONTROL64(i); + len += sprintf( + s + len, + "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10010*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { + reg = CN23XX_SLI_IQ_BASE_ADDR64(i); + len += sprintf( + s + len, + "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg, + i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10020*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { + reg = CN23XX_SLI_IQ_DOORBELL(i); + len += sprintf( + s + len, + "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10030*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) { + reg = CN23XX_SLI_IQ_SIZE(i); + len += sprintf( + s + len, + "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + /*0x10040*/ + for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) + reg = CN23XX_SLI_IQ_INSTR_COUNT64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + return len; +} + +static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct) +{ + int len = 0; + u32 reg; + int i; + + /* PCI Window Registers */ + + len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_SIZE(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i); + len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n", + reg, i, 
(u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET; + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_DOORBELL(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_SIZE(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) { + reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i); + len += sprintf(s + len, + "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n", + reg, i, (u64)octeon_read_csr64(oct, reg)); + } + + return len; +} + +static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) +{ + u32 reg; + int i, len = 0; + + /* PCI Window Registers */ + + len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); + reg = CN6XXX_WIN_WR_ADDR_LO; + len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", + CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_WR_ADDR_HI; + len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", + CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_RD_ADDR_LO; + len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n", + CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_RD_ADDR_HI; + len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", + CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_WR_DATA_LO; + len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", + CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_WR_DATA_HI; + len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", + CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); + len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", + CN6XXX_WIN_WR_MASK_REG, + octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); + + /* PCI Interrupt Register */ + len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", + CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, + CN6XXX_SLI_INT_ENB64_PORT0)); + len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", + CN6XXX_SLI_INT_ENB64_PORT1, + octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); + len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, + octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); + + /* PCI Output queue registers */ + for (i = 0; i < oct->num_oqs; i++) { + reg = CN6XXX_SLI_OQ_PKTS_SENT(i); + len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); + len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + } + reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; + len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", + reg, octeon_read_csr(oct, reg)); + 
reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; + len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", + reg, octeon_read_csr(oct, reg)); + + /* PCI Input queue registers */ + for (i = 0; i <= 3; i++) { + u32 reg; + + reg = CN6XXX_SLI_IQ_DOORBELL(i); + len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); + len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + } + + /* PCI DMA registers */ + + len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n", + CN6XXX_DMA_CNT(0), + octeon_read_csr(oct, CN6XXX_DMA_CNT(0))); + reg = CN6XXX_DMA_PKT_INT_LEVEL(0); + len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n", + CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg)); + reg = CN6XXX_DMA_TIME_INT_LEVEL(0); + len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n", + CN6XXX_DMA_TIME_INT_LEVEL(0), + octeon_read_csr(oct, reg)); + + len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n", + CN6XXX_DMA_CNT(1), + octeon_read_csr(oct, CN6XXX_DMA_CNT(1))); + reg = CN6XXX_DMA_PKT_INT_LEVEL(1); + len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n", + CN6XXX_DMA_PKT_INT_LEVEL(1), + octeon_read_csr(oct, reg)); + reg = CN6XXX_DMA_TIME_INT_LEVEL(1); + len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n", + CN6XXX_DMA_TIME_INT_LEVEL(1), + octeon_read_csr(oct, reg)); + + /* PCI Index registers */ + + len += sprintf(s + len, "\n"); + + for (i = 0; i < 16; i++) { + reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port)); + len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n", + CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg); + } + + return len; +} + +static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct) +{ + u32 val; + int i, len = 0; + + /* PCI CONFIG Registers */ + + len += sprintf(s + len, + "\n\t Octeon Config space Registers\n\n"); + + for (i = 0; i <= 13; i++) { + pci_read_config_dword(oct->pci_dev, (i * 4), &val); + len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", + (i * 4), i, val); + } + + for (i = 30; i <= 34; i++) { + pci_read_config_dword(oct->pci_dev, (i * 4), &val); + len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n", + (i * 4), i, val); + } + + return len; +} + +/* Return register dump to user app. 
*/ +static void lio_get_regs(struct net_device *dev, + struct ethtool_regs *regs, void *regbuf) +{ + struct lio *lio = GET_LIO(dev); + int len = 0; + struct octeon_device *oct = lio->oct_dev; + + regs->version = OCT_ETHTOOL_REGSVER; + + switch (oct->chip_id) { + case OCTEON_CN23XX_PF_VID: + memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX); + len += cn23xx_read_csr_reg(regbuf + len, oct); + break; + case OCTEON_CN23XX_VF_VID: + memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF); + len += cn23xx_vf_read_csr_reg(regbuf + len, oct); + break; + case OCTEON_CN68XX: + case OCTEON_CN66XX: + memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN); + len += cn6xxx_read_csr_reg(regbuf + len, oct); + len += cn6xxx_read_config_reg(regbuf + len, oct); + break; + default: + dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n", + __func__, oct->chip_id); + } +} + +static u32 lio_get_priv_flags(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + return lio->oct_dev->priv_flags; +} + +static int lio_set_priv_flags(struct net_device *netdev, u32 flags) +{ + struct lio *lio = GET_LIO(netdev); + bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES)); + + lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES, + intr_by_tx_bytes); + return 0; +} + +static int lio_get_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + fec->active_fec = ETHTOOL_FEC_NONE; + fec->fec = ETHTOOL_FEC_NONE; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return 0; + + liquidio_get_fec(lio); + fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF); + if (oct->props[lio->ifidx].fec == 1) + fec->active_fec = ETHTOOL_FEC_RS; + else + fec->active_fec = ETHTOOL_FEC_OFF; + } + + return 0; +} + +static int lio_set_fecparam(struct net_device *netdev, + struct ethtool_fecparam *fec) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || + oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { + if (oct->no_speed_setting == 1) + return -EOPNOTSUPP; + + if (fec->fec & ETHTOOL_FEC_OFF) + liquidio_set_fec(lio, 0); + else if (fec->fec & ETHTOOL_FEC_RS) + liquidio_set_fec(lio, 1); + else + return -EOPNOTSUPP; + } else { + return -EOPNOTSUPP; + } + + return 0; +} + +#define LIO_ETHTOOL_COALESCE (ETHTOOL_COALESCE_RX_USECS | \ + ETHTOOL_COALESCE_MAX_FRAMES | \ + ETHTOOL_COALESCE_USE_ADAPTIVE | \ + ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \ + ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \ + ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \ + ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH | \ + ETHTOOL_COALESCE_PKT_RATE_RX_USECS) + +static const struct ethtool_ops lio_ethtool_ops = { + .supported_coalesce_params = LIO_ETHTOOL_COALESCE, + .get_link_ksettings = lio_get_link_ksettings, + .set_link_ksettings = lio_set_link_ksettings, + .get_fecparam = lio_get_fecparam, + .set_fecparam = lio_set_fecparam, + .get_link = ethtool_op_get_link, + .get_drvinfo = lio_get_drvinfo, + .get_ringparam = lio_ethtool_get_ringparam, + .set_ringparam = lio_ethtool_set_ringparam, + .get_channels = lio_ethtool_get_channels, + .set_channels = lio_ethtool_set_channels, + .set_phys_id = lio_set_phys_id, + .get_eeprom_len = lio_get_eeprom_len, + .get_eeprom = lio_get_eeprom, + .get_strings = lio_get_strings, + .get_ethtool_stats = lio_get_ethtool_stats, + .get_pauseparam = lio_get_pauseparam, + .set_pauseparam = 
lio_set_pauseparam, + .get_regs_len = lio_get_regs_len, + .get_regs = lio_get_regs, + .get_msglevel = lio_get_msglevel, + .set_msglevel = lio_set_msglevel, + .get_sset_count = lio_get_sset_count, + .get_coalesce = lio_get_intr_coalesce, + .set_coalesce = lio_set_intr_coalesce, + .get_priv_flags = lio_get_priv_flags, + .set_priv_flags = lio_set_priv_flags, + .get_ts_info = lio_get_ts_info, +}; + +static const struct ethtool_ops lio_vf_ethtool_ops = { + .supported_coalesce_params = LIO_ETHTOOL_COALESCE, + .get_link_ksettings = lio_get_link_ksettings, + .get_link = ethtool_op_get_link, + .get_drvinfo = lio_get_vf_drvinfo, + .get_ringparam = lio_ethtool_get_ringparam, + .set_ringparam = lio_ethtool_set_ringparam, + .get_channels = lio_ethtool_get_channels, + .set_channels = lio_ethtool_set_channels, + .get_strings = lio_vf_get_strings, + .get_ethtool_stats = lio_vf_get_ethtool_stats, + .get_regs_len = lio_get_regs_len, + .get_regs = lio_get_regs, + .get_msglevel = lio_get_msglevel, + .set_msglevel = lio_vf_set_msglevel, + .get_sset_count = lio_vf_get_sset_count, + .get_coalesce = lio_get_intr_coalesce, + .set_coalesce = lio_set_intr_coalesce, + .get_priv_flags = lio_get_priv_flags, + .set_priv_flags = lio_set_priv_flags, + .get_ts_info = lio_get_ts_info, +}; + +void liquidio_set_ethtool_ops(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (OCTEON_CN23XX_VF(oct)) + netdev->ethtool_ops = &lio_vf_ethtool_ops; + else + netdev->ethtool_ops = &lio_ethtool_ops; +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c new file mode 100644 index 000000000..98793b2ac --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -0,0 +1,4366 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/firmware.h> +#include <net/vxlan.h> +#include <linux/kthread.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_device.h" +#include "cn23xx_pf_device.h" +#include "liquidio_image.h" +#include "lio_vf_rep.h" + +MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); +MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME + "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX); + +static int ddr_timeout = 10000; +module_param(ddr_timeout, int, 0644); +MODULE_PARM_DESC(ddr_timeout, + "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); + +static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO; +module_param_string(fw_type, fw_type, sizeof(fw_type), 0444); +MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\"."); + +static u32 console_bitmask; +module_param(console_bitmask, int, 0644); +MODULE_PARM_DESC(console_bitmask, + "Bitmask indicating which consoles have debug output redirected to syslog."); + +/** + * octeon_console_debug_enabled - determines if a given console has debug enabled. + * @console: console to check + * Return: 1 = enabled. 0 otherwise + */ +static int octeon_console_debug_enabled(u32 console) +{ + return (console_bitmask >> (console)) & 0x1; +} + +/* Polling interval for determining when NIC application is alive */ +#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 + +/* runtime link query interval */ +#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 +/* update localtime to octeon firmware every 60 seconds. + * make firmware to use same time reference, so that it will be easy to + * correlate firmware logged events/errors with host events, for debugging. + */ +#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 + +/* time to wait for possible in-flight requests in milliseconds */ +#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) + +struct oct_link_status_resp { + u64 rh; + struct oct_link_info link_info; + u64 status; +}; + +struct oct_timestamp_resp { + u64 rh; + u64 timestamp; + u64 status; +}; + +#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) + +union tx_info { + u64 u64; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u16 gso_size; + u16 gso_segs; + u32 reserved; +#else + u32 reserved; + u16 gso_segs; + u16 gso_size; +#endif + } s; +}; + +/* Octeon device properties to be used by the NIC module. 
+ * Each octeon device in the system will be represented + * by this structure in the NIC module. + */ + +#define OCTNIC_GSO_MAX_HEADER_SIZE 128 +#define OCTNIC_GSO_MAX_SIZE \ + (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) + +struct handshake { + struct completion init; + struct completion started; + struct pci_dev *pci_dev; + int init_ok; + int started_ok; +}; + +#ifdef CONFIG_PCI_IOV +static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs); +#endif + +static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, + char *prefix, char *suffix); + +static int octeon_device_init(struct octeon_device *); +static int liquidio_stop(struct net_device *netdev); +static void liquidio_remove(struct pci_dev *pdev); +static int liquidio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent); +static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, + int linkstate); + +static struct handshake handshake[MAX_OCTEON_DEVICES]; +static struct completion first_stage; + +static void octeon_droq_bh(struct tasklet_struct *t) +{ + int q_no; + int reschedule = 0; + struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t, + droq_tasklet); + struct octeon_device *oct = oct_priv->dev; + + for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { + if (!(oct->io_qmask.oq & BIT_ULL(q_no))) + continue; + reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], + MAX_PACKET_BUDGET); + lio_enable_irq(oct->droq[q_no], NULL); + + if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { + /* set time and cnt interrupt thresholds for this DROQ + * for NAPI + */ + int adjusted_q_no = q_no + oct->sriov_info.pf_srn; + + octeon_write_csr64( + oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), + 0x5700000040ULL); + octeon_write_csr64( + oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); + } + } + + if (reschedule) + tasklet_schedule(&oct_priv->droq_tasklet); +} + +static int lio_wait_for_oq_pkts(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + int retry = 100, pkt_cnt = 0, pending_pkts = 0; + int i; + + do { + pending_pkts = 0; + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); + } + if (pkt_cnt > 0) { + pending_pkts += pkt_cnt; + tasklet_schedule(&oct_priv->droq_tasklet); + } + pkt_cnt = 0; + schedule_timeout_uninterruptible(1); + + } while (retry-- && pending_pkts); + + return pkt_cnt; +} + +/** + * force_io_queues_off - Forces all IO queues off on a given device + * @oct: Pointer to Octeon device + */ +static void force_io_queues_off(struct octeon_device *oct) +{ + if ((oct->chip_id == OCTEON_CN66XX) || + (oct->chip_id == OCTEON_CN68XX)) { + /* Reset the Enable bits for Input Queues. */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); + + /* Reset the Enable bits for Output Queues. */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); + } +} + +/** + * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc + * @oct: Pointer to Octeon device + */ +static inline void pcierror_quiesce_device(struct octeon_device *oct) +{ + int i; + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon, but we should wait for all packet processing + * to finish. 
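+ * The steps below do exactly that: force the queue enable bits off,
+ * give in-flight requests about a second (WAIT_INFLIGHT_REQUEST) to
+ * land, flush whatever is still queued on each instruction queue,
+ * and finally time out any requests left on the ordered list.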
+ */ + force_io_queues_off(oct); + + /* To allow for in-flight requests */ + schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + /* Force all requests waiting to be fetched by OCTEON to complete. */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + /* Force all pending ordered list requests to time out. */ + lio_process_ordered_list(oct, 1); + + /* We do not need to wait for output queue packets to be processed. */ +} + +/** + * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status + * @dev: Pointer to PCI device + */ +static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + int pos = 0x100; + u32 status, mask; + + pr_info("%s :\n", __func__); + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); + if (dev->error_state == pci_channel_io_normal) + status &= ~mask; /* Clear corresponding nonfatal bits */ + else + status &= mask; /* Clear corresponding fatal bits */ + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); +} + +/** + * stop_pci_io - Stop all PCI IO to a given device + * @oct: Pointer to Octeon device + */ +static void stop_pci_io(struct octeon_device *oct) +{ + /* No more instructions will be forwarded. */ + atomic_set(&oct->status, OCT_DEV_IN_RESET); + + pci_disable_device(oct->pci_dev); + + /* Disable interrupts */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + pcierror_quiesce_device(oct); + + /* Release the interrupt line */ + free_irq(oct->pci_dev->irq, oct); + + if (oct->flags & LIO_FLAG_MSI_ENABLED) + pci_disable_msi(oct->pci_dev); + + dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", + lio_get_state_string(&oct->status)); + + /* making it a common function for all OCTEON models */ + cleanup_aer_uncorrect_error_status(oct->pci_dev); +} + +/** + * liquidio_pcie_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct octeon_device *oct = pci_get_drvdata(pdev); + + /* Non-correctable Non-fatal errors */ + if (state == pci_channel_io_normal) { + dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); + cleanup_aer_uncorrect_error_status(oct->pci_dev); + return PCI_ERS_RESULT_CAN_RECOVER; + } + + /* Non-correctable Fatal errors */ + dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); + stop_pci_io(oct); + + /* Always return a DISCONNECT. There is no support for recovery but only + * for a clean shutdown. 
+ */ + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * liquidio_pcie_mmio_enabled - mmio handler + * @pdev: Pointer to PCI device + */ +static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev) +{ + /* We should never hit this since we never ask for a reset for a Fatal + * Error. We always return DISCONNECT in io_error above. + * But play safe and return RECOVERED for now. + */ + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * liquidio_pcie_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the octeon_resume routine. + */ +static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev) +{ + /* We should never hit this since we never ask for a reset for a Fatal + * Error. We always return DISCONNECT in io_error above. + * But play safe and return RECOVERED for now. + */ + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * liquidio_pcie_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the octeon_resume routine. + */ +static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev) +{ + /* Nothing to be done here. */ +} + +#define liquidio_suspend NULL +#define liquidio_resume NULL + +/* For PCI-E Advanced Error Recovery (AER) Interface */ +static const struct pci_error_handlers liquidio_err_handler = { + .error_detected = liquidio_pcie_error_detected, + .mmio_enabled = liquidio_pcie_mmio_enabled, + .slot_reset = liquidio_pcie_slot_reset, + .resume = liquidio_pcie_resume, +}; + +static const struct pci_device_id liquidio_pci_tbl[] = { + { /* 68xx */ + PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 + }, + { /* 66xx */ + PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 + }, + { /* 23xx pf */ + PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 + }, + { + 0, 0, 0, 0, 0, 0, 0 + } +}; +MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); + +static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume); + +static struct pci_driver liquidio_pci_driver = { + .name = "LiquidIO", + .id_table = liquidio_pci_tbl, + .probe = liquidio_probe, + .remove = liquidio_remove, + .err_handler = &liquidio_err_handler, /* For AER */ + .driver.pm = &liquidio_pm_ops, +#ifdef CONFIG_PCI_IOV + .sriov_configure = liquidio_enable_sriov, +#endif +}; + +/** + * liquidio_init_pci - register PCI driver + */ +static int liquidio_init_pci(void) +{ + return pci_register_driver(&liquidio_pci_driver); +} + +/** + * liquidio_deinit_pci - unregister PCI driver + */ +static void liquidio_deinit_pci(void) +{ + pci_unregister_driver(&liquidio_pci_driver); +} + +/** + * check_txq_status - Check Tx queue status, and take appropriate action + * @lio: per-network private data + * Return: 0 if full, number of queues woken up otherwise + */ +static inline int check_txq_status(struct lio *lio) +{ + int numqs = lio->netdev->real_num_tx_queues; + int ret_val = 0; + int q, iq; + + /* check each sub-queue state */ + for (q = 0; q < numqs; q++) { + iq = lio->linfo.txpciq[q % + lio->oct_dev->num_iqs].s.q_no; + if (octnet_iq_is_full(lio->oct_dev, iq)) + continue; + if (__netif_subqueue_stopped(lio->netdev, q)) { + netif_wake_subqueue(lio->netdev, q); + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, + tx_restart, 1); + 
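+ /* Count each subqueue that was woken so the caller (the periodic
+  * txq-status poll work) can tell how many queues were restarted.
+  */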
ret_val++; + } + } + + return ret_val; +} + +/** + * print_link_info - Print link information + * @netdev: network device + */ +static void print_link_info(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && + ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { + struct oct_link_info *linfo = &lio->linfo; + + if (linfo->link.s.link_up) { + netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", + linfo->link.s.speed, + (linfo->link.s.duplex) ? "Full" : "Half"); + } else { + netif_info(lio, link, lio->netdev, "Link Down\n"); + } + } +} + +/** + * octnet_link_status_change - Routine to notify MTU change + * @work: work_struct data structure + */ +static void octnet_link_status_change(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + + /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. + * this API is invoked only when new max-MTU of the interface is + * less than current MTU. + */ + rtnl_lock(); + dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); + rtnl_unlock(); +} + +/** + * setup_link_status_change_wq - Sets up the mtu status change work + * @netdev: network device + */ +static inline int setup_link_status_change_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->link_status_wq.wq = alloc_workqueue("link-status", + WQ_MEM_RECLAIM, 0); + if (!lio->link_status_wq.wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); + return -1; + } + INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, + octnet_link_status_change); + lio->link_status_wq.wk.ctxptr = lio; + + return 0; +} + +static inline void cleanup_link_status_change_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (lio->link_status_wq.wq) { + cancel_delayed_work_sync(&lio->link_status_wq.wk.work); + destroy_workqueue(lio->link_status_wq.wq); + } +} + +/** + * update_link_status - Update link status + * @netdev: network device + * @ls: link status structure + * + * Called on receipt of a link status response from the core application to + * update each interface's link status. 
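+ * On a change this toggles the carrier state, wakes or stops the tx
+ * queues, records the new max MTU, and queues the link-status work to
+ * shrink the current MTU when the new maximum is below it.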
+ */ +static inline void update_link_status(struct net_device *netdev, + union oct_link_status *ls) +{ + struct lio *lio = GET_LIO(netdev); + int changed = (lio->linfo.link.u64 != ls->u64); + int current_max_mtu = lio->linfo.link.s.mtu; + struct octeon_device *oct = lio->oct_dev; + + dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n", + __func__, lio->linfo.link.u64, ls->u64); + lio->linfo.link.u64 = ls->u64; + + if ((lio->intf_open) && (changed)) { + print_link_info(netdev); + lio->link_changes++; + + if (lio->linfo.link.s.link_up) { + dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__); + netif_carrier_on(netdev); + wake_txqs(netdev); + } else { + dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__); + netif_carrier_off(netdev); + stop_txqs(netdev); + } + if (lio->linfo.link.s.mtu != current_max_mtu) { + netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n", + current_max_mtu, lio->linfo.link.s.mtu); + netdev->max_mtu = lio->linfo.link.s.mtu; + } + if (lio->linfo.link.s.mtu < netdev->mtu) { + dev_warn(&oct->pci_dev->dev, + "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", + netdev->mtu, lio->linfo.link.s.mtu); + queue_delayed_work(lio->link_status_wq.wq, + &lio->link_status_wq.wk.work, 0); + } + } +} + +/** + * lio_sync_octeon_time - send latest localtime to octeon firmware so that + * firmware will correct it's time, in case there is a time skew + * + * @work: work scheduled to send time update to octeon firmware + **/ +static void lio_sync_octeon_time(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + struct octeon_device *oct = lio->oct_dev; + struct octeon_soft_command *sc; + struct timespec64 ts; + struct lio_time *lt; + int ret; + + sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0); + if (!sc) { + dev_err(&oct->pci_dev->dev, + "Failed to sync time to octeon: soft command allocation failed\n"); + return; + } + + lt = (struct lio_time *)sc->virtdptr; + + /* Get time of the day */ + ktime_get_real_ts64(&ts); + lt->sec = ts.tv_sec; + lt->nsec = ts.tv_nsec; + octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + ret = octeon_send_soft_command(oct, sc); + if (ret == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, + "Failed to sync time to octeon: failed to send soft command\n"); + octeon_free_soft_command(oct, sc); + } else { + WRITE_ONCE(sc->caller_is_done, true); + } + + queue_delayed_work(lio->sync_octeon_time_wq.wq, + &lio->sync_octeon_time_wq.wk.work, + msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); +} + +/** + * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware + * + * @netdev: network device which should send time update to firmware + **/ +static inline int setup_sync_octeon_time_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->sync_octeon_time_wq.wq = + alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0); + if (!lio->sync_octeon_time_wq.wq) { + dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n"); + return -1; + } + INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work, + lio_sync_octeon_time); + lio->sync_octeon_time_wq.wk.ctxptr = lio; + 
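+ /* Queue the first sync one interval out; lio_sync_octeon_time()
+  * then re-queues itself every LIO_SYNC_OCTEON_TIME_INTERVAL_MS
+  * (60 seconds) so the firmware clock keeps tracking host time.
+  */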
queue_delayed_work(lio->sync_octeon_time_wq.wq, + &lio->sync_octeon_time_wq.wk.work, + msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS)); + + return 0; +} + +/** + * cleanup_sync_octeon_time_wq - destroy wq + * + * @netdev: network device which should send time update to firmware + * + * Stop scheduling and destroy the work created to periodically update local + * time to octeon firmware. + **/ +static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct cavium_wq *time_wq = &lio->sync_octeon_time_wq; + + if (time_wq->wq) { + cancel_delayed_work_sync(&time_wq->wk.work); + destroy_workqueue(time_wq->wq); + } +} + +static struct octeon_device *get_other_octeon_device(struct octeon_device *oct) +{ + struct octeon_device *other_oct; + + other_oct = lio_get_device(oct->octeon_id + 1); + + if (other_oct && other_oct->pci_dev) { + int oct_busnum, other_oct_busnum; + + oct_busnum = oct->pci_dev->bus->number; + other_oct_busnum = other_oct->pci_dev->bus->number; + + if (oct_busnum == other_oct_busnum) { + int oct_slot, other_oct_slot; + + oct_slot = PCI_SLOT(oct->pci_dev->devfn); + other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn); + + if (oct_slot == other_oct_slot) + return other_oct; + } + } + + return NULL; +} + +static void disable_all_vf_links(struct octeon_device *oct) +{ + struct net_device *netdev; + int max_vfs, vf, i; + + if (!oct) + return; + + max_vfs = oct->sriov_info.max_vfs; + + for (i = 0; i < oct->ifcount; i++) { + netdev = oct->props[i].netdev; + if (!netdev) + continue; + + for (vf = 0; vf < max_vfs; vf++) + liquidio_set_vf_link_state(netdev, vf, + IFLA_VF_LINK_STATE_DISABLE); + } +} + +static int liquidio_watchdog(void *param) +{ + bool err_msg_was_printed[LIO_MAX_CORES]; + u16 mask_of_crashed_or_stuck_cores = 0; + bool all_vf_links_are_disabled = false; + struct octeon_device *oct = param; + struct octeon_device *other_oct; +#ifdef CONFIG_MODULE_UNLOAD + long refcount, vfs_referencing_pf; + u64 vfs_mask1, vfs_mask2; +#endif + int core; + + memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed)); + + while (!kthread_should_stop()) { + /* sleep for a couple of seconds so that we don't hog the CPU */ + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(2000)); + + mask_of_crashed_or_stuck_cores = + (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2); + + if (!mask_of_crashed_or_stuck_cores) + continue; + + WRITE_ONCE(oct->cores_crashed, true); + other_oct = get_other_octeon_device(oct); + if (other_oct) + WRITE_ONCE(other_oct->cores_crashed, true); + + for (core = 0; core < LIO_MAX_CORES; core++) { + bool core_crashed_or_got_stuck; + + core_crashed_or_got_stuck = + (mask_of_crashed_or_stuck_cores + >> core) & 1; + + if (core_crashed_or_got_stuck && + !err_msg_was_printed[core]) { + dev_err(&oct->pci_dev->dev, + "ERROR: Octeon core %d crashed or got stuck! 
See oct-fwdump for details.\n", + core); + err_msg_was_printed[core] = true; + } + } + + if (all_vf_links_are_disabled) + continue; + + disable_all_vf_links(oct); + disable_all_vf_links(other_oct); + all_vf_links_are_disabled = true; + +#ifdef CONFIG_MODULE_UNLOAD + vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask); + vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask); + + vfs_referencing_pf = hweight64(vfs_mask1); + vfs_referencing_pf += hweight64(vfs_mask2); + + refcount = module_refcount(THIS_MODULE); + if (refcount >= vfs_referencing_pf) { + while (vfs_referencing_pf) { + module_put(THIS_MODULE); + vfs_referencing_pf--; + } + } +#endif + } + + return 0; +} + +/** + * liquidio_probe - PCI probe handler + * @pdev: PCI device structure + * @ent: unused + */ +static int +liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent) +{ + struct octeon_device *oct_dev = NULL; + struct handshake *hs; + + oct_dev = octeon_allocate_device(pdev->device, + sizeof(struct octeon_device_priv)); + if (!oct_dev) { + dev_err(&pdev->dev, "Unable to allocate device\n"); + return -ENOMEM; + } + + if (pdev->device == OCTEON_CN23XX_PF_VID) + oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; + + /* Enable PTP for 6XXX Device */ + if (((pdev->device == OCTEON_CN66XX) || + (pdev->device == OCTEON_CN68XX))) + oct_dev->ptp_enable = true; + else + oct_dev->ptp_enable = false; + + dev_info(&pdev->dev, "Initializing device %x:%x.\n", + (u32)pdev->vendor, (u32)pdev->device); + + /* Assign octeon_device for this device to the private data area. */ + pci_set_drvdata(pdev, oct_dev); + + /* set linux specific device pointer */ + oct_dev->pci_dev = (void *)pdev; + + oct_dev->subsystem_id = pdev->subsystem_vendor | + (pdev->subsystem_device << 16); + + hs = &handshake[oct_dev->octeon_id]; + init_completion(&hs->init); + init_completion(&hs->started); + hs->pci_dev = pdev; + + if (oct_dev->octeon_id == 0) + /* first LiquidIO NIC is detected */ + complete(&first_stage); + + if (octeon_device_init(oct_dev)) { + complete(&hs->init); + liquidio_remove(pdev); + return -ENOMEM; + } + + if (OCTEON_CN23XX_PF(oct_dev)) { + u8 bus, device, function; + + if (atomic_read(oct_dev->adapter_refcount) == 1) { + /* Each NIC gets one watchdog kernel thread. The first + * PF (of each NIC) that gets pci_driver->probe()'d + * creates that thread. + */ + bus = pdev->bus->number; + device = PCI_SLOT(pdev->devfn); + function = PCI_FUNC(pdev->devfn); + oct_dev->watchdog_task = kthread_run(liquidio_watchdog, + oct_dev, + "liowd/%02hhx:%02hhx.%hhx", + bus, device, function); + if (IS_ERR(oct_dev->watchdog_task)) { + oct_dev->watchdog_task = NULL; + dev_err(&oct_dev->pci_dev->dev, + "failed to create kernel_thread\n"); + liquidio_remove(pdev); + return -1; + } + } + } + + oct_dev->rx_pause = 1; + oct_dev->tx_pause = 1; + + dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); + + return 0; +} + +static bool fw_type_is_auto(void) +{ + return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO, + sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0; +} + +/** + * octeon_pci_flr - PCI FLR for each Octeon device. 
+ * @oct: octeon device + */ +static void octeon_pci_flr(struct octeon_device *oct) +{ + int rc; + + pci_save_state(oct->pci_dev); + + pci_cfg_access_lock(oct->pci_dev); + + /* Quiesce the device completely */ + pci_write_config_word(oct->pci_dev, PCI_COMMAND, + PCI_COMMAND_INTX_DISABLE); + + rc = __pci_reset_function_locked(oct->pci_dev); + + if (rc != 0) + dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n", + rc, oct->pf_num); + + pci_cfg_access_unlock(oct->pci_dev); + + pci_restore_state(oct->pci_dev); +} + +/** + * octeon_destroy_resources - Destroy resources associated with octeon device + * @oct: octeon device + */ +static void octeon_destroy_resources(struct octeon_device *oct) +{ + int i, refcount; + struct msix_entry *msix_entries; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + + struct handshake *hs; + + switch (atomic_read(&oct->status)) { + case OCT_DEV_RUNNING: + case OCT_DEV_CORE_OK: + + /* No more instructions will be forwarded. */ + atomic_set(&oct->status, OCT_DEV_IN_RESET); + + oct->app_mode = CVM_DRV_INVALID_APP; + dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", + lio_get_state_string(&oct->status)); + + schedule_timeout_uninterruptible(HZ / 10); + + fallthrough; + case OCT_DEV_HOST_OK: + + case OCT_DEV_CONSOLE_INIT_DONE: + /* Remove any consoles */ + octeon_remove_consoles(oct); + + fallthrough; + case OCT_DEV_IO_QUEUES_DONE: + if (lio_wait_for_instr_fetch(oct)) + dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon, but we should wait for all packet + * processing to finish. + */ + oct->fn_list.disable_io_queues(oct); + + if (lio_wait_for_oq_pkts(oct)) + dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); + + /* Force all requests waiting to be fetched by OCTEON to + * complete. 
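+ * This is done by resetting each IQ's read index to the host write
+ * index, accounting the pending instructions as processed, and then
+ * running the request-list completions for them.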
+ */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + + fallthrough; + case OCT_DEV_INTR_SET_DONE: + /* Disable interrupts */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + if (oct->msix_on) { + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < oct->num_msix_irqs - 1; i++) { + if (oct->ioq_vector[i].vector) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } + } + /* non-iov vector's argument is oct struct */ + free_irq(msix_entries[i].vector, oct); + + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + } else { + /* Release the interrupt line */ + free_irq(oct->pci_dev->irq, oct); + + if (oct->flags & LIO_FLAG_MSI_ENABLED) + pci_disable_msi(oct->pci_dev); + } + + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + + fallthrough; + case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: + if (OCTEON_CN23XX_PF(oct)) + octeon_free_ioq_vector(oct); + + fallthrough; + case OCT_DEV_MBOX_SETUP_DONE: + if (OCTEON_CN23XX_PF(oct)) + oct->fn_list.free_mbox(oct); + + fallthrough; + case OCT_DEV_IN_RESET: + case OCT_DEV_DROQ_INIT_DONE: + /* Wait for any pending operations */ + mdelay(100); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + octeon_delete_droq(oct, i); + } + + /* Force any pending handshakes to complete */ + for (i = 0; i < MAX_OCTEON_DEVICES; i++) { + hs = &handshake[i]; + + if (hs->pci_dev) { + handshake[oct->octeon_id].init_ok = 0; + complete(&handshake[oct->octeon_id].init); + handshake[oct->octeon_id].started_ok = 0; + complete(&handshake[oct->octeon_id].started); + } + } + + fallthrough; + case OCT_DEV_RESP_LIST_INIT_DONE: + octeon_delete_response_list(oct); + + fallthrough; + case OCT_DEV_INSTR_QUEUE_INIT_DONE: + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + octeon_delete_instr_queue(oct, i); + } +#ifdef CONFIG_PCI_IOV + if (oct->sriov_info.sriov_enabled) + pci_disable_sriov(oct->pci_dev); +#endif + fallthrough; + case OCT_DEV_SC_BUFF_POOL_INIT_DONE: + octeon_free_sc_buffer_pool(oct); + + fallthrough; + case OCT_DEV_DISPATCH_INIT_DONE: + octeon_delete_dispatch_list(oct); + cancel_delayed_work_sync(&oct->nic_poll_work.work); + + fallthrough; + case OCT_DEV_PCI_MAP_DONE: + refcount = octeon_deregister_device(oct); + + /* Soft reset the octeon device before exiting. + * However, if fw was loaded from card (i.e. autoboot), + * perform an FLR instead. + * Implementation note: only soft-reset the device + * if it is a CN6XXX OR the LAST CN23XX device. 
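+ * A refcount of zero from octeon_deregister_device() above means this
+ * was the last function of the adapter still registered.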
+ */ + if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED) + octeon_pci_flr(oct); + else if (OCTEON_CN6XXX(oct) || !refcount) + oct->fn_list.soft_reset(oct); + + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + + fallthrough; + case OCT_DEV_PCI_ENABLE_DONE: + pci_clear_master(oct->pci_dev); + /* Disable the device, releasing the PCI INT */ + pci_disable_device(oct->pci_dev); + + fallthrough; + case OCT_DEV_BEGIN_STATE: + /* Nothing to be done here either */ + break; + } /* end switch (oct->status) */ + + tasklet_kill(&oct_priv->droq_tasklet); +} + +/** + * send_rx_ctrl_cmd - Send Rx control command + * @lio: per-network private data + * @start_stop: whether to start or stop + */ +static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) +{ + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + int retval; + + if (oct->props[lio->ifidx].rx_on == start_stop) + return 0; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + 16, 0); + if (!sc) { + netif_info(lio, rx_err, lio->netdev, + "Failed to allocate octeon_soft_command struct\n"); + return -ENOMEM; + } + + ncmd = (union octnet_cmd *)sc->virtdptr; + + ncmd->u64 = 0; + ncmd->s.cmd = OCTNET_CMD_RX_CTL; + ncmd->s.param1 = start_stop; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_CMD, 0, 0, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); + } else { + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +/** + * liquidio_destroy_nic_device - Destroy NIC device interface + * @oct: octeon device + * @ifidx: which interface to destroy + * + * Cleanup associated with each interface for an Octeon device when NIC + * module is being unloaded or if initialization fails during load. 
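+ * Teardown order: stop the interface if it is running, disable and
+ * delete NAPI, unregister the netdev, destroy the time-sync,
+ * link-status and rx-oom poll work, free the gather lists, then free
+ * the netdev.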
+ */ +static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) +{ + struct net_device *netdev = oct->props[ifidx].netdev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct napi_struct *napi, *n; + struct lio *lio; + + if (!netdev) { + dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", + __func__, ifidx); + return; + } + + lio = GET_LIO(netdev); + + dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) + liquidio_stop(netdev); + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } + + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + + tasklet_enable(&oct_priv->droq_tasklet); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) + unregister_netdev(netdev); + + cleanup_sync_octeon_time_wq(netdev); + cleanup_link_status_change_wq(netdev); + + cleanup_rx_oom_poll_fn(netdev); + + lio_delete_glists(lio); + + free_netdev(netdev); + + oct->props[ifidx].gmxport = -1; + + oct->props[ifidx].netdev = NULL; +} + +/** + * liquidio_stop_nic_module - Stop complete NIC functionality + * @oct: octeon device + */ +static int liquidio_stop_nic_module(struct octeon_device *oct) +{ + int i, j; + struct lio *lio; + + dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); + device_lock(&oct->pci_dev->dev); + if (oct->devlink) { + devlink_unregister(oct->devlink); + devlink_free(oct->devlink); + oct->devlink = NULL; + } + device_unlock(&oct->pci_dev->dev); + + if (!oct->ifcount) { + dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); + return 1; + } + + spin_lock_bh(&oct->cmd_resp_wqlock); + oct->cmd_resp_state = OCT_DRV_OFFLINE; + spin_unlock_bh(&oct->cmd_resp_wqlock); + + lio_vf_rep_destroy(oct); + + for (i = 0; i < oct->ifcount; i++) { + lio = GET_LIO(oct->props[i].netdev); + for (j = 0; j < oct->num_oqs; j++) + octeon_unregister_droq_ops(oct, + lio->linfo.rxpciq[j].s.q_no); + } + + for (i = 0; i < oct->ifcount; i++) + liquidio_destroy_nic_device(oct, i); + + dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); + return 0; +} + +/** + * liquidio_remove - Cleans up resources at unload time + * @pdev: PCI device structure + */ +static void liquidio_remove(struct pci_dev *pdev) +{ + struct octeon_device *oct_dev = pci_get_drvdata(pdev); + + dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); + + if (oct_dev->watchdog_task) + kthread_stop(oct_dev->watchdog_task); + + if (!oct_dev->octeon_id && + oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) + lio_vf_rep_modexit(); + + if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) + liquidio_stop_nic_module(oct_dev); + + /* Reset the octeon device and cleanup all memory allocated for + * the octeon device by driver. + */ + octeon_destroy_resources(oct_dev); + + dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); + + /* This octeon device has been removed. Update the global + * data structure to reflect this. Free the device structure. 
+ */ + octeon_free_device_mem(oct_dev); +} + +/** + * octeon_chip_specific_setup - Identify the Octeon device and to map the BAR address space + * @oct: octeon device + */ +static int octeon_chip_specific_setup(struct octeon_device *oct) +{ + u32 dev_id, rev_id; + int ret = 1; + + pci_read_config_dword(oct->pci_dev, 0, &dev_id); + pci_read_config_dword(oct->pci_dev, 8, &rev_id); + oct->rev_id = rev_id & 0xff; + + switch (dev_id) { + case OCTEON_CN68XX_PCIID: + oct->chip_id = OCTEON_CN68XX; + ret = lio_setup_cn68xx_octeon_device(oct); + break; + + case OCTEON_CN66XX_PCIID: + oct->chip_id = OCTEON_CN66XX; + ret = lio_setup_cn66xx_octeon_device(oct); + break; + + case OCTEON_CN23XX_PCIID_PF: + oct->chip_id = OCTEON_CN23XX_PF_VID; + ret = setup_cn23xx_octeon_pf_device(oct); + if (ret) + break; +#ifdef CONFIG_PCI_IOV + if (!ret) + pci_sriov_set_totalvfs(oct->pci_dev, + oct->sriov_info.max_vfs); +#endif + break; + + default: + dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n", + dev_id); + } + + return ret; +} + +/** + * octeon_pci_os_setup - PCI initialization for each Octeon device. + * @oct: octeon device + */ +static int octeon_pci_os_setup(struct octeon_device *oct) +{ + /* setup PCI stuff first */ + if (pci_enable_device(oct->pci_dev)) { + dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); + return 1; + } + + if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { + dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); + pci_disable_device(oct->pci_dev); + return 1; + } + + /* Enable PCI DMA Master. */ + pci_set_master(oct->pci_dev); + + return 0; +} + +/** + * free_netbuf - Unmap and free network buffer + * @buf: buffer + */ +static void free_netbuf(void *buf) +{ + struct sk_buff *skb; + struct octnet_buf_free_info *finfo; + struct lio *lio; + + finfo = (struct octnet_buf_free_info *)buf; + skb = finfo->skb; + lio = finfo->lio; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, + DMA_TO_DEVICE); + + tx_buffer_free(skb); +} + +/** + * free_netsgbuf - Unmap and free gather buffer + * @buf: buffer + */ +static void free_netsgbuf(void *buf) +{ + struct octnet_buf_free_info *finfo; + struct sk_buff *skb; + struct lio *lio; + struct octnic_gather *g; + int i, frags, iq; + + finfo = (struct octnet_buf_free_info *)buf; + skb = finfo->skb; + lio = finfo->lio; + g = finfo->g; + frags = skb_shinfo(skb)->nr_frags; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + g->sg[0].ptr[0], (skb->len - skb->data_len), + DMA_TO_DEVICE); + + i = 1; + while (frags--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + + dma_unmap_page(&lio->oct_dev->pci_dev->dev, + g->sg[(i >> 2)].ptr[(i & 3)], + skb_frag_size(frag), DMA_TO_DEVICE); + i++; + } + + iq = skb_iq(lio->oct_dev, skb); + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); + + tx_buffer_free(skb); +} + +/** + * free_netsgbuf_with_resp - Unmap and free gather buffer with response + * @buf: buffer + */ +static void free_netsgbuf_with_resp(void *buf) +{ + struct octeon_soft_command *sc; + struct octnet_buf_free_info *finfo; + struct sk_buff *skb; + struct lio *lio; + struct octnic_gather *g; + int i, frags, iq; + + sc = (struct octeon_soft_command *)buf; + skb = (struct sk_buff *)sc->callback_arg; + finfo = (struct octnet_buf_free_info *)&skb->cb; + + lio = finfo->lio; + g = finfo->g; + frags = skb_shinfo(skb)->nr_frags; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + g->sg[0].ptr[0], (skb->len - skb->data_len), + 
DMA_TO_DEVICE); + + i = 1; + while (frags--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + + dma_unmap_page(&lio->oct_dev->pci_dev->dev, + g->sg[(i >> 2)].ptr[(i & 3)], + skb_frag_size(frag), DMA_TO_DEVICE); + i++; + } + + iq = skb_iq(lio->oct_dev, skb); + + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); + + /* Don't free the skb yet */ +} + +/** + * liquidio_ptp_adjfreq - Adjust ptp frequency + * @ptp: PTP clock info + * @ppb: how much to adjust by, in parts-per-billion + */ +static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct lio *lio = container_of(ptp, struct lio, ptp_info); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + u64 comp, delta; + unsigned long flags; + bool neg_adj = false; + + if (ppb < 0) { + neg_adj = true; + ppb = -ppb; + } + + /* The hardware adds the clock compensation value to the + * PTP clock on every coprocessor clock cycle, so we + * compute the delta in terms of coprocessor clocks. + */ + delta = (u64)ppb << 32; + do_div(delta, oct->coproc_clock_rate); + + spin_lock_irqsave(&lio->ptp_lock, flags); + comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); + if (neg_adj) + comp -= delta; + else + comp += delta; + lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + return 0; +} + +/** + * liquidio_ptp_adjtime - Adjust ptp time + * @ptp: PTP clock info + * @delta: how much to adjust by, in nanosecs + */ +static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + unsigned long flags; + struct lio *lio = container_of(ptp, struct lio, ptp_info); + + spin_lock_irqsave(&lio->ptp_lock, flags); + lio->ptp_adjust += delta; + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + return 0; +} + +/** + * liquidio_ptp_gettime - Get hardware clock time, including any adjustment + * @ptp: PTP clock info + * @ts: timespec + */ +static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct lio *lio = container_of(ptp, struct lio, ptp_info); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + + spin_lock_irqsave(&lio->ptp_lock, flags); + ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); + ns += lio->ptp_adjust; + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * liquidio_ptp_settime - Set hardware clock time. 
Reset adjustment + * @ptp: PTP clock info + * @ts: timespec + */ +static int liquidio_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct lio *lio = container_of(ptp, struct lio, ptp_info); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + + ns = timespec64_to_ns(ts); + + spin_lock_irqsave(&lio->ptp_lock, flags); + lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); + lio->ptp_adjust = 0; + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + return 0; +} + +/** + * liquidio_ptp_enable - Check if PTP is enabled + * @ptp: PTP clock info + * @rq: request + * @on: is it on + */ +static int +liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp, + struct ptp_clock_request __maybe_unused *rq, + int __maybe_unused on) +{ + return -EOPNOTSUPP; +} + +/** + * oct_ptp_open - Open PTP clock source + * @netdev: network device + */ +static void oct_ptp_open(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + + spin_lock_init(&lio->ptp_lock); + + snprintf(lio->ptp_info.name, 16, "%s", netdev->name); + lio->ptp_info.owner = THIS_MODULE; + lio->ptp_info.max_adj = 250000000; + lio->ptp_info.n_alarm = 0; + lio->ptp_info.n_ext_ts = 0; + lio->ptp_info.n_per_out = 0; + lio->ptp_info.pps = 0; + lio->ptp_info.adjfreq = liquidio_ptp_adjfreq; + lio->ptp_info.adjtime = liquidio_ptp_adjtime; + lio->ptp_info.gettime64 = liquidio_ptp_gettime; + lio->ptp_info.settime64 = liquidio_ptp_settime; + lio->ptp_info.enable = liquidio_ptp_enable; + + lio->ptp_adjust = 0; + + lio->ptp_clock = ptp_clock_register(&lio->ptp_info, + &oct->pci_dev->dev); + + if (IS_ERR(lio->ptp_clock)) + lio->ptp_clock = NULL; +} + +/** + * liquidio_ptp_init - Init PTP clock + * @oct: octeon device + */ +static void liquidio_ptp_init(struct octeon_device *oct) +{ + u64 clock_comp, cfg; + + clock_comp = (u64)NSEC_PER_SEC << 32; + do_div(clock_comp, oct->coproc_clock_rate); + lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); + + /* Enable */ + cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); + lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); +} + +/** + * load_firmware - Load firmware to device + * @oct: octeon device + * + * Maps device to firmware filename, requests firmware, and downloads it + */ +static int load_firmware(struct octeon_device *oct) +{ + int ret = 0; + const struct firmware *fw; + char fw_name[LIO_MAX_FW_FILENAME_LEN]; + char *tmp_fw_type; + + if (fw_type_is_auto()) { + tmp_fw_type = LIO_FW_NAME_TYPE_NIC; + strncpy(fw_type, tmp_fw_type, sizeof(fw_type)); + } else { + tmp_fw_type = fw_type; + } + + sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, + octeon_get_conf(oct)->card_name, tmp_fw_type, + LIO_FW_NAME_SUFFIX); + + ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); + if (ret) { + dev_err(&oct->pci_dev->dev, "Request firmware failed. 
Could not find file %s.\n", + fw_name); + release_firmware(fw); + return ret; + } + + ret = octeon_download_firmware(oct, fw->data, fw->size); + + release_firmware(fw); + + return ret; +} + +/** + * octnet_poll_check_txq_status - Poll routine for checking transmit queue status + * @work: work_struct data structure + */ +static void octnet_poll_check_txq_status(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) + return; + + check_txq_status(lio); + queue_delayed_work(lio->txq_status_wq.wq, + &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); +} + +/** + * setup_tx_poll_fn - Sets up the txq poll check + * @netdev: network device + */ +static inline int setup_tx_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->txq_status_wq.wq = alloc_workqueue("txq-status", + WQ_MEM_RECLAIM, 0); + if (!lio->txq_status_wq.wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); + return -1; + } + INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, + octnet_poll_check_txq_status); + lio->txq_status_wq.wk.ctxptr = lio; + queue_delayed_work(lio->txq_status_wq.wq, + &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); + return 0; +} + +static inline void cleanup_tx_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (lio->txq_status_wq.wq) { + cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); + destroy_workqueue(lio->txq_status_wq.wq); + } +} + +/** + * liquidio_open - Net device open for LiquidIO + * @netdev: network device + */ +static int liquidio_open(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct napi_struct *napi, *n; + int ret = 0; + + if (oct->props[lio->ifidx].napi_enabled == 0) { + tasklet_disable(&oct_priv->droq_tasklet); + + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_enable(napi); + + oct->props[lio->ifidx].napi_enabled = 1; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 1; + } + + if (oct->ptp_enable) + oct_ptp_open(netdev); + + ifstate_set(lio, LIO_IFSTATE_RUNNING); + + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) { + ret = setup_tx_poll_fn(netdev); + if (ret) + goto err_poll; + } + + netif_tx_start_all_queues(netdev); + + /* Ready for link status updates */ + lio->intf_open = 1; + + netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); + + /* tell Octeon to start forwarding packets to host */ + ret = send_rx_ctrl_cmd(lio, 1); + if (ret) + goto err_rx_ctrl; + + /* start periodical statistics fetch */ + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + + dev_info(&oct->pci_dev->dev, "%s interface is opened\n", + netdev->name); + + return 0; + +err_rx_ctrl: + if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) + cleanup_tx_poll_fn(netdev); +err_poll: + if (lio->ptp_clock) { + ptp_clock_unregister(lio->ptp_clock); + lio->ptp_clock = NULL; + } + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } + + return 
ret; +} + +/** + * liquidio_stop - Net device stop for LiquidIO + * @netdev: network device + */ +static int liquidio_stop(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct napi_struct *napi, *n; + int ret = 0; + + ifstate_reset(lio, LIO_IFSTATE_RUNNING); + + /* Stop any link updates */ + lio->intf_open = 0; + + stop_txqs(netdev); + + /* Inform that netif carrier is down */ + netif_carrier_off(netdev); + netif_tx_disable(netdev); + + lio->linfo.link.s.link_up = 0; + lio->link_changes++; + + /* Tell Octeon that nic interface is down. */ + ret = send_rx_ctrl_cmd(lio, 0); + if (ret) + return ret; + + if (OCTEON_CN23XX_PF(oct)) { + if (!oct->msix_on) + cleanup_tx_poll_fn(netdev); + } else { + cleanup_tx_poll_fn(netdev); + } + + cancel_delayed_work_sync(&lio->stats_wk.work); + + if (lio->ptp_clock) { + ptp_clock_unregister(lio->ptp_clock); + lio->ptp_clock = NULL; + } + + /* Wait for any pending Rx descriptors */ + if (lio_wait_for_clean_oq(oct)) + netif_info(lio, rx_err, lio->netdev, + "Proceeding with stop interface after partial RX desc processing\n"); + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); + } + + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); + + return ret; +} + +/** + * get_new_flags - Converts a mask based on net device flags + * @netdev: network device + * + * This routine generates a octnet_ifflags mask from the net device flags + * received from the OS. + */ +static inline enum octnet_ifflags get_new_flags(struct net_device *netdev) +{ + enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; + + if (netdev->flags & IFF_PROMISC) + f |= OCTNET_IFFLAG_PROMISC; + + if (netdev->flags & IFF_ALLMULTI) + f |= OCTNET_IFFLAG_ALLMULTI; + + if (netdev->flags & IFF_MULTICAST) { + f |= OCTNET_IFFLAG_MULTICAST; + + /* Accept all multicast addresses if there are more than we + * can handle + */ + if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) + f |= OCTNET_IFFLAG_ALLMULTI; + } + + if (netdev->flags & IFF_BROADCAST) + f |= OCTNET_IFFLAG_BROADCAST; + + return f; +} + +/** + * liquidio_set_mcast_list - Net device set_multicast_list + * @netdev: network device + */ +static void liquidio_set_mcast_list(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct netdev_hw_addr *ha; + u64 *mc; + int ret; + int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + /* Create a ctrl pkt command to be sent to core app. 
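+ * param1 carries the interface flags, param2 and "more" carry the
+ * multicast address count, and the addresses themselves are packed
+ * into the udd[] words (each MAC copied in at byte offset 2 of a u64).
+ * The command is sent without waiting for a response since this path
+ * must stay atomic.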
*/ + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; + nctrl.ncmd.s.param1 = get_new_flags(netdev); + nctrl.ncmd.s.param2 = mc_count; + nctrl.ncmd.s.more = mc_count; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + /* copy all the addresses into the udd */ + mc = &nctrl.udd[0]; + netdev_for_each_mc_addr(ha, netdev) { + *mc = 0; + memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN); + /* no need to swap bytes */ + + if (++mc > &nctrl.udd[mc_count]) + break; + } + + /* Apparently, any activity in this call from the kernel has to + * be atomic. So we won't wait for response. + */ + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", + ret); + } +} + +/** + * liquidio_set_mac - Net device set_mac_address + * @netdev: network device + * @p: pointer to sockaddr + */ +static int liquidio_set_mac(struct net_device *netdev, void *p) +{ + int ret = 0; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct sockaddr *addr = (struct sockaddr *)p; + struct octnic_ctrl_pkt nctrl; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; + nctrl.ncmd.s.param1 = 0; + nctrl.ncmd.s.more = 1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + + nctrl.udd[0] = 0; + /* The MAC Address is presented in network byte order. */ + memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); + return -ENOMEM; + } + + if (nctrl.sc_status) { + dev_err(&oct->pci_dev->dev, + "%s: MAC Address change failed. 
sc return=%x\n", + __func__, nctrl.sc_status); + return -EIO; + } + + eth_hw_addr_set(netdev, addr->sa_data); + memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); + + return 0; +} + +static void +liquidio_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *lstats) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct; + u64 pkts = 0, drop = 0, bytes = 0; + struct oct_droq_stats *oq_stats; + struct oct_iq_stats *iq_stats; + int i, iq_no, oq_no; + + oct = lio->oct_dev; + + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + + for (i = 0; i < oct->num_iqs; i++) { + iq_no = lio->linfo.txpciq[i].s.q_no; + iq_stats = &oct->instr_queue[iq_no]->stats; + pkts += iq_stats->tx_done; + drop += iq_stats->tx_dropped; + bytes += iq_stats->tx_tot_bytes; + } + + lstats->tx_packets = pkts; + lstats->tx_bytes = bytes; + lstats->tx_dropped = drop; + + pkts = 0; + drop = 0; + bytes = 0; + + for (i = 0; i < oct->num_oqs; i++) { + oq_no = lio->linfo.rxpciq[i].s.q_no; + oq_stats = &oct->droq[oq_no]->stats; + pkts += oq_stats->rx_pkts_received; + drop += (oq_stats->rx_dropped + + oq_stats->dropped_nodispatch + + oq_stats->dropped_toomany + + oq_stats->dropped_nomem); + bytes += oq_stats->rx_bytes_received; + } + + lstats->rx_bytes = bytes; + lstats->rx_packets = pkts; + lstats->rx_dropped = drop; + + lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; + lstats->collisions = oct->link_stats.fromhost.total_collisions; + + /* detailed rx_errors: */ + lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; + /* recved pkt with crc error */ + lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; + /* recv'd frame alignment error */ + lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; + /* recv'r fifo overrun */ + lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; + + lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + + lstats->rx_frame_errors + lstats->rx_fifo_errors; + + /* detailed tx_errors */ + lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; + lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; + lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; + + lstats->tx_errors = lstats->tx_aborted_errors + + lstats->tx_carrier_errors + + lstats->tx_fifo_errors; +} + +/** + * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl + * @netdev: network device + * @ifr: interface request + */ +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +{ + struct hwtstamp_config conf; + struct lio *lio = GET_LIO(netdev); + + if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) + return -EFAULT; + + switch (conf.tx_type) { + case HWTSTAMP_TX_ON: + case HWTSTAMP_TX_OFF: + break; + default: + return -ERANGE; + } + + switch (conf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + conf.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + 
ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + else + ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; +} + +/** + * liquidio_ioctl - ioctl handler + * @netdev: network device + * @ifr: interface request + * @cmd: command + */ +static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct lio *lio = GET_LIO(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + if (lio->oct_dev->ptp_enable) + return hwtstamp_ioctl(netdev, ifr); + fallthrough; + default: + return -EOPNOTSUPP; + } +} + +/** + * handle_timestamp - handle a Tx timestamp response + * @oct: octeon device + * @status: response status + * @buf: pointer to skb + */ +static void handle_timestamp(struct octeon_device *oct, + u32 status, + void *buf) +{ + struct octnet_buf_free_info *finfo; + struct octeon_soft_command *sc; + struct oct_timestamp_resp *resp; + struct lio *lio; + struct sk_buff *skb = (struct sk_buff *)buf; + + finfo = (struct octnet_buf_free_info *)skb->cb; + lio = finfo->lio; + sc = finfo->sc; + oct = lio->oct_dev; + resp = (struct oct_timestamp_resp *)sc->virtrptr; + + if (status != OCTEON_REQUEST_DONE) { + dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n", + CVM_CAST64(status)); + resp->timestamp = 0; + } + + octeon_swap_8B_data(&resp->timestamp, 1); + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { + struct skb_shared_hwtstamps ts; + u64 ns = resp->timestamp; + + netif_info(lio, tx_done, lio->netdev, + "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", + skb, (unsigned long long)ns); + ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); + skb_tstamp_tx(skb, &ts); + } + + octeon_free_soft_command(oct, sc); + tx_buffer_free(skb); +} + +/** + * send_nic_timestamp_pkt - Send a data packet that will be timestamped + * @oct: octeon device + * @ndata: pointer to network data + * @finfo: pointer to private network data + * @xmit_more: more is coming + */ +static inline int send_nic_timestamp_pkt(struct octeon_device *oct, + struct octnic_data_pkt *ndata, + struct octnet_buf_free_info *finfo, + int xmit_more) +{ + int retval; + struct octeon_soft_command *sc; + struct lio *lio; + int ring_doorbell; + u32 len; + + lio = finfo->lio; + + sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, + sizeof(struct oct_timestamp_resp)); + finfo->sc = sc; + + if (!sc) { + dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); + return IQ_SEND_FAILED; + } + + if (ndata->reqtype == REQTYPE_NORESP_NET) + ndata->reqtype = REQTYPE_RESP_NET; + else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) + ndata->reqtype = REQTYPE_RESP_NET_SG; + + sc->callback = handle_timestamp; + sc->callback_arg = finfo->skb; + sc->iq_no = ndata->q_no; + + if (OCTEON_CN23XX_PF(oct)) + len = (u32)((struct octeon_instr_ih3 *) + (&sc->cmd.cmd3.ih3))->dlengsz; + else + len = (u32)((struct octeon_instr_ih2 *) + (&sc->cmd.cmd2.ih2))->dlengsz; + + ring_doorbell = !xmit_more; + + retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, + sc, len, ndata->reqtype); + + if (retval == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", + retval); + octeon_free_soft_command(oct, sc); + } else { + netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); + } + + return retval; +} + +/** + * liquidio_xmit - Transmit networks packets to the Octeon interface + * @skb: skbuff struct to be passed to network layer. 
+ * @netdev: pointer to network device + * + * Return: whether the packet was transmitted to the device okay or not + * (NETDEV_TX_OK or NETDEV_TX_BUSY) + */ +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct lio *lio; + struct octnet_buf_free_info *finfo; + union octnic_cmd_setup cmdsetup; + struct octnic_data_pkt ndata; + struct octeon_device *oct; + struct oct_iq_stats *stats; + struct octeon_instr_irh *irh; + union tx_info *tx_info; + int status = 0; + int q_idx = 0, iq_no = 0; + int j, xmit_more = 0; + u64 dptr = 0; + u32 tag = 0; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + q_idx = skb_iq(oct, skb); + tag = q_idx; + iq_no = lio->linfo.txpciq[q_idx].s.q_no; + + stats = &oct->instr_queue[iq_no]->stats; + + /* Check for all conditions in which the current packet cannot be + * transmitted. + */ + if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || + (!lio->linfo.link.s.link_up) || + (skb->len <= 0)) { + netif_info(lio, tx_err, lio->netdev, + "Transmit failed link_status : %d\n", + lio->linfo.link.s.link_up); + goto lio_xmit_failed; + } + + /* Use space in skb->cb to store info used to unmap and + * free the buffers. + */ + finfo = (struct octnet_buf_free_info *)skb->cb; + finfo->lio = lio; + finfo->skb = skb; + finfo->sc = NULL; + + /* Prepare the attributes for the data to be passed to OSI. */ + memset(&ndata, 0, sizeof(struct octnic_data_pkt)); + + ndata.buf = (void *)finfo; + + ndata.q_no = iq_no; + + if (octnet_iq_is_full(oct, ndata.q_no)) { + /* defer sending if queue is full */ + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + stats->tx_iq_busy++; + return NETDEV_TX_BUSY; + } + + /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", + * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); + */ + + ndata.datasize = skb->len; + + cmdsetup.u64 = 0; + cmdsetup.s.iq_no = iq_no; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->encapsulation) { + cmdsetup.s.tnl_csum = 1; + stats->tx_vxlan++; + } else { + cmdsetup.s.transport_csum = 1; + } + } + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + cmdsetup.s.timestamp = 1; + } + + if (skb_shinfo(skb)->nr_frags == 0) { + cmdsetup.s.u.datasize = skb->len; + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + + /* Offload checksum calculation for TCP/UDP packets */ + dptr = dma_map_single(&oct->pci_dev->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", + __func__); + stats->tx_dmamap_fail++; + return NETDEV_TX_BUSY; + } + + if (OCTEON_CN23XX_PF(oct)) + ndata.cmd.cmd3.dptr = dptr; + else + ndata.cmd.cmd2.dptr = dptr; + finfo->dptr = dptr; + ndata.reqtype = REQTYPE_NORESP_NET; + + } else { + int i, frags; + skb_frag_t *frag; + struct octnic_gather *g; + + spin_lock(&lio->glist_lock[q_idx]); + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[q_idx]); + spin_unlock(&lio->glist_lock[q_idx]); + + if (!g) { + netif_info(lio, tx_err, lio->netdev, + "Transmit scatter gather: glist null!\n"); + goto lio_xmit_failed; + } + + cmdsetup.s.gather = 1; + cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + + memset(g->sg, 0, g->sg_size); + + g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, + skb->data, + (skb->len - skb->data_len), + DMA_TO_DEVICE); + if 
(dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", + __func__); + stats->tx_dmamap_fail++; + return NETDEV_TX_BUSY; + } + add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); + + frags = skb_shinfo(skb)->nr_frags; + i = 1; + while (frags--) { + frag = &skb_shinfo(skb)->frags[i - 1]; + + g->sg[(i >> 2)].ptr[(i & 3)] = + skb_frag_dma_map(&oct->pci_dev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + + if (dma_mapping_error(&oct->pci_dev->dev, + g->sg[i >> 2].ptr[i & 3])) { + dma_unmap_single(&oct->pci_dev->dev, + g->sg[0].ptr[0], + skb->len - skb->data_len, + DMA_TO_DEVICE); + for (j = 1; j < i; j++) { + frag = &skb_shinfo(skb)->frags[j - 1]; + dma_unmap_page(&oct->pci_dev->dev, + g->sg[j >> 2].ptr[j & 3], + skb_frag_size(frag), + DMA_TO_DEVICE); + } + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", + __func__); + return NETDEV_TX_BUSY; + } + + add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), + (i & 3)); + i++; + } + + dptr = g->sg_dma_ptr; + + if (OCTEON_CN23XX_PF(oct)) + ndata.cmd.cmd3.dptr = dptr; + else + ndata.cmd.cmd2.dptr = dptr; + finfo->dptr = dptr; + finfo->g = g; + + ndata.reqtype = REQTYPE_NORESP_NET_SG; + } + + if (OCTEON_CN23XX_PF(oct)) { + irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; + tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; + } else { + irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; + tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; + } + + if (skb_shinfo(skb)->gso_size) { + tx_info->s.gso_size = skb_shinfo(skb)->gso_size; + tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; + stats->tx_gso++; + } + + /* HW insert VLAN tag */ + if (skb_vlan_tag_present(skb)) { + irh->priority = skb_vlan_tag_get(skb) >> 13; + irh->vlan = skb_vlan_tag_get(skb) & 0xfff; + } + + xmit_more = netdev_xmit_more(); + + if (unlikely(cmdsetup.s.timestamp)) + status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); + else + status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); + if (status == IQ_SEND_FAILED) + goto lio_xmit_failed; + + netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); + + if (status == IQ_SEND_STOP) + netif_stop_subqueue(netdev, q_idx); + + netif_trans_update(netdev); + + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; + else + stats->tx_done++; + stats->tx_tot_bytes += ndata.datasize; + + return NETDEV_TX_OK; + +lio_xmit_failed: + stats->tx_dropped++; + netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", + iq_no, stats->tx_dropped); + if (dptr) + dma_unmap_single(&oct->pci_dev->dev, dptr, + ndata.datasize, DMA_TO_DEVICE); + + octeon_ring_doorbell_locked(oct, iq_no); + + tx_buffer_free(skb); + return NETDEV_TX_OK; +} + +/** + * liquidio_tx_timeout - Network device Tx timeout + * @netdev: pointer to network device + * @txqueue: index of the hung transmit queue + */ +static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct lio *lio; + + lio = GET_LIO(netdev); + + netif_info(lio, tx_err, lio->netdev, + "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", + netdev->stats.tx_dropped); + netif_trans_update(netdev); + wake_txqs(netdev); +} + +static int liquidio_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + 
nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + + return ret; +} + +static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), + u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + return ret; +} + +/** + * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload + * @netdev: pointer to network device + * @command: OCTNET_CMD_TNL_RX_CSUM_CTL + * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE + * Returns: SUCCESS or FAILURE + */ +static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, + u8 rx_cmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.param1 = rx_cmd; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, + "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + return ret; +} + +/** + * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware + * @netdev: pointer to network device + * @command: OCTNET_CMD_VXLAN_PORT_CONFIG + * @vxlan_port: VxLAN port to be added or deleted + * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD, + * OCTNET_CMD_VXLAN_PORT_DEL + * Return: SUCCESS or FAILURE + */ +static int liquidio_vxlan_port_command(struct net_device *netdev, int command, + u16 vxlan_port, u8 vxlan_cmd_bit) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.more = vxlan_cmd_bit; + nctrl.ncmd.s.param1 = vxlan_port; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, + "VxLAN port add/delete failed in core (ret:0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + return ret; +} + +static int liquidio_udp_tunnel_set_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_ADD); +} + +static int 
liquidio_udp_tunnel_unset_port(struct net_device *netdev, + unsigned int table, + unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_DEL); +} + +static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { + .set_port = liquidio_udp_tunnel_set_port, + .unset_port = liquidio_udp_tunnel_unset_port, + .tables = { + { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + +/** + * liquidio_fix_features - Net device fix features + * @netdev: pointer to network device + * @request: features requested + * Return: updated features list + */ +static netdev_features_t liquidio_fix_features(struct net_device *netdev, + netdev_features_t request) +{ + struct lio *lio = netdev_priv(netdev); + + if ((request & NETIF_F_RXCSUM) && + !(lio->dev_capability & NETIF_F_RXCSUM)) + request &= ~NETIF_F_RXCSUM; + + if ((request & NETIF_F_HW_CSUM) && + !(lio->dev_capability & NETIF_F_HW_CSUM)) + request &= ~NETIF_F_HW_CSUM; + + if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) + request &= ~NETIF_F_TSO; + + if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) + request &= ~NETIF_F_TSO6; + + if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) + request &= ~NETIF_F_LRO; + + /*Disable LRO if RXCSUM is off */ + if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && + (lio->dev_capability & NETIF_F_LRO)) + request &= ~NETIF_F_LRO; + + if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && + !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) + request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + return request; +} + +/** + * liquidio_set_features - Net device set features + * @netdev: pointer to network device + * @features: features to enable/disable + */ +static int liquidio_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct lio *lio = netdev_priv(netdev); + + if ((features & NETIF_F_LRO) && + (lio->dev_capability & NETIF_F_LRO) && + !(netdev->features & NETIF_F_LRO)) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + else if (!(features & NETIF_F_LRO) && + (lio->dev_capability & NETIF_F_LRO) && + (netdev->features & NETIF_F_LRO)) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + + /* Sending command to firmware to enable/disable RX checksum + * offload settings using ethtool + */ + if (!(netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + (features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, + OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + else if ((netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + !(features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_DISABLE); + + if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && + (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && + !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, + OCTNET_CMD_VLAN_FILTER_ENABLE); + else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && + (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && + (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) + liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, + OCTNET_CMD_VLAN_FILTER_DISABLE); + + return 0; +} + +static int __liquidio_set_vf_mac(struct net_device *netdev, int 
vfidx, + u8 *mac, bool is_admin_assigned) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + if (!is_valid_ether_addr(mac)) + return -EINVAL; + + if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) + return -EINVAL; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; + /* vfidx is 0 based, but vf_num (param1) is 1 based */ + nctrl.ncmd.s.param1 = vfidx + 1; + nctrl.ncmd.s.more = 1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + if (is_admin_assigned) { + nctrl.ncmd.s.param2 = true; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + } + + nctrl.udd[0] = 0; + /* The MAC Address is presented in network byte order. */ + ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); + + oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; + + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret > 0) + ret = -EIO; + + return ret; +} + +static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + int retval; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); + if (!retval) + cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); + + return retval; +} + +static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, + bool enable) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int retval; + + if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { + netif_info(lio, drv, lio->netdev, + "firmware does not support spoofchk\n"); + return -EOPNOTSUPP; + } + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { + netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); + return -EINVAL; + } + + if (enable) { + if (oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } else { + /* Clear */ + if (!oct->sriov_info.vf_spoofchk[vfidx]) + return 0; + } + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; + nctrl.ncmd.s.param1 = + vfidx + 1; /* vfidx is 0 based, + * but vf_num (param1) is 1 based + */ + nctrl.ncmd.s.param2 = enable; + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = NULL; + + retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); + + if (retval) { + netif_info(lio, drv, lio->netdev, + "Failed to set VF %d spoofchk %s\n", vfidx, + enable ? "on" : "off"); + return -1; + } + + oct->sriov_info.vf_spoofchk[vfidx] = enable; + netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, + enable ? 
"on" : "off"); + + return 0; +} + +static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, + u16 vlan, u8 qos, __be16 vlan_proto) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + u16 vlantci; + int ret = 0; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + if (vlan_proto != htons(ETH_P_8021Q)) + return -EPROTONOSUPPORT; + + if (vlan >= VLAN_N_VID || qos > 7) + return -EINVAL; + + if (vlan) + vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; + else + vlantci = 0; + + if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) + return 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + if (vlan) + nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; + else + nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; + + nctrl.ncmd.s.param1 = vlantci; + nctrl.ncmd.s.param2 = + vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = NULL; + + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + if (ret) { + if (ret > 0) + ret = -EIO; + return ret; + } + + oct->sriov_info.vf_vlantci[vfidx] = vlantci; + + return ret; +} + +static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, + struct ifla_vf_info *ivi) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + u8 *macaddr; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + memset(ivi, 0, sizeof(struct ifla_vf_info)); + + ivi->vf = vfidx; + macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; + ether_addr_copy(&ivi->mac[0], macaddr); + ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; + ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; + if (oct->sriov_info.trusted_vf.active && + oct->sriov_info.trusted_vf.id == vfidx) + ivi->trusted = true; + else + ivi->trusted = false; + ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; + ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; + ivi->max_tx_rate = lio->linfo.link.s.speed; + ivi->min_tx_rate = 0; + + return 0; +} + +static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) +{ + struct octeon_device *oct = lio->oct_dev; + struct octeon_soft_command *sc; + int retval; + + sc = octeon_alloc_soft_command(oct, 0, 16, 0); + if (!sc) + return -ENOMEM; + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + /* vfidx is 0 based, but vf_num (param1) is 1 based */ + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, + trusted); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct, sc); + retval = -1; + } else { + /* Wait for response or timeout */ + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (retval); + + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, + bool setting) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { + /* trusted vf is not supported by firmware older than 1.7.1 */ + return -EOPNOTSUPP; + } + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { + netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); + return -EINVAL; + } + + if (setting) { + /* Set */ + + if 
(oct->sriov_info.trusted_vf.active && + oct->sriov_info.trusted_vf.id == vfidx) + return 0; + + if (oct->sriov_info.trusted_vf.active) { + netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); + return -EPERM; + } + } else { + /* Clear */ + + if (!oct->sriov_info.trusted_vf.active) + return 0; + } + + if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { + if (setting) { + oct->sriov_info.trusted_vf.id = vfidx; + oct->sriov_info.trusted_vf.active = true; + } else { + oct->sriov_info.trusted_vf.active = false; + } + + netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, + setting ? "" : "not "); + } else { + netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); + return -1; + } + + return 0; +} + +static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, + int linkstate) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) + return 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; + nctrl.ncmd.s.param1 = + vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ + nctrl.ncmd.s.param2 = linkstate; + nctrl.ncmd.s.more = 0; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.cb_fn = NULL; + + ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); + + if (!ret) + oct->sriov_info.vf_linkstate[vfidx] = linkstate; + else if (ret > 0) + ret = -EIO; + + return ret; +} + +static int +liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct lio_devlink_priv *priv; + struct octeon_device *oct; + + priv = devlink_priv(devlink); + oct = priv->oct; + + *mode = oct->eswitch_mode; + + return 0; +} + +static int +liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct lio_devlink_priv *priv; + struct octeon_device *oct; + int ret = 0; + + priv = devlink_priv(devlink); + oct = priv->oct; + + if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) + return -EINVAL; + + if (oct->eswitch_mode == mode) + return 0; + + switch (mode) { + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + oct->eswitch_mode = mode; + ret = lio_vf_rep_create(oct); + break; + + case DEVLINK_ESWITCH_MODE_LEGACY: + lio_vf_rep_destroy(oct); + oct->eswitch_mode = mode; + break; + + default: + ret = -EINVAL; + } + + return ret; +} + +static const struct devlink_ops liquidio_devlink_ops = { + .eswitch_mode_get = liquidio_eswitch_mode_get, + .eswitch_mode_set = liquidio_eswitch_mode_set, +}; + +static int +liquidio_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + + if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return -EOPNOTSUPP; + + ppid->id_len = ETH_ALEN; + ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); + + return 0; +} + +static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, + struct ifla_vf_stats *vf_stats) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct oct_vf_stats stats; + int ret; + + if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) + return -EINVAL; + + memset(&stats, 0, sizeof(struct oct_vf_stats)); + ret = cn23xx_get_vf_stats(oct, vfidx, &stats); + if (!ret) { + vf_stats->rx_packets = stats.rx_packets; + vf_stats->tx_packets = 
stats.tx_packets; + vf_stats->rx_bytes = stats.rx_bytes; + vf_stats->tx_bytes = stats.tx_bytes; + vf_stats->broadcast = stats.broadcast; + vf_stats->multicast = stats.multicast; + } + + return ret; +} + +static const struct net_device_ops lionetdevops = { + .ndo_open = liquidio_open, + .ndo_stop = liquidio_stop, + .ndo_start_xmit = liquidio_xmit, + .ndo_get_stats64 = liquidio_get_stats64, + .ndo_set_mac_address = liquidio_set_mac, + .ndo_set_rx_mode = liquidio_set_mcast_list, + .ndo_tx_timeout = liquidio_tx_timeout, + + .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, + .ndo_change_mtu = liquidio_change_mtu, + .ndo_eth_ioctl = liquidio_ioctl, + .ndo_fix_features = liquidio_fix_features, + .ndo_set_features = liquidio_set_features, + .ndo_set_vf_mac = liquidio_set_vf_mac, + .ndo_set_vf_vlan = liquidio_set_vf_vlan, + .ndo_get_vf_config = liquidio_get_vf_config, + .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, + .ndo_set_vf_trust = liquidio_set_vf_trust, + .ndo_set_vf_link_state = liquidio_set_vf_link_state, + .ndo_get_vf_stats = liquidio_get_vf_stats, + .ndo_get_port_parent_id = liquidio_get_port_parent_id, +}; + +/** + * liquidio_init - Entry point for the liquidio module + */ +static int __init liquidio_init(void) +{ + int i; + struct handshake *hs; + + init_completion(&first_stage); + + octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); + + if (liquidio_init_pci()) + return -EINVAL; + + wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); + + for (i = 0; i < MAX_OCTEON_DEVICES; i++) { + hs = &handshake[i]; + if (hs->pci_dev) { + wait_for_completion(&hs->init); + if (!hs->init_ok) { + /* init handshake failed */ + dev_err(&hs->pci_dev->dev, + "Failed to init device\n"); + liquidio_deinit_pci(); + return -EIO; + } + } + } + + for (i = 0; i < MAX_OCTEON_DEVICES; i++) { + hs = &handshake[i]; + if (hs->pci_dev) { + wait_for_completion_timeout(&hs->started, + msecs_to_jiffies(30000)); + if (!hs->started_ok) { + /* starter handshake failed */ + dev_err(&hs->pci_dev->dev, + "Firmware failed to start\n"); + liquidio_deinit_pci(); + return -EIO; + } + } + } + + return 0; +} + +static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) +{ + struct octeon_device *oct = (struct octeon_device *)buf; + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + int gmxport = 0; + union oct_link_status *ls; + int i; + + if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { + dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", + recv_pkt->buffer_size[0], + recv_pkt->rh.r_nic_info.gmxport); + goto nic_info_err; + } + + gmxport = recv_pkt->rh.r_nic_info.gmxport; + ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + + OCT_DROQ_INFO_SIZE); + + octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); + for (i = 0; i < oct->ifcount; i++) { + if (oct->props[i].gmxport == gmxport) { + update_link_status(oct->props[i].netdev, ls); + break; + } + } + +nic_info_err: + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + octeon_free_recv_info(recv_info); + return 0; +} + +/** + * setup_nic_devices - Setup network interfaces + * @octeon_dev: octeon device + * + * Called during init time for each device. It assumes the NIC + * is already up and running. The link information for each + * interface is passed in link_info. 
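+ * Return: 0 on success, negative error code on failure.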
+ */ +static int setup_nic_devices(struct octeon_device *octeon_dev) +{ + struct lio *lio = NULL; + struct net_device *netdev; + u8 mac[6], i, j, *fw_ver, *micro_ver; + unsigned long micro; + u32 cur_ver; + struct octeon_soft_command *sc; + struct liquidio_if_cfg_resp *resp; + struct octdev_props *props; + int retval, num_iqueues, num_oqueues; + int max_num_queues = 0; + union oct_nic_if_cfg if_cfg; + unsigned int base_queue; + unsigned int gmx_port_id; + u32 resp_size, data_size; + u32 ifidx_or_pfnum; + struct lio_version *vdata; + struct devlink *devlink; + struct lio_devlink_priv *lio_devlink; + + /* This is to handle link status changes */ + octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, + OPCODE_NIC_INFO, + lio_nic_info, octeon_dev); + + /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. + * They are handled directly. + */ + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, + free_netbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, + free_netsgbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, + free_netsgbuf_with_resp); + + for (i = 0; i < octeon_dev->ifcount; i++) { + resp_size = sizeof(struct liquidio_if_cfg_resp); + data_size = sizeof(struct lio_version); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(octeon_dev, data_size, + resp_size, 0); + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + vdata = (struct lio_version *)sc->virtdptr; + + *((u64 *)vdata) = 0; + vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); + vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); + vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); + + if (OCTEON_CN23XX_PF(octeon_dev)) { + num_iqueues = octeon_dev->sriov_info.num_pf_rings; + num_oqueues = octeon_dev->sriov_info.num_pf_rings; + base_queue = octeon_dev->sriov_info.pf_srn; + + gmx_port_id = octeon_dev->pf_num; + ifidx_or_pfnum = octeon_dev->pf_num; + } else { + num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( + octeon_get_conf(octeon_dev), i); + num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( + octeon_get_conf(octeon_dev), i); + base_queue = CFG_GET_BASE_QUE_NIC_IF( + octeon_get_conf(octeon_dev), i); + gmx_port_id = CFG_GET_GMXID_NIC_IF( + octeon_get_conf(octeon_dev), i); + ifidx_or_pfnum = i; + } + + dev_dbg(&octeon_dev->pci_dev->dev, + "requesting config for interface %d, iqs %d, oqs %d\n", + ifidx_or_pfnum, num_iqueues, num_oqueues); + + if_cfg.u64 = 0; + if_cfg.s.num_iqueues = num_iqueues; + if_cfg.s.num_oqueues = num_oqueues; + if_cfg.s.base_queue = base_queue; + if_cfg.s.gmx_port_id = gmx_port_id; + + sc->iq_no = 0; + + octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, + OPCODE_NIC_IF_CFG, 0, + if_cfg.u64, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(octeon_dev, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed status: %x\n", + retval); + /* Soft instr is freed by driver in case of failure. */ + octeon_free_soft_command(octeon_dev, sc); + return(-EIO); + } + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. 
+ */ + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; + + retval = resp->status; + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; + } + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); + + /* Verify f/w version (in case of 'auto' loading from flash) */ + fw_ver = octeon_dev->fw_info.liquidio_firmware_version; + if (memcmp(LIQUIDIO_BASE_VERSION, + fw_ver, + strlen(LIQUIDIO_BASE_VERSION))) { + dev_err(&octeon_dev->pci_dev->dev, + "Unmatched firmware version. Expected %s.x, got %s.\n", + LIQUIDIO_BASE_VERSION, fw_ver); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; + } else if (atomic_read(octeon_dev->adapter_fw_state) == + FW_IS_PRELOADED) { + dev_info(&octeon_dev->pci_dev->dev, + "Using auto-loaded firmware version %s.\n", + fw_ver); + } + + /* extract micro version field; point past '<maj>.<min>.' */ + micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; + if (kstrtoul(micro_ver, 10, &micro) != 0) + micro = 0; + octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; + octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; + octeon_dev->fw_info.ver.rev = micro; + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct liquidio_if_cfg_info)) >> 3); + + num_iqueues = hweight64(resp->cfg_info.iqmask); + num_oqueues = hweight64(resp->cfg_info.oqmask); + + if (!(num_iqueues) || !(num_oqueues)) { + dev_err(&octeon_dev->pci_dev->dev, + "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", + resp->cfg_info.iqmask, + resp->cfg_info.oqmask); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; + } + + if (OCTEON_CN6XXX(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn6xxx)); + } else if (OCTEON_CN23XX_PF(octeon_dev)) { + max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, + cn23xx_pf)); + } + + dev_dbg(&octeon_dev->pci_dev->dev, + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", + i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, + num_iqueues, num_oqueues, max_num_queues); + netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); + + if (!netdev) { + dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; + } + + SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); + + /* Associate the routines that will handle different + * netdev tasks. 
+ */ + netdev->netdev_ops = &lionetdevops; + + retval = netif_set_real_num_rx_queues(netdev, num_oqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number rx failed\n"); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; + } + + retval = netif_set_real_num_tx_queues(netdev, num_iqueues); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "setting real number tx failed\n"); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_free; + } + + lio = GET_LIO(netdev); + + memset(lio, 0, sizeof(struct lio)); + + lio->ifidx = ifidx_or_pfnum; + + props = &octeon_dev->props[i]; + props->gmxport = resp->cfg_info.linfo.gmxport; + props->netdev = netdev; + + lio->linfo.num_rxpciq = num_oqueues; + lio->linfo.num_txpciq = num_iqueues; + for (j = 0; j < num_oqueues; j++) { + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; + } + for (j = 0; j < num_iqueues; j++) { + lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; + } + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + + WRITE_ONCE(sc->caller_is_done, true); + + lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + if (OCTEON_CN23XX_PF(octeon_dev) || + OCTEON_CN6XXX(octeon_dev)) { + lio->dev_capability = NETIF_F_HIGHDMA + | NETIF_F_IP_CSUM + | NETIF_F_IPV6_CSUM + | NETIF_F_SG | NETIF_F_RXCSUM + | NETIF_F_GRO + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; + } + netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); + + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device + */ + lio->enc_dev_capability = NETIF_F_IP_CSUM + | NETIF_F_IPV6_CSUM + | NETIF_F_GSO_UDP_TUNNEL + | NETIF_F_HW_CSUM | NETIF_F_SG + | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; + + netdev->hw_enc_features = (lio->enc_dev_capability & + ~NETIF_F_LRO); + + netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; + + lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; + + netdev->vlan_features = lio->dev_capability; + /* Add any unchangeable hw features */ + lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->features = (lio->dev_capability & ~NETIF_F_LRO); + + netdev->hw_features = lio->dev_capability; + /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ + netdev->hw_features = netdev->hw_features & + ~NETIF_F_HW_VLAN_CTAG_RX; + + /* MTU range: 68 - 16000 */ + netdev->min_mtu = LIO_MIN_MTU_SIZE; + netdev->max_mtu = LIO_MAX_MTU_SIZE; + + /* Point to the properties for octeon device to which this + * interface belongs. 
+ */ + lio->oct_dev = octeon_dev; + lio->octprops = props; + lio->netdev = netdev; + + dev_dbg(&octeon_dev->pci_dev->dev, + "if%d gmx: %d hw_addr: 0x%llx\n", i, + lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); + + for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { + u8 vfmac[ETH_ALEN]; + + eth_random_addr(vfmac); + if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { + dev_err(&octeon_dev->pci_dev->dev, + "Error setting VF%d MAC address\n", + j); + goto setup_nic_dev_free; + } + } + + /* 64-bit swap required on LE machines */ + octeon_swap_8B_data(&lio->linfo.hw_addr, 1); + for (j = 0; j < 6; j++) + mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); + + /* Copy MAC Address to OS network device structure */ + + eth_hw_addr_set(netdev, mac); + + /* By default all interfaces on a single Octeon uses the same + * tx and rx queues + */ + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + if (liquidio_setup_io_queues(octeon_dev, i, + lio->linfo.num_txpciq, + lio->linfo.num_rxpciq)) { + dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); + goto setup_nic_dev_free; + } + + ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); + + lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); + lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); + + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { + dev_err(&octeon_dev->pci_dev->dev, + "Gather list allocation failed\n"); + goto setup_nic_dev_free; + } + + /* Register ethtool support */ + liquidio_set_ethtool_ops(netdev); + if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) + octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; + else + octeon_dev->priv_flags = 0x0; + + if (netdev->features & NETIF_F_LRO) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + + liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, + OCTNET_CMD_VLAN_FILTER_ENABLE); + + if ((debug != -1) && (debug & NETIF_MSG_HW)) + liquidio_set_feature(netdev, + OCTNET_CMD_VERBOSE_ENABLE, 0); + + if (setup_link_status_change_wq(netdev)) + goto setup_nic_dev_free; + + if ((octeon_dev->fw_info.app_cap_flags & + LIQUIDIO_TIME_SYNC_CAP) && + setup_sync_octeon_time_wq(netdev)) + goto setup_nic_dev_free; + + if (setup_rx_oom_poll_fn(netdev)) + goto setup_nic_dev_free; + + /* Register the network device with the OS */ + if (register_netdev(netdev)) { + dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); + goto setup_nic_dev_free; + } + + dev_dbg(&octeon_dev->pci_dev->dev, + "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", + i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + netif_carrier_off(netdev); + lio->link_changes++; + + ifstate_set(lio, LIO_IFSTATE_REGISTERED); + + /* Sending command to firmware to enable Rx checksum offload + * by default at the time of setup of Liquidio driver for + * this device + */ + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, + OCTNET_CMD_TXCSUM_ENABLE); + + dev_dbg(&octeon_dev->pci_dev->dev, + "NIC ifidx:%d Setup successful\n", i); + + if (octeon_dev->subsystem_id == + OCTEON_CN2350_25GB_SUBSYS_ID || + octeon_dev->subsystem_id == + OCTEON_CN2360_25GB_SUBSYS_ID) { + cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, + octeon_dev->fw_info.ver.min, + octeon_dev->fw_info.ver.rev); + + /* speed control unsupported in f/w older than 1.7.2 */ + if (cur_ver < OCT_FW_VER(1, 7, 2)) { + dev_info(&octeon_dev->pci_dev->dev, + "speed setting not 
supported by f/w."); + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } else { + liquidio_get_speed(lio); + } + + if (octeon_dev->speed_setting == 0) { + octeon_dev->speed_setting = 25; + octeon_dev->no_speed_setting = 1; + } + } else { + octeon_dev->no_speed_setting = 1; + octeon_dev->speed_setting = 10; + } + octeon_dev->speed_boot = octeon_dev->speed_setting; + + /* don't read FEC setting if unsupported by f/w (see above) */ + if (octeon_dev->speed_boot == 25 && + !octeon_dev->no_speed_setting) { + liquidio_get_fec(lio); + octeon_dev->props[lio->ifidx].fec_boot = + octeon_dev->props[lio->ifidx].fec; + } + } + + device_lock(&octeon_dev->pci_dev->dev); + devlink = devlink_alloc(&liquidio_devlink_ops, + sizeof(struct lio_devlink_priv), + &octeon_dev->pci_dev->dev); + if (!devlink) { + device_unlock(&octeon_dev->pci_dev->dev); + dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); + goto setup_nic_dev_free; + } + + lio_devlink = devlink_priv(devlink); + lio_devlink->oct = octeon_dev; + + octeon_dev->devlink = devlink; + octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; + devlink_register(devlink); + device_unlock(&octeon_dev->pci_dev->dev); + + return 0; + +setup_nic_dev_free: + + while (i--) { + dev_err(&octeon_dev->pci_dev->dev, + "NIC ifidx:%d Setup failed\n", i); + liquidio_destroy_nic_device(octeon_dev, i); + } + +setup_nic_dev_done: + + return -ENODEV; +} + +#ifdef CONFIG_PCI_IOV +static int octeon_enable_sriov(struct octeon_device *oct) +{ + unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; + struct pci_dev *vfdev; + int err; + u32 u; + + if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { + err = pci_enable_sriov(oct->pci_dev, + oct->sriov_info.num_vfs_alloced); + if (err) { + dev_err(&oct->pci_dev->dev, + "OCTEON: Failed to enable PCI sriov: %d\n", + err); + oct->sriov_info.num_vfs_alloced = 0; + return err; + } + oct->sriov_info.sriov_enabled = 1; + + /* init lookup table that maps DPI ring number to VF pci_dev + * struct pointer + */ + u = 0; + vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + OCTEON_CN23XX_VF_VID, NULL); + while (vfdev) { + if (vfdev->is_virtfn && + (vfdev->physfn == oct->pci_dev)) { + oct->sriov_info.dpiring_to_vfpcidev_lut[u] = + vfdev; + u += oct->sriov_info.rings_per_vf; + } + vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + OCTEON_CN23XX_VF_VID, vfdev); + } + } + + return num_vfs_alloced; +} + +static int lio_pci_sriov_disable(struct octeon_device *oct) +{ + int u; + + if (pci_vfs_assigned(oct->pci_dev)) { + dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); + return -EPERM; + } + + pci_disable_sriov(oct->pci_dev); + + u = 0; + while (u < MAX_POSSIBLE_VFS) { + oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; + u += oct->sriov_info.rings_per_vf; + } + + oct->sriov_info.num_vfs_alloced = 0; + dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", + oct->pf_num); + + return 0; +} + +static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) +{ + struct octeon_device *oct = pci_get_drvdata(dev); + int ret = 0; + + if ((num_vfs == oct->sriov_info.num_vfs_alloced) && + (oct->sriov_info.sriov_enabled)) { + dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", + oct->pf_num, num_vfs); + return 0; + } + + if (!num_vfs) { + lio_vf_rep_destroy(oct); + ret = lio_pci_sriov_disable(oct); + } else if (num_vfs > oct->sriov_info.max_vfs) { + dev_err(&oct->pci_dev->dev, + "OCTEON: Max allowed VFs:%d user requested:%d", + oct->sriov_info.max_vfs, num_vfs); + ret = -EPERM; + } else 
{ + oct->sriov_info.num_vfs_alloced = num_vfs; + ret = octeon_enable_sriov(oct); + dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", + oct->pf_num, num_vfs); + ret = lio_vf_rep_create(oct); + if (ret) + dev_info(&oct->pci_dev->dev, + "vf representor create failed"); + } + + return ret; +} +#endif + +/** + * liquidio_init_nic_module - initialize the NIC + * @oct: octeon device + * + * This initialization routine is called once the Octeon device application is + * up and running + */ +static int liquidio_init_nic_module(struct octeon_device *oct) +{ + int i, retval = 0; + int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); + + dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); + + /* only default iq and oq were initialized + * initialize the rest as well + */ + /* run port_config command for each port */ + oct->ifcount = num_nic_ports; + + memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); + + for (i = 0; i < MAX_OCTEON_LINKS; i++) + oct->props[i].gmxport = -1; + + retval = setup_nic_devices(oct); + if (retval) { + dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); + goto octnet_init_failure; + } + + /* Call vf_rep_modinit if the firmware is switchdev capable + * and do it from the first liquidio function probed. + */ + if (!oct->octeon_id && + oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { + retval = lio_vf_rep_modinit(); + if (retval) { + liquidio_stop_nic_module(oct); + goto octnet_init_failure; + } + } + + liquidio_ptp_init(oct); + + dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); + + return retval; + +octnet_init_failure: + + oct->ifcount = 0; + + return retval; +} + +/** + * nic_starter - finish init + * @work: work struct work_struct + * + * starter callback that invokes the remaining initialization work after the NIC is up and running. + */ +static void nic_starter(struct work_struct *work) +{ + struct octeon_device *oct; + struct cavium_wk *wk = (struct cavium_wk *)work; + + oct = (struct octeon_device *)wk->ctxptr; + + if (atomic_read(&oct->status) == OCT_DEV_RUNNING) + return; + + /* If the status of the device is CORE_OK, the core + * application has reported its application type. Call + * any registered handlers now and move to the RUNNING + * state. + */ + if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) { + schedule_delayed_work(&oct->nic_poll_work.work, + LIQUIDIO_STARTER_POLL_INTERVAL_MS); + return; + } + + atomic_set(&oct->status, OCT_DEV_RUNNING); + + if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) { + dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n"); + + if (liquidio_init_nic_module(oct)) + dev_err(&oct->pci_dev->dev, "NIC initialization failed\n"); + else + handshake[oct->octeon_id].started_ok = 1; + } else { + dev_err(&oct->pci_dev->dev, + "Unexpected application running on NIC (%d). 
Check firmware.\n", + oct->app_mode); + } + + complete(&handshake[oct->octeon_id].started); +} + +static int +octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf) +{ + struct octeon_device *oct = (struct octeon_device *)buf; + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + int i, notice, vf_idx; + bool cores_crashed; + u64 *data, vf_num; + + notice = recv_pkt->rh.r.ossp; + data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE); + + /* the first 64-bit word of data is the vf_num */ + vf_num = data[0]; + octeon_swap_8B_data(&vf_num, 1); + vf_idx = (int)vf_num - 1; + + cores_crashed = READ_ONCE(oct->cores_crashed); + + if (notice == VF_DRV_LOADED) { + if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) { + oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx); + dev_info(&oct->pci_dev->dev, + "driver for VF%d was loaded\n", vf_idx); + if (!cores_crashed) + try_module_get(THIS_MODULE); + } + } else if (notice == VF_DRV_REMOVED) { + if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) { + oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx); + dev_info(&oct->pci_dev->dev, + "driver for VF%d was removed\n", vf_idx); + if (!cores_crashed) + module_put(THIS_MODULE); + } + } else if (notice == VF_DRV_MACADDR_CHANGED) { + u8 *b = (u8 *)&data[1]; + + oct->sriov_info.vf_macaddr[vf_idx] = data[1]; + dev_info(&oct->pci_dev->dev, + "VF driver changed VF%d's MAC address to %pM\n", + vf_idx, b + 2); + } + + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + octeon_free_recv_info(recv_info); + + return 0; +} + +/** + * octeon_device_init - Device initialization for each Octeon device that is probed + * @octeon_dev: octeon device + */ +static int octeon_device_init(struct octeon_device *octeon_dev) +{ + int j, ret; + char bootcmd[] = "\n"; + char *dbg_enb = NULL; + enum lio_fw_state fw_state; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)octeon_dev->priv; + atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE); + + /* Enable access to the octeon device and make its DMA capability + * known to the OS. + */ + if (octeon_pci_os_setup(octeon_dev)) + return 1; + + atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE); + + /* Identify the Octeon type and map the BAR address space. */ + if (octeon_chip_specific_setup(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n"); + return 1; + } + + atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE); + + /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', + * since that is what is required for the reference to be removed + * during de-initialization (see 'octeon_destroy_resources'). + */ + octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number, + PCI_SLOT(octeon_dev->pci_dev->devfn), + PCI_FUNC(octeon_dev->pci_dev->devfn), + true); + + octeon_dev->app_mode = CVM_DRV_INVALID_APP; + + /* CN23XX supports preloaded firmware if the following is true: + * + * The adapter indicates that firmware is currently running AND + * 'fw_type' is 'auto'. + * + * (default state is NEEDS_TO_BE_LOADED, override it if appropriate). + */ + if (OCTEON_CN23XX_PF(octeon_dev) && + cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) { + atomic_cmpxchg(octeon_dev->adapter_fw_state, + FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED); + } + + /* If loading firmware, only first device of adapter needs to do so. 
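+ * The atomic_cmpxchg() below ensures that only one PF instance of the adapter moves adapter_fw_state from FW_NEEDS_TO_BE_LOADED to FW_IS_BEING_LOADED; any other instance sees the updated state and skips the firmware load.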
*/ + fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state, + FW_NEEDS_TO_BE_LOADED, + FW_IS_BEING_LOADED); + + /* Here, [local variable] 'fw_state' is set to one of: + * + * FW_IS_PRELOADED: No firmware is to be loaded (see above) + * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load + * firmware to the adapter. + * FW_IS_BEING_LOADED: The driver's second instance will not load + * firmware to the adapter. + */ + + /* Prior to f/w load, perform a soft reset of the Octeon device; + * if error resetting, return w/error. + */ + if (fw_state == FW_NEEDS_TO_BE_LOADED) + if (octeon_dev->fn_list.soft_reset(octeon_dev)) + return 1; + + /* Initialize the dispatch mechanism used to push packets arriving on + * Octeon Output queues. + */ + if (octeon_init_dispatch_list(octeon_dev)) + return 1; + + octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, + OPCODE_NIC_CORE_DRV_ACTIVE, + octeon_core_drv_init, + octeon_dev); + + octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, + OPCODE_NIC_VF_DRV_NOTICE, + octeon_recv_vf_drv_notice, octeon_dev); + INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter); + octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev; + schedule_delayed_work(&octeon_dev->nic_poll_work.work, + LIQUIDIO_STARTER_POLL_INTERVAL_MS); + + atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE); + + if (octeon_set_io_queues_off(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n"); + return 1; + } + + if (OCTEON_CN23XX_PF(octeon_dev)) { + ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n"); + return ret; + } + } + + /* Initialize soft command buffer pool + */ + if (octeon_setup_sc_buffer_pool(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); + + /* Setup the data structures that manage this Octeon's Input queues. */ + if (octeon_setup_instr_queues(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, + "instruction queue initialization failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); + + /* Initialize lists to manage the requests of different types that + * arrive from user & kernel applications for this octeon device. + */ + if (octeon_setup_response_list(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); + + if (octeon_setup_output_queues(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); + return 1; + } + + atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); + + if (OCTEON_CN23XX_PF(octeon_dev)) { + if (octeon_dev->fn_list.setup_mbox(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE); + + if (octeon_allocate_ioq_vector + (octeon_dev, + octeon_dev->sriov_info.num_pf_rings)) { + dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); + + } else { + /* The input and output queue registers were setup earlier (the + * queues were not enabled). Any additional registers + * that need to be programmed should be done now. 
+ */ + ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, + "Failed to configure device registers\n"); + return ret; + } + } + + /* Initialize the tasklet that handles output queue packet processing.*/ + dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); + tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh); + + /* Setup the interrupt handler and record the INT SUM register address + */ + if (octeon_setup_interrupt(octeon_dev, + octeon_dev->sriov_info.num_pf_rings)) + return 1; + + /* Enable Octeon device interrupts */ + octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); + + atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE); + + /* Send Credit for Octeon Output queues. Credits are always sent BEFORE + * the output queue is enabled. + * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in + * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. + * Otherwise, it is possible that the DRV_ACTIVE message will be sent + * before any credits have been issued, causing the ring to be reset + * (and the f/w appear to never have started). + */ + for (j = 0; j < octeon_dev->num_oqs; j++) + writel(octeon_dev->droq[j]->max_count, + octeon_dev->droq[j]->pkts_credit_reg); + + /* Enable the input and output queues for this Octeon device */ + ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues"); + return ret; + } + + atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); + + if (fw_state == FW_NEEDS_TO_BE_LOADED) { + dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); + if (!ddr_timeout) { + dev_info(&octeon_dev->pci_dev->dev, + "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); + } + + schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); + + /* Wait for the octeon to initialize DDR after the soft-reset.*/ + while (!ddr_timeout) { + set_current_state(TASK_INTERRUPTIBLE); + if (schedule_timeout(HZ / 10)) { + /* user probably pressed Control-C */ + return 1; + } + } + ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, + "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", + ret); + return 1; + } + + if (octeon_wait_for_bootloader(octeon_dev, 1000)) { + dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); + return 1; + } + + /* Divert uboot to take commands from host instead. */ + ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50); + + dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); + ret = octeon_init_consoles(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); + return 1; + } + /* If console debug enabled, specify empty string to use default + * enablement ELSE specify NULL string for 'disabled'. + */ + dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL; + ret = octeon_add_console(octeon_dev, 0, dbg_enb); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); + return 1; + } else if (octeon_console_debug_enabled(0)) { + /* If console was added AND we're logging console output + * then set our console print function. 
+ */ + octeon_dev->console[0].print = octeon_dbg_console_print; + } + + atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); + + dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); + ret = load_firmware(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); + return 1; + } + + atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED); + } + + handshake[octeon_dev->octeon_id].init_ok = 1; + complete(&handshake[octeon_dev->octeon_id].init); + + atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); + oct_priv->dev = octeon_dev; + + return 0; +} + +/** + * octeon_dbg_console_print - Debug console print function + * @oct: octeon device + * @console_num: console number + * @prefix: first portion of line to display + * @suffix: second portion of line to display + * + * The OCTEON debug console outputs entire lines (excluding '\n'). + * Normally, the line will be passed in the 'prefix' parameter. + * However, due to buffering, it is possible for a line to be split into two + * parts, in which case they will be passed as the 'prefix' parameter and + * 'suffix' parameter. + */ +static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num, + char *prefix, char *suffix) +{ + if (prefix && suffix) + dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix, + suffix); + else if (prefix) + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix); + else if (suffix) + dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix); + + return 0; +} + +/** + * liquidio_exit - Exits the module + */ +static void __exit liquidio_exit(void) +{ + liquidio_deinit_pci(); + + pr_info("LiquidIO network module is now unloaded\n"); +} + +module_init(liquidio_init); +module_exit(liquidio_exit); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c new file mode 100644 index 000000000..ac196883f --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -0,0 +1,2442 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <net/vxlan.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn23xx_vf_device.h" + +MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); +MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver"); +MODULE_LICENSE("GPL"); + +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +struct oct_timestamp_resp { + u64 rh; + u64 timestamp; + u64 status; +}; + +union tx_info { + u64 u64; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u16 gso_size; + u16 gso_segs; + u32 reserved; +#else + u32 reserved; + u16 gso_segs; + u16 gso_size; +#endif + } s; +}; + +#define OCTNIC_GSO_MAX_HEADER_SIZE 128 +#define OCTNIC_GSO_MAX_SIZE \ + (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE) + +static int +liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void liquidio_vf_remove(struct pci_dev *pdev); +static int octeon_device_init(struct octeon_device *oct); +static int liquidio_stop(struct net_device *netdev); + +static int lio_wait_for_oq_pkts(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + int retry = MAX_IO_PENDING_PKT_COUNT; + int pkt_cnt = 0, pending_pkts; + int i; + + do { + pending_pkts = 0; + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); + } + if (pkt_cnt > 0) { + pending_pkts += pkt_cnt; + tasklet_schedule(&oct_priv->droq_tasklet); + } + pkt_cnt = 0; + schedule_timeout_uninterruptible(1); + + } while (retry-- && pending_pkts); + + return pkt_cnt; +} + +/** + * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc + * @oct: Pointer to Octeon device + */ +static void pcierror_quiesce_device(struct octeon_device *oct) +{ + int i; + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon, but we should wait for all packet processing + * to finish. + */ + + /* To allow for in-flight requests */ + schedule_timeout_uninterruptible(100); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + /* Force all requests waiting to be fetched by OCTEON to complete. */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + /* Force all pending ordered list requests to time out. */ + lio_process_ordered_list(oct, 1); + + /* We do not need to wait for output queue packets to be processed. 
*/ +} + +/** + * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status + * @dev: Pointer to PCI device + */ +static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + u32 status, mask; + int pos = 0x100; + + pr_info("%s :\n", __func__); + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); + if (dev->error_state == pci_channel_io_normal) + status &= ~mask; /* Clear corresponding nonfatal bits */ + else + status &= mask; /* Clear corresponding fatal bits */ + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); +} + +/** + * stop_pci_io - Stop all PCI IO to a given device + * @oct: Pointer to Octeon device + */ +static void stop_pci_io(struct octeon_device *oct) +{ + struct msix_entry *msix_entries; + int i; + + /* No more instructions will be forwarded. */ + atomic_set(&oct->status, OCT_DEV_IN_RESET); + + for (i = 0; i < oct->ifcount; i++) + netif_device_detach(oct->props[i].netdev); + + /* Disable interrupts */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + pcierror_quiesce_device(oct); + if (oct->msix_on) { + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < oct->num_msix_irqs; i++) { + /* clear the affinity_cpumask */ + irq_set_affinity_hint(msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + } + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + octeon_free_ioq_vector(oct); + } + dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", + lio_get_state_string(&oct->status)); + + /* making it a common function for all OCTEON models */ + cleanup_aer_uncorrect_error_status(oct->pci_dev); + + pci_disable_device(oct->pci_dev); +} + +/** + * liquidio_pcie_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */ +static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct octeon_device *oct = pci_get_drvdata(pdev); + + /* Non-correctable Non-fatal errors */ + if (state == pci_channel_io_normal) { + dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n"); + cleanup_aer_uncorrect_error_status(oct->pci_dev); + return PCI_ERS_RESULT_CAN_RECOVER; + } + + /* Non-correctable Fatal errors */ + dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n"); + stop_pci_io(oct); + + return PCI_ERS_RESULT_DISCONNECT; +} + +/* For PCI-E Advanced Error Recovery (AER) Interface */ +static const struct pci_error_handlers liquidio_vf_err_handler = { + .error_detected = liquidio_pcie_error_detected, +}; + +static const struct pci_device_id liquidio_vf_pci_tbl[] = { + { + PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 + }, + { + 0, 0, 0, 0, 0, 0, 0 + } +}; +MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl); + +static struct pci_driver liquidio_vf_pci_driver = { + .name = "LiquidIO_VF", + .id_table = liquidio_vf_pci_tbl, + .probe = liquidio_vf_probe, + .remove = liquidio_vf_remove, + .err_handler = &liquidio_vf_err_handler, /* For AER */ +}; + +/** + * print_link_info - Print link information + * @netdev: network device + */ +static void print_link_info(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) && + ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { + struct oct_link_info *linfo = &lio->linfo; + + if (linfo->link.s.link_up) { + netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", + linfo->link.s.speed, + (linfo->link.s.duplex) ? "Full" : "Half"); + } else { + netif_info(lio, link, lio->netdev, "Link Down\n"); + } + } +} + +/** + * octnet_link_status_change - Routine to notify MTU change + * @work: work_struct data structure + */ +static void octnet_link_status_change(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + + /* lio->linfo.link.s.mtu always contains max MTU of the lio interface. + * this API is invoked only when new max-MTU of the interface is + * less than current MTU. + */ + rtnl_lock(); + dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu); + rtnl_unlock(); +} + +/** + * setup_link_status_change_wq - Sets up the mtu status change work + * @netdev: network device + */ +static int setup_link_status_change_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->link_status_wq.wq = alloc_workqueue("link-status", + WQ_MEM_RECLAIM, 0); + if (!lio->link_status_wq.wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n"); + return -1; + } + INIT_DELAYED_WORK(&lio->link_status_wq.wk.work, + octnet_link_status_change); + lio->link_status_wq.wk.ctxptr = lio; + + return 0; +} + +static void cleanup_link_status_change_wq(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (lio->link_status_wq.wq) { + cancel_delayed_work_sync(&lio->link_status_wq.wk.work); + destroy_workqueue(lio->link_status_wq.wq); + } +} + +/** + * update_link_status - Update link status + * @netdev: network device + * @ls: link status structure + * + * Called on receipt of a link status response from the core application to + * update each interface's link status. 
+ */ +static void update_link_status(struct net_device *netdev, + union oct_link_status *ls) +{ + struct lio *lio = GET_LIO(netdev); + int current_max_mtu = lio->linfo.link.s.mtu; + struct octeon_device *oct = lio->oct_dev; + + if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { + lio->linfo.link.u64 = ls->u64; + + print_link_info(netdev); + lio->link_changes++; + + if (lio->linfo.link.s.link_up) { + netif_carrier_on(netdev); + wake_txqs(netdev); + } else { + netif_carrier_off(netdev); + stop_txqs(netdev); + } + + if (lio->linfo.link.s.mtu != current_max_mtu) { + dev_info(&oct->pci_dev->dev, + "Max MTU Changed from %d to %d\n", + current_max_mtu, lio->linfo.link.s.mtu); + netdev->max_mtu = lio->linfo.link.s.mtu; + } + + if (lio->linfo.link.s.mtu < netdev->mtu) { + dev_warn(&oct->pci_dev->dev, + "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n", + netdev->mtu, lio->linfo.link.s.mtu); + queue_delayed_work(lio->link_status_wq.wq, + &lio->link_status_wq.wk.work, 0); + } + } +} + +/** + * liquidio_vf_probe - PCI probe handler + * @pdev: PCI device structure + * @ent: unused + */ +static int +liquidio_vf_probe(struct pci_dev *pdev, + const struct pci_device_id __maybe_unused *ent) +{ + struct octeon_device *oct_dev = NULL; + + oct_dev = octeon_allocate_device(pdev->device, + sizeof(struct octeon_device_priv)); + + if (!oct_dev) { + dev_err(&pdev->dev, "Unable to allocate device\n"); + return -ENOMEM; + } + oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; + + dev_info(&pdev->dev, "Initializing device %x:%x.\n", + (u32)pdev->vendor, (u32)pdev->device); + + /* Assign octeon_device for this device to the private data area. */ + pci_set_drvdata(pdev, oct_dev); + + /* set linux specific device pointer */ + oct_dev->pci_dev = pdev; + + oct_dev->subsystem_id = pdev->subsystem_vendor | + (pdev->subsystem_device << 16); + + if (octeon_device_init(oct_dev)) { + liquidio_vf_remove(pdev); + return -ENOMEM; + } + + dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n"); + + return 0; +} + +/** + * octeon_pci_flr - PCI FLR for each Octeon device. + * @oct: octeon device + */ +static void octeon_pci_flr(struct octeon_device *oct) +{ + pci_save_state(oct->pci_dev); + + pci_cfg_access_lock(oct->pci_dev); + + /* Quiesce the device completely */ + pci_write_config_word(oct->pci_dev, PCI_COMMAND, + PCI_COMMAND_INTX_DISABLE); + + pcie_flr(oct->pci_dev); + + pci_cfg_access_unlock(oct->pci_dev); + + pci_restore_state(oct->pci_dev); +} + +/** + * octeon_destroy_resources - Destroy resources associated with octeon device + * @oct: octeon device + */ +static void octeon_destroy_resources(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct msix_entry *msix_entries; + int i; + + switch (atomic_read(&oct->status)) { + case OCT_DEV_RUNNING: + case OCT_DEV_CORE_OK: + /* No more instructions will be forwarded. */ + atomic_set(&oct->status, OCT_DEV_IN_RESET); + + oct->app_mode = CVM_DRV_INVALID_APP; + dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", + lio_get_state_string(&oct->status)); + + schedule_timeout_uninterruptible(HZ / 10); + + fallthrough; + case OCT_DEV_HOST_OK: + case OCT_DEV_IO_QUEUES_DONE: + if (lio_wait_for_instr_fetch(oct)) + dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n"); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + /* Disable the input and output queues now. 
No more packets will + * arrive from Octeon, but we should wait for all packet + * processing to finish. + */ + oct->fn_list.disable_io_queues(oct); + + if (lio_wait_for_oq_pkts(oct)) + dev_err(&oct->pci_dev->dev, "OQ had pending packets\n"); + + /* Force all requests waiting to be fetched by OCTEON to + * complete. + */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq, 0); + spin_unlock_bh(&iq->lock); + } + } + + lio_process_ordered_list(oct, 1); + octeon_free_sc_done_list(oct); + octeon_free_sc_zombie_list(oct); + + fallthrough; + case OCT_DEV_INTR_SET_DONE: + /* Disable interrupts */ + oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); + + if (oct->msix_on) { + msix_entries = (struct msix_entry *)oct->msix_entries; + for (i = 0; i < oct->num_msix_irqs; i++) { + if (oct->ioq_vector[i].vector) { + irq_set_affinity_hint( + msix_entries[i].vector, + NULL); + free_irq(msix_entries[i].vector, + &oct->ioq_vector[i]); + oct->ioq_vector[i].vector = 0; + } + } + pci_disable_msix(oct->pci_dev); + kfree(oct->msix_entries); + oct->msix_entries = NULL; + kfree(oct->irq_name_storage); + oct->irq_name_storage = NULL; + } + /* Soft reset the octeon device before exiting */ + if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE)) + octeon_pci_flr(oct); + else + cn23xx_vf_ask_pf_to_do_flr(oct); + + fallthrough; + case OCT_DEV_MSIX_ALLOC_VECTOR_DONE: + octeon_free_ioq_vector(oct); + + fallthrough; + case OCT_DEV_MBOX_SETUP_DONE: + oct->fn_list.free_mbox(oct); + + fallthrough; + case OCT_DEV_IN_RESET: + case OCT_DEV_DROQ_INIT_DONE: + mdelay(100); + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (!(oct->io_qmask.oq & BIT_ULL(i))) + continue; + octeon_delete_droq(oct, i); + } + + fallthrough; + case OCT_DEV_RESP_LIST_INIT_DONE: + octeon_delete_response_list(oct); + + fallthrough; + case OCT_DEV_INSTR_QUEUE_INIT_DONE: + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + octeon_delete_instr_queue(oct, i); + } + + fallthrough; + case OCT_DEV_SC_BUFF_POOL_INIT_DONE: + octeon_free_sc_buffer_pool(oct); + + fallthrough; + case OCT_DEV_DISPATCH_INIT_DONE: + octeon_delete_dispatch_list(oct); + cancel_delayed_work_sync(&oct->nic_poll_work.work); + + fallthrough; + case OCT_DEV_PCI_MAP_DONE: + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + + fallthrough; + case OCT_DEV_PCI_ENABLE_DONE: + pci_clear_master(oct->pci_dev); + /* Disable the device, releasing the PCI INT */ + pci_disable_device(oct->pci_dev); + + fallthrough; + case OCT_DEV_BEGIN_STATE: + /* Nothing to be done here either */ + break; + } + + tasklet_kill(&oct_priv->droq_tasklet); +} + +/** + * send_rx_ctrl_cmd - Send Rx control command + * @lio: per-network private data + * @start_stop: whether to start or stop + */ +static int send_rx_ctrl_cmd(struct lio *lio, int start_stop) +{ + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + struct octeon_soft_command *sc; + union octnet_cmd *ncmd; + int retval; + + if (oct->props[lio->ifidx].rx_on == start_stop) + return 0; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, + 16, 0); + if (!sc) { + netif_info(lio, rx_err, 
lio->netdev, + "Failed to allocate octeon_soft_command struct\n"); + return -ENOMEM; + } + + ncmd = (union octnet_cmd *)sc->virtdptr; + + ncmd->u64 = 0; + ncmd->s.cmd = OCTNET_CMD_RX_CTL; + ncmd->s.param1 = start_stop; + + octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3)); + + sc->iq_no = lio->linfo.txpciq[0].s.q_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_CMD, 0, 0, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); + octeon_free_soft_command(oct, sc); + } else { + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return retval; + + oct->props[lio->ifidx].rx_on = start_stop; + WRITE_ONCE(sc->caller_is_done, true); + } + + return retval; +} + +/** + * liquidio_destroy_nic_device - Destroy NIC device interface + * @oct: octeon device + * @ifidx: which interface to destroy + * + * Cleanup associated with each interface for an Octeon device when NIC + * module is being unloaded or if initialization fails during load. + */ +static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) +{ + struct net_device *netdev = oct->props[ifidx].netdev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct napi_struct *napi, *n; + struct lio *lio; + + if (!netdev) { + dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", + __func__, ifidx); + return; + } + + lio = GET_LIO(netdev); + + dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) + liquidio_stop(netdev); + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + oct->droq[0]->ops.poll_mode = 0; + } + + /* Delete NAPI */ + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + netif_napi_del(napi); + + tasklet_enable(&oct_priv->droq_tasklet); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) + unregister_netdev(netdev); + + cleanup_rx_oom_poll_fn(netdev); + + cleanup_link_status_change_wq(netdev); + + lio_delete_glists(lio); + + free_netdev(netdev); + + oct->props[ifidx].gmxport = -1; + + oct->props[ifidx].netdev = NULL; +} + +/** + * liquidio_stop_nic_module - Stop complete NIC functionality + * @oct: octeon device + */ +static int liquidio_stop_nic_module(struct octeon_device *oct) +{ + struct lio *lio; + int i, j; + + dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); + if (!oct->ifcount) { + dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); + return 1; + } + + spin_lock_bh(&oct->cmd_resp_wqlock); + oct->cmd_resp_state = OCT_DRV_OFFLINE; + spin_unlock_bh(&oct->cmd_resp_wqlock); + + for (i = 0; i < oct->ifcount; i++) { + lio = GET_LIO(oct->props[i].netdev); + for (j = 0; j < oct->num_oqs; j++) + octeon_unregister_droq_ops(oct, + lio->linfo.rxpciq[j].s.q_no); + } + + for (i = 0; i < oct->ifcount; i++) + liquidio_destroy_nic_device(oct, i); + + dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); + return 0; +} + +/** + * liquidio_vf_remove - Cleans up resources at unload time + * @pdev: PCI device structure + */ +static void liquidio_vf_remove(struct pci_dev *pdev) +{ + struct octeon_device *oct_dev 
= pci_get_drvdata(pdev); + + dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); + + if (oct_dev->app_mode == CVM_DRV_NIC_APP) + liquidio_stop_nic_module(oct_dev); + + /* Reset the octeon device and cleanup all memory allocated for + * the octeon device by driver. + */ + octeon_destroy_resources(oct_dev); + + dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); + + /* This octeon device has been removed. Update the global + * data structure to reflect this. Free the device structure. + */ + octeon_free_device_mem(oct_dev); +} + +/** + * octeon_pci_os_setup - PCI initialization for each Octeon device. + * @oct: octeon device + */ +static int octeon_pci_os_setup(struct octeon_device *oct) +{ +#ifdef CONFIG_PCI_IOV + /* setup PCI stuff first */ + if (!oct->pci_dev->physfn) + octeon_pci_flr(oct); +#endif + + if (pci_enable_device(oct->pci_dev)) { + dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n"); + return 1; + } + + if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) { + dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n"); + pci_disable_device(oct->pci_dev); + return 1; + } + + /* Enable PCI DMA Master. */ + pci_set_master(oct->pci_dev); + + return 0; +} + +/** + * free_netbuf - Unmap and free network buffer + * @buf: buffer + */ +static void free_netbuf(void *buf) +{ + struct octnet_buf_free_info *finfo; + struct sk_buff *skb; + struct lio *lio; + + finfo = (struct octnet_buf_free_info *)buf; + skb = finfo->skb; + lio = finfo->lio; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len, + DMA_TO_DEVICE); + + tx_buffer_free(skb); +} + +/** + * free_netsgbuf - Unmap and free gather buffer + * @buf: buffer + */ +static void free_netsgbuf(void *buf) +{ + struct octnet_buf_free_info *finfo; + struct octnic_gather *g; + struct sk_buff *skb; + int i, frags, iq; + struct lio *lio; + + finfo = (struct octnet_buf_free_info *)buf; + skb = finfo->skb; + lio = finfo->lio; + g = finfo->g; + frags = skb_shinfo(skb)->nr_frags; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + g->sg[0].ptr[0], (skb->len - skb->data_len), + DMA_TO_DEVICE); + + i = 1; + while (frags--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + + dma_unmap_page(&lio->oct_dev->pci_dev->dev, + g->sg[(i >> 2)].ptr[(i & 3)], + skb_frag_size(frag), DMA_TO_DEVICE); + i++; + } + + iq = skb_iq(lio->oct_dev, skb); + + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); + + tx_buffer_free(skb); +} + +/** + * free_netsgbuf_with_resp - Unmap and free gather buffer with response + * @buf: buffer + */ +static void free_netsgbuf_with_resp(void *buf) +{ + struct octnet_buf_free_info *finfo; + struct octeon_soft_command *sc; + struct octnic_gather *g; + struct sk_buff *skb; + int i, frags, iq; + struct lio *lio; + + sc = (struct octeon_soft_command *)buf; + skb = (struct sk_buff *)sc->callback_arg; + finfo = (struct octnet_buf_free_info *)&skb->cb; + + lio = finfo->lio; + g = finfo->g; + frags = skb_shinfo(skb)->nr_frags; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + g->sg[0].ptr[0], (skb->len - skb->data_len), + DMA_TO_DEVICE); + + i = 1; + while (frags--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; + + dma_unmap_page(&lio->oct_dev->pci_dev->dev, + g->sg[(i >> 2)].ptr[(i & 3)], + skb_frag_size(frag), DMA_TO_DEVICE); + i++; + } + + iq = skb_iq(lio->oct_dev, skb); + + spin_lock(&lio->glist_lock[iq]); + list_add_tail(&g->list, &lio->glist[iq]); + spin_unlock(&lio->glist_lock[iq]); + + /* Don't free the skb yet 
*/ +} + +/** + * liquidio_open - Net device open for LiquidIO + * @netdev: network device + */ +static int liquidio_open(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct napi_struct *napi, *n; + int ret = 0; + + if (!oct->props[lio->ifidx].napi_enabled) { + tasklet_disable(&oct_priv->droq_tasklet); + + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_enable(napi); + + oct->props[lio->ifidx].napi_enabled = 1; + + oct->droq[0]->ops.poll_mode = 1; + } + + ifstate_set(lio, LIO_IFSTATE_RUNNING); + + /* Ready for link status updates */ + lio->intf_open = 1; + + netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); + start_txqs(netdev); + + INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); + lio->stats_wk.ctxptr = lio; + schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies + (LIQUIDIO_NDEV_STATS_POLL_TIME_MS)); + + /* tell Octeon to start forwarding packets to host */ + ret = send_rx_ctrl_cmd(lio, 1); + if (ret) + return ret; + + dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); + + return ret; +} + +/** + * liquidio_stop - Net device stop for LiquidIO + * @netdev: network device + */ +static int liquidio_stop(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + struct napi_struct *napi, *n; + int ret = 0; + + /* tell Octeon to stop forwarding packets to host */ + ret = send_rx_ctrl_cmd(lio, 0); + if (ret) + return ret; + + netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); + /* Inform that netif carrier is down */ + lio->intf_open = 0; + lio->linfo.link.s.link_up = 0; + + netif_carrier_off(netdev); + lio->link_changes++; + + ifstate_reset(lio, LIO_IFSTATE_RUNNING); + + stop_txqs(netdev); + + /* Wait for any pending Rx descriptors */ + if (lio_wait_for_clean_oq(oct)) + netif_info(lio, rx_err, lio->netdev, + "Proceeding with stop interface after partial RX desc processing\n"); + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + oct->droq[0]->ops.poll_mode = 0; + + tasklet_enable(&oct_priv->droq_tasklet); + } + + cancel_delayed_work_sync(&lio->stats_wk.work); + + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); + + return ret; +} + +/** + * get_new_flags - Converts a mask based on net device flags + * @netdev: network device + * + * This routine generates an octnet_ifflags mask from the net device flags + * received from the OS. 
+ */ +static enum octnet_ifflags get_new_flags(struct net_device *netdev) +{ + enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST; + + if (netdev->flags & IFF_PROMISC) + f |= OCTNET_IFFLAG_PROMISC; + + if (netdev->flags & IFF_ALLMULTI) + f |= OCTNET_IFFLAG_ALLMULTI; + + if (netdev->flags & IFF_MULTICAST) { + f |= OCTNET_IFFLAG_MULTICAST; + + /* Accept all multicast addresses if there are more than we + * can handle + */ + if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR) + f |= OCTNET_IFFLAG_ALLMULTI; + } + + if (netdev->flags & IFF_BROADCAST) + f |= OCTNET_IFFLAG_BROADCAST; + + return f; +} + +static void liquidio_set_uc_list(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct netdev_hw_addr *ha; + u64 *mac; + + if (lio->netdev_uc_count == netdev_uc_count(netdev)) + return; + + if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) { + dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n"); + return; + } + + lio->netdev_uc_count = netdev_uc_count(netdev); + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST; + nctrl.ncmd.s.more = lio->netdev_uc_count; + nctrl.ncmd.s.param1 = oct->vf_num; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + /* copy all the addresses into the udd */ + mac = &nctrl.udd[0]; + netdev_for_each_uc_addr(ha, netdev) { + ether_addr_copy(((u8 *)mac) + 2, ha->addr); + mac++; + } + + octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); +} + +/** + * liquidio_set_mcast_list - Net device set_multicast_list + * @netdev: network device + */ +static void liquidio_set_mcast_list(struct net_device *netdev) +{ + int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR); + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct netdev_hw_addr *ha; + u64 *mc; + int ret; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + /* Create a ctrl pkt command to be sent to core app. */ + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST; + nctrl.ncmd.s.param1 = get_new_flags(netdev); + nctrl.ncmd.s.param2 = mc_count; + nctrl.ncmd.s.more = mc_count; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + /* copy all the addresses into the udd */ + mc = &nctrl.udd[0]; + netdev_for_each_mc_addr(ha, netdev) { + *mc = 0; + ether_addr_copy(((u8 *)mc) + 2, ha->addr); + /* no need to swap bytes */ + if (++mc > &nctrl.udd[mc_count]) + break; + } + + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + + /* Apparently, any activity in this call from the kernel has to + * be atomic. So we won't wait for response. 
+ */ + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", + ret); + } + + liquidio_set_uc_list(netdev); +} + +/** + * liquidio_set_mac - Net device set_mac_address + * @netdev: network device + * @p: opaque pointer to sockaddr + */ +static int liquidio_set_mac(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = (struct sockaddr *)p; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) + return 0; + + if (lio->linfo.macaddr_is_admin_asgnd) + return -EPERM; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; + nctrl.ncmd.s.param1 = 0; + nctrl.ncmd.s.more = 1; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + + nctrl.udd[0] = 0; + /* The MAC Address is presented in network byte order. */ + ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data); + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); + return -ENOMEM; + } + + if (nctrl.sc_status == + FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n"); + return -EPERM; + } + + eth_hw_addr_set(netdev, addr->sa_data); + ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data); + + return 0; +} + +static void +liquidio_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *lstats) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct; + u64 pkts = 0, drop = 0, bytes = 0; + struct oct_droq_stats *oq_stats; + struct oct_iq_stats *iq_stats; + int i, iq_no, oq_no; + + oct = lio->oct_dev; + + if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) + return; + + for (i = 0; i < oct->num_iqs; i++) { + iq_no = lio->linfo.txpciq[i].s.q_no; + iq_stats = &oct->instr_queue[iq_no]->stats; + pkts += iq_stats->tx_done; + drop += iq_stats->tx_dropped; + bytes += iq_stats->tx_tot_bytes; + } + + lstats->tx_packets = pkts; + lstats->tx_bytes = bytes; + lstats->tx_dropped = drop; + + pkts = 0; + drop = 0; + bytes = 0; + + for (i = 0; i < oct->num_oqs; i++) { + oq_no = lio->linfo.rxpciq[i].s.q_no; + oq_stats = &oct->droq[oq_no]->stats; + pkts += oq_stats->rx_pkts_received; + drop += (oq_stats->rx_dropped + + oq_stats->dropped_nodispatch + + oq_stats->dropped_toomany + + oq_stats->dropped_nomem); + bytes += oq_stats->rx_bytes_received; + } + + lstats->rx_bytes = bytes; + lstats->rx_packets = pkts; + lstats->rx_dropped = drop; + + lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; + + /* detailed rx_errors: */ + lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; + /* recved pkt with crc error */ + lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; + /* recv'd frame alignment error */ + lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; + + lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + + lstats->rx_frame_errors; + + /* detailed tx_errors */ + lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; + lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; + + lstats->tx_errors = lstats->tx_aborted_errors + + lstats->tx_carrier_errors; +} + +/** + * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl + * 
@netdev: network device + * @ifr: interface request + */ +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +{ + struct lio *lio = GET_LIO(netdev); + struct hwtstamp_config conf; + + if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) + return -EFAULT; + + switch (conf.tx_type) { + case HWTSTAMP_TX_ON: + case HWTSTAMP_TX_OFF: + break; + default: + return -ERANGE; + } + + switch (conf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + case HWTSTAMP_FILTER_NTP_ALL: + conf.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + else + ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; +} + +/** + * liquidio_ioctl - ioctl handler + * @netdev: network device + * @ifr: interface request + * @cmd: command + */ +static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return hwtstamp_ioctl(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf) +{ + struct sk_buff *skb = (struct sk_buff *)buf; + struct octnet_buf_free_info *finfo; + struct oct_timestamp_resp *resp; + struct octeon_soft_command *sc; + struct lio *lio; + + finfo = (struct octnet_buf_free_info *)skb->cb; + lio = finfo->lio; + sc = finfo->sc; + oct = lio->oct_dev; + resp = (struct oct_timestamp_resp *)sc->virtrptr; + + if (status != OCTEON_REQUEST_DONE) { + dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", + CVM_CAST64(status)); + resp->timestamp = 0; + } + + octeon_swap_8B_data(&resp->timestamp, 1); + + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + struct skb_shared_hwtstamps ts; + u64 ns = resp->timestamp; + + netif_info(lio, tx_done, lio->netdev, + "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", + skb, (unsigned long long)ns); + ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); + skb_tstamp_tx(skb, &ts); + } + + octeon_free_soft_command(oct, sc); + tx_buffer_free(skb); +} + +/* send_nic_timestamp_pkt - Send a data packet that will be timestamped + * @oct: octeon device + * @ndata: pointer to network data + * @finfo: pointer to private network data + */ +static int send_nic_timestamp_pkt(struct octeon_device *oct, + struct octnic_data_pkt *ndata, + struct octnet_buf_free_info *finfo, + int xmit_more) +{ + struct octeon_soft_command *sc; + int ring_doorbell; + struct lio *lio; + int retval; + u32 len; + + lio = finfo->lio; + + sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, + sizeof(struct oct_timestamp_resp)); + finfo->sc = sc; + + if (!sc) { + dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); + return IQ_SEND_FAILED; + } + + if (ndata->reqtype == REQTYPE_NORESP_NET) + ndata->reqtype = REQTYPE_RESP_NET; + else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) + ndata->reqtype = REQTYPE_RESP_NET_SG; + + sc->callback = handle_timestamp; + sc->callback_arg = finfo->skb; + sc->iq_no = ndata->q_no; + + len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz; + + ring_doorbell = !xmit_more; + + retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, + sc, len, ndata->reqtype); + + if (retval == IQ_SEND_FAILED) { + dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", + retval); + octeon_free_soft_command(oct, sc); + } else { + netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); + } + + return retval; +} + +/** + * liquidio_xmit - Transmit network packets to the Octeon interface + * @skb: skbuff struct to be passed to network layer. + * @netdev: pointer to network device + * @returns whether the packet was transmitted to the device okay or not + * (NETDEV_TX_OK or NETDEV_TX_BUSY) + */ +static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct octnet_buf_free_info *finfo; + union octnic_cmd_setup cmdsetup; + struct octnic_data_pkt ndata; + struct octeon_instr_irh *irh; + struct oct_iq_stats *stats; + struct octeon_device *oct; + int q_idx = 0, iq_no = 0; + union tx_info *tx_info; + int xmit_more = 0; + struct lio *lio; + int status = 0; + u64 dptr = 0; + u32 tag = 0; + int j; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + q_idx = skb_iq(lio->oct_dev, skb); + tag = q_idx; + iq_no = lio->linfo.txpciq[q_idx].s.q_no; + + stats = &oct->instr_queue[iq_no]->stats; + + /* Check for all conditions in which the current packet cannot be + * transmitted. + */ + if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || + (!lio->linfo.link.s.link_up) || (skb->len <= 0)) { + netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n", + lio->linfo.link.s.link_up); + goto lio_xmit_failed; + } + + /* Use space in skb->cb to store info used to unmap and + * free the buffers. + */ + finfo = (struct octnet_buf_free_info *)skb->cb; + finfo->lio = lio; + finfo->skb = skb; + finfo->sc = NULL; + + /* Prepare the attributes for the data to be passed to OSI. 
*/ + memset(&ndata, 0, sizeof(struct octnic_data_pkt)); + + ndata.buf = finfo; + + ndata.q_no = iq_no; + + if (octnet_iq_is_full(oct, ndata.q_no)) { + /* defer sending if queue is full */ + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + stats->tx_iq_busy++; + return NETDEV_TX_BUSY; + } + + ndata.datasize = skb->len; + + cmdsetup.u64 = 0; + cmdsetup.s.iq_no = iq_no; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb->encapsulation) { + cmdsetup.s.tnl_csum = 1; + stats->tx_vxlan++; + } else { + cmdsetup.s.transport_csum = 1; + } + } + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + cmdsetup.s.timestamp = 1; + } + + if (!skb_shinfo(skb)->nr_frags) { + cmdsetup.s.u.datasize = skb->len; + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + /* Offload checksum calculation for TCP/UDP packets */ + dptr = dma_map_single(&oct->pci_dev->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", + __func__); + return NETDEV_TX_BUSY; + } + + ndata.cmd.cmd3.dptr = dptr; + finfo->dptr = dptr; + ndata.reqtype = REQTYPE_NORESP_NET; + + } else { + skb_frag_t *frag; + struct octnic_gather *g; + int i, frags; + + spin_lock(&lio->glist_lock[q_idx]); + g = (struct octnic_gather *) + lio_list_delete_head(&lio->glist[q_idx]); + spin_unlock(&lio->glist_lock[q_idx]); + + if (!g) { + netif_info(lio, tx_err, lio->netdev, + "Transmit scatter gather: glist null!\n"); + goto lio_xmit_failed; + } + + cmdsetup.s.gather = 1; + cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); + octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); + + memset(g->sg, 0, g->sg_size); + + g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, + skb->data, + (skb->len - skb->data_len), + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", + __func__); + return NETDEV_TX_BUSY; + } + add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); + + frags = skb_shinfo(skb)->nr_frags; + i = 1; + while (frags--) { + frag = &skb_shinfo(skb)->frags[i - 1]; + + g->sg[(i >> 2)].ptr[(i & 3)] = + skb_frag_dma_map(&oct->pci_dev->dev, + frag, 0, skb_frag_size(frag), + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, + g->sg[i >> 2].ptr[i & 3])) { + dma_unmap_single(&oct->pci_dev->dev, + g->sg[0].ptr[0], + skb->len - skb->data_len, + DMA_TO_DEVICE); + for (j = 1; j < i; j++) { + frag = &skb_shinfo(skb)->frags[j - 1]; + dma_unmap_page(&oct->pci_dev->dev, + g->sg[j >> 2].ptr[j & 3], + skb_frag_size(frag), + DMA_TO_DEVICE); + } + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", + __func__); + return NETDEV_TX_BUSY; + } + + add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), + (i & 3)); + i++; + } + + dptr = g->sg_dma_ptr; + + ndata.cmd.cmd3.dptr = dptr; + finfo->dptr = dptr; + finfo->g = g; + + ndata.reqtype = REQTYPE_NORESP_NET_SG; + } + + irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; + tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; + + if (skb_shinfo(skb)->gso_size) { + tx_info->s.gso_size = skb_shinfo(skb)->gso_size; + tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; + } + + /* HW insert VLAN tag */ + if (skb_vlan_tag_present(skb)) { + irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT; + irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK; + } + + xmit_more = netdev_xmit_more(); + + if (unlikely(cmdsetup.s.timestamp)) + 
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); + else + status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); + if (status == IQ_SEND_FAILED) + goto lio_xmit_failed; + + netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); + + if (status == IQ_SEND_STOP) { + dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n", + iq_no); + netif_stop_subqueue(netdev, q_idx); + } + + netif_trans_update(netdev); + + if (tx_info->s.gso_segs) + stats->tx_done += tx_info->s.gso_segs; + else + stats->tx_done++; + stats->tx_tot_bytes += ndata.datasize; + + return NETDEV_TX_OK; + +lio_xmit_failed: + stats->tx_dropped++; + netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", + iq_no, stats->tx_dropped); + if (dptr) + dma_unmap_single(&oct->pci_dev->dev, dptr, + ndata.datasize, DMA_TO_DEVICE); + + octeon_ring_doorbell_locked(oct, iq_no); + + tx_buffer_free(skb); + return NETDEV_TX_OK; +} + +/** + * liquidio_tx_timeout - Network device Tx timeout + * @netdev: pointer to network device + * @txqueue: index of the hung transmit queue + */ +static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) +{ + struct lio *lio; + + lio = GET_LIO(netdev); + + netif_info(lio, tx_err, lio->netdev, + "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", + netdev->stats.tx_dropped); + netif_trans_update(netdev); + wake_txqs(netdev); +} + +static int +liquidio_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", + ret); + return -EPERM; + } + + return 0; +} + +static int +liquidio_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto __attribute__((unused)), u16 vid) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; + nctrl.ncmd.s.param1 = vid; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + return ret; +} + +/** Sending command to enable/disable RX checksum offload + * @param netdev pointer to network device + * @param command OCTNET_CMD_TNL_RX_CSUM_CTL + * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ + * OCTNET_CMD_RXCSUM_DISABLE + * @returns SUCCESS or FAILURE + */ +static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, + u8 rx_cmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.param1 = rx_cmd; + nctrl.iq_no = 
lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + return ret; +} + +/** Sending command to add/delete VxLAN UDP port to firmware + * @param netdev pointer to network device + * @param command OCTNET_CMD_VXLAN_PORT_CONFIG + * @param vxlan_port VxLAN port to be added or deleted + * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, + * OCTNET_CMD_VXLAN_PORT_DEL + * @returns SUCCESS or FAILURE + */ +static int liquidio_vxlan_port_command(struct net_device *netdev, int command, + u16 vxlan_port, u8 vxlan_cmd_bit) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = command; + nctrl.ncmd.s.more = vxlan_cmd_bit; + nctrl.ncmd.s.param1 = vxlan_port; + nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); + if (ret) { + dev_err(&oct->pci_dev->dev, + "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n", + ret); + if (ret > 0) + ret = -EIO; + } + return ret; +} + +static int liquidio_udp_tunnel_set_port(struct net_device *netdev, + unsigned int table, unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_ADD); +} + +static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, + unsigned int table, + unsigned int entry, + struct udp_tunnel_info *ti) +{ + return liquidio_vxlan_port_command(netdev, + OCTNET_CMD_VXLAN_PORT_CONFIG, + htons(ti->port), + OCTNET_CMD_VXLAN_PORT_DEL); +} + +static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { + .set_port = liquidio_udp_tunnel_set_port, + .unset_port = liquidio_udp_tunnel_unset_port, + .tables = { + { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, + }, +}; + +/** \brief Net device fix features + * @param netdev pointer to network device + * @param request features requested + * @returns updated features list + */ +static netdev_features_t liquidio_fix_features(struct net_device *netdev, + netdev_features_t request) +{ + struct lio *lio = netdev_priv(netdev); + + if ((request & NETIF_F_RXCSUM) && + !(lio->dev_capability & NETIF_F_RXCSUM)) + request &= ~NETIF_F_RXCSUM; + + if ((request & NETIF_F_HW_CSUM) && + !(lio->dev_capability & NETIF_F_HW_CSUM)) + request &= ~NETIF_F_HW_CSUM; + + if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) + request &= ~NETIF_F_TSO; + + if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) + request &= ~NETIF_F_TSO6; + + if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) + request &= ~NETIF_F_LRO; + + /* Disable LRO if RXCSUM is off */ + if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && + (lio->dev_capability & NETIF_F_LRO)) + request &= ~NETIF_F_LRO; + + return request; +} + +/** \brief Net device set features + * @param netdev pointer to network device + * @param features features to enable/disable + */ +static int liquidio_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct lio *lio = netdev_priv(netdev); + + if 
(!((netdev->features ^ features) & NETIF_F_LRO)) + return 0; + + if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO)) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + else if (!(features & NETIF_F_LRO) && + (lio->dev_capability & NETIF_F_LRO)) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + if (!(netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + (features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + else if ((netdev->features & NETIF_F_RXCSUM) && + (lio->enc_dev_capability & NETIF_F_RXCSUM) && + !(features & NETIF_F_RXCSUM)) + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_DISABLE); + + return 0; +} + +static const struct net_device_ops lionetdevops = { + .ndo_open = liquidio_open, + .ndo_stop = liquidio_stop, + .ndo_start_xmit = liquidio_xmit, + .ndo_get_stats64 = liquidio_get_stats64, + .ndo_set_mac_address = liquidio_set_mac, + .ndo_set_rx_mode = liquidio_set_mcast_list, + .ndo_tx_timeout = liquidio_tx_timeout, + .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, + .ndo_change_mtu = liquidio_change_mtu, + .ndo_eth_ioctl = liquidio_ioctl, + .ndo_fix_features = liquidio_fix_features, + .ndo_set_features = liquidio_set_features, +}; + +static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) +{ + struct octeon_device *oct = (struct octeon_device *)buf; + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + union oct_link_status *ls; + int gmxport = 0; + int i; + + if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { + dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", + recv_pkt->buffer_size[0], + recv_pkt->rh.r_nic_info.gmxport); + goto nic_info_err; + } + + gmxport = recv_pkt->rh.r_nic_info.gmxport; + ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + + OCT_DROQ_INFO_SIZE); + + octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); + + for (i = 0; i < oct->ifcount; i++) { + if (oct->props[i].gmxport == gmxport) { + update_link_status(oct->props[i].netdev, ls); + break; + } + } + +nic_info_err: + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + octeon_free_recv_info(recv_info); + return 0; +} + +/** + * setup_nic_devices - Setup network interfaces + * @octeon_dev: octeon device + * + * Called during init time for each device. It assumes the NIC + * is already up and running. The link information for each + * interface is passed in link_info. + */ +static int setup_nic_devices(struct octeon_device *octeon_dev) +{ + int retval, num_iqueues, num_oqueues; + u32 resp_size, data_size; + struct liquidio_if_cfg_resp *resp; + struct octeon_soft_command *sc; + union oct_nic_if_cfg if_cfg; + struct octdev_props *props; + struct net_device *netdev; + struct lio_version *vdata; + struct lio *lio = NULL; + u8 mac[ETH_ALEN], i, j; + u32 ifidx_or_pfnum; + + ifidx_or_pfnum = octeon_dev->pf_num; + + /* This is to handle link status changes */ + octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO, + lio_nic_info, octeon_dev); + + /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. + * They are handled directly. 
+ */ + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, + free_netbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, + free_netsgbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, + free_netsgbuf_with_resp); + + for (i = 0; i < octeon_dev->ifcount; i++) { + resp_size = sizeof(struct liquidio_if_cfg_resp); + data_size = sizeof(struct lio_version); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(octeon_dev, data_size, + resp_size, 0); + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + vdata = (struct lio_version *)sc->virtdptr; + + *((u64 *)vdata) = 0; + vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); + vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); + vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); + + if_cfg.u64 = 0; + + if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf; + if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf; + if_cfg.s.base_queue = 0; + + sc->iq_no = 0; + + octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, + OPCODE_NIC_IF_CFG, 0, if_cfg.u64, + 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + retval = octeon_send_soft_command(octeon_dev, sc); + if (retval == IQ_SEND_FAILED) { + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed status: %x\n", retval); + /* Soft instr is freed by driver in case of failure. */ + octeon_free_soft_command(octeon_dev, sc); + return(-EIO); + } + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. + */ + retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); + if (retval) + return retval; + + retval = resp->status; + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed, retval = %d\n", retval); + WRITE_ONCE(sc->caller_is_done, true); + return -EIO; + } + + snprintf(octeon_dev->fw_info.liquidio_firmware_version, + 32, "%s", + resp->cfg_info.liquidio_firmware_version); + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct liquidio_if_cfg_info)) >> 3); + + num_iqueues = hweight64(resp->cfg_info.iqmask); + num_oqueues = hweight64(resp->cfg_info.oqmask); + + if (!(num_iqueues) || !(num_oqueues)) { + dev_err(&octeon_dev->pci_dev->dev, + "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", + resp->cfg_info.iqmask, resp->cfg_info.oqmask); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; + } + dev_dbg(&octeon_dev->pci_dev->dev, + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", + i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, + num_iqueues, num_oqueues); + + netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); + + if (!netdev) { + dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); + WRITE_ONCE(sc->caller_is_done, true); + goto setup_nic_dev_done; + } + + SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); + + /* Associate the routines that will handle different + * netdev tasks. 
+ */ + netdev->netdev_ops = &lionetdevops; + + lio = GET_LIO(netdev); + + memset(lio, 0, sizeof(struct lio)); + + lio->ifidx = ifidx_or_pfnum; + + props = &octeon_dev->props[i]; + props->gmxport = resp->cfg_info.linfo.gmxport; + props->netdev = netdev; + + lio->linfo.num_rxpciq = num_oqueues; + lio->linfo.num_txpciq = num_iqueues; + + for (j = 0; j < num_oqueues; j++) { + lio->linfo.rxpciq[j].u64 = + resp->cfg_info.linfo.rxpciq[j].u64; + } + for (j = 0; j < num_iqueues; j++) { + lio->linfo.txpciq[j].u64 = + resp->cfg_info.linfo.txpciq[j].u64; + } + + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + lio->linfo.macaddr_is_admin_asgnd = + resp->cfg_info.linfo.macaddr_is_admin_asgnd; + lio->linfo.macaddr_spoofchk = + resp->cfg_info.linfo.macaddr_spoofchk; + + lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + lio->dev_capability = NETIF_F_HIGHDMA + | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_SG | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_GRO + | NETIF_F_LRO; + netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); + + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device + */ + lio->enc_dev_capability = NETIF_F_IP_CSUM + | NETIF_F_IPV6_CSUM + | NETIF_F_GSO_UDP_TUNNEL + | NETIF_F_HW_CSUM | NETIF_F_SG + | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; + + netdev->hw_enc_features = + (lio->enc_dev_capability & ~NETIF_F_LRO); + netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; + + netdev->vlan_features = lio->dev_capability; + /* Add any unchangeable hw features */ + lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX; + + netdev->features = (lio->dev_capability & ~NETIF_F_LRO); + + netdev->hw_features = lio->dev_capability; + netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + /* MTU range: 68 - 16000 */ + netdev->min_mtu = LIO_MIN_MTU_SIZE; + netdev->max_mtu = LIO_MAX_MTU_SIZE; + + WRITE_ONCE(sc->caller_is_done, true); + + /* Point to the properties for octeon device to which this + * interface belongs. 
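+ * (lio is the driver-private area embedded in the netdev, obtained via
+ * GET_LIO(netdev) earlier in this function.)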
+ */ + lio->oct_dev = octeon_dev; + lio->octprops = props; + lio->netdev = netdev; + + dev_dbg(&octeon_dev->pci_dev->dev, + "if%d gmx: %d hw_addr: 0x%llx\n", i, + lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); + + /* 64-bit swap required on LE machines */ + octeon_swap_8B_data(&lio->linfo.hw_addr, 1); + for (j = 0; j < ETH_ALEN; j++) + mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); + + /* Copy MAC Address to OS network device structure */ + eth_hw_addr_set(netdev, mac); + + if (liquidio_setup_io_queues(octeon_dev, i, + lio->linfo.num_txpciq, + lio->linfo.num_rxpciq)) { + dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); + goto setup_nic_dev_free; + } + + ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); + + /* For VFs, enable Octeon device interrupts here, + * as this is contingent upon IO queue setup + */ + octeon_dev->fn_list.enable_interrupt(octeon_dev, + OCTEON_ALL_INTR); + + /* By default all interfaces on a single Octeon uses the same + * tx and rx queues + */ + lio->txq = lio->linfo.txpciq[0].s.q_no; + lio->rxq = lio->linfo.rxpciq[0].s.q_no; + + lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); + lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); + + if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { + dev_err(&octeon_dev->pci_dev->dev, + "Gather list allocation failed\n"); + goto setup_nic_dev_free; + } + + /* Register ethtool support */ + liquidio_set_ethtool_ops(netdev); + if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID) + octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; + else + octeon_dev->priv_flags = 0x0; + + if (netdev->features & NETIF_F_LRO) + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, + OCTNIC_LROIPV4 | OCTNIC_LROIPV6); + + if (setup_link_status_change_wq(netdev)) + goto setup_nic_dev_free; + + if (setup_rx_oom_poll_fn(netdev)) + goto setup_nic_dev_free; + + /* Register the network device with the OS */ + if (register_netdev(netdev)) { + dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); + goto setup_nic_dev_free; + } + + dev_dbg(&octeon_dev->pci_dev->dev, + "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", + i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + netif_carrier_off(netdev); + lio->link_changes++; + + ifstate_set(lio, LIO_IFSTATE_REGISTERED); + + /* Sending command to firmware to enable Rx checksum offload + * by default at the time of setup of Liquidio driver for + * this device + */ + liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, + OCTNET_CMD_RXCSUM_ENABLE); + liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, + OCTNET_CMD_TXCSUM_ENABLE); + + dev_dbg(&octeon_dev->pci_dev->dev, + "NIC ifidx:%d Setup successful\n", i); + + octeon_dev->no_speed_setting = 1; + } + + return 0; + +setup_nic_dev_free: + + while (i--) { + dev_err(&octeon_dev->pci_dev->dev, + "NIC ifidx:%d Setup failed\n", i); + liquidio_destroy_nic_device(octeon_dev, i); + } + +setup_nic_dev_done: + + return -ENODEV; +} + +/** + * liquidio_init_nic_module - initialize the NIC + * @oct: octeon device + * + * This initialization routine is called once the Octeon device application is + * up and running + */ +static int liquidio_init_nic_module(struct octeon_device *oct) +{ + int num_nic_ports = 1; + int i, retval = 0; + + dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); + + /* only default iq and oq were initialized + * initialize the rest as well run port_config command for each port + */ + oct->ifcount = num_nic_ports; + memset(oct->props, 0, + sizeof(struct octdev_props) * 
num_nic_ports); + + for (i = 0; i < MAX_OCTEON_LINKS; i++) + oct->props[i].gmxport = -1; + + retval = setup_nic_devices(oct); + if (retval) { + dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); + goto octnet_init_failure; + } + + dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); + + return retval; + +octnet_init_failure: + + oct->ifcount = 0; + + return retval; +} + +/** + * octeon_device_init - Device initialization for each Octeon device that is probed + * @oct: octeon device + */ +static int octeon_device_init(struct octeon_device *oct) +{ + u32 rev_id; + int j; + + atomic_set(&oct->status, OCT_DEV_BEGIN_STATE); + + /* Enable access to the octeon device and make its DMA capability + * known to the OS. + */ + if (octeon_pci_os_setup(oct)) + return 1; + atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE); + + oct->chip_id = OCTEON_CN23XX_VF_VID; + pci_read_config_dword(oct->pci_dev, 8, &rev_id); + oct->rev_id = rev_id & 0xff; + + if (cn23xx_setup_octeon_vf_device(oct)) + return 1; + + atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE); + + oct->app_mode = CVM_DRV_NIC_APP; + + /* Initialize the dispatch mechanism used to push packets arriving on + * Octeon Output queues. + */ + if (octeon_init_dispatch_list(oct)) + return 1; + + atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE); + + if (octeon_set_io_queues_off(oct)) { + dev_err(&oct->pci_dev->dev, "setting io queues off failed\n"); + return 1; + } + + if (oct->fn_list.setup_device_regs(oct)) { + dev_err(&oct->pci_dev->dev, "device registers configuration failed\n"); + return 1; + } + + /* Initialize soft command buffer pool */ + if (octeon_setup_sc_buffer_pool(oct)) { + dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); + + /* Setup the data structures that manage this Octeon's Input queues. */ + if (octeon_setup_instr_queues(oct)) { + dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); + + /* Initialize lists to manage the requests of different types that + * arrive from user & kernel applications for this octeon device. + */ + if (octeon_setup_response_list(oct)) { + dev_err(&oct->pci_dev->dev, "Response list allocation failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE); + + if (octeon_setup_output_queues(oct)) { + dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE); + + if (oct->fn_list.setup_mbox(oct)) { + dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE); + + if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) { + dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n"); + return 1; + } + atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE); + + dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n", + oct->sriov_info.rings_per_vf); + + /* Setup the interrupt handler and record the INT SUM register address*/ + if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf)) + return 1; + + atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE); + + /* *************************************************************** + * The interrupts need to be enabled for the PF<-->VF handshake. + * They are [re]-enabled after the PF<-->VF handshake so that the + * correct OQ tick value is used (i.e. 
the value retrieved from + * the PF as part of the handshake). + */ + + /* Enable Octeon device interrupts */ + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + + if (cn23xx_octeon_pfvf_handshake(oct)) + return 1; + + /* Here we [re]-enable the interrupts so that the correct OQ tick value + * is used (i.e. the value that was retrieved during the handshake) + */ + + /* Enable Octeon device interrupts */ + oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR); + /* *************************************************************** */ + + /* Enable the input and output queues for this Octeon device */ + if (oct->fn_list.enable_io_queues(oct)) { + dev_err(&oct->pci_dev->dev, "enabling io queues failed\n"); + return 1; + } + + atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE); + + atomic_set(&oct->status, OCT_DEV_HOST_OK); + + /* Send Credit for Octeon Output queues. Credits are always sent after + * the output queue is enabled. + */ + for (j = 0; j < oct->num_oqs; j++) + writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg); + + /* Packets can start arriving on the output queues from this point. */ + + atomic_set(&oct->status, OCT_DEV_CORE_OK); + + atomic_set(&oct->status, OCT_DEV_RUNNING); + + if (liquidio_init_nic_module(oct)) + return 1; + + return 0; +} + +static int __init liquidio_vf_init(void) +{ + octeon_init_device_list(0); + return pci_register_driver(&liquidio_vf_pci_driver); +} + +static void __exit liquidio_vf_exit(void) +{ + pci_unregister_driver(&liquidio_vf_pci_driver); + + pr_info("LiquidIO_VF network module is now unloaded\n"); +} + +module_init(liquidio_vf_init); +module_exit(liquidio_vf_exit); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c new file mode 100644 index 000000000..600de587d --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c @@ -0,0 +1,672 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2017 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#include <linux/pci.h> +#include <linux/if_vlan.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "lio_vf_rep.h" + +static int lio_vf_rep_open(struct net_device *ndev); +static int lio_vf_rep_stop(struct net_device *ndev); +static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb, + struct net_device *ndev); +static void lio_vf_rep_tx_timeout(struct net_device *netdev, unsigned int txqueue); +static int lio_vf_rep_phys_port_name(struct net_device *dev, + char *buf, size_t len); +static void lio_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats64); +static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu); +static int lio_vf_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid); + +static const struct net_device_ops lio_vf_rep_ndev_ops = { + .ndo_open = lio_vf_rep_open, + .ndo_stop = lio_vf_rep_stop, + .ndo_start_xmit = lio_vf_rep_pkt_xmit, + .ndo_tx_timeout = lio_vf_rep_tx_timeout, + .ndo_get_phys_port_name = lio_vf_rep_phys_port_name, + .ndo_get_stats64 = lio_vf_rep_get_stats64, + .ndo_change_mtu = lio_vf_rep_change_mtu, + .ndo_get_port_parent_id = lio_vf_get_port_parent_id, +}; + +static int +lio_vf_rep_send_soft_command(struct octeon_device *oct, + void *req, int req_size, + void *resp, int resp_size) +{ + int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size; + struct octeon_soft_command *sc = NULL; + struct lio_vf_rep_resp *rep_resp; + void *sc_req; + int err; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, req_size, + tot_resp_size, 0); + if (!sc) + return -ENOMEM; + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + sc_req = (struct lio_vf_rep_req *)sc->virtdptr; + memcpy(sc_req, req, req_size); + + rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr; + memset(rep_resp, 0, tot_resp_size); + WRITE_ONCE(rep_resp->status, 1); + + sc->iq_no = 0; + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, + OPCODE_NIC_VF_REP_CMD, 0, 0, 0); + + err = octeon_send_soft_command(oct, sc); + if (err == IQ_SEND_FAILED) + goto free_buff; + + err = wait_for_sc_completion_timeout(oct, sc, 0); + if (err) + return err; + + err = READ_ONCE(rep_resp->status) ? 
-EBUSY : 0; + if (err) + dev_err(&oct->pci_dev->dev, "VF rep send config failed\n"); + else if (resp) + memcpy(resp, (rep_resp + 1), resp_size); + + WRITE_ONCE(sc->caller_is_done, true); + return err; + +free_buff: + octeon_free_soft_command(oct, sc); + + return err; +} + +static int +lio_vf_rep_open(struct net_device *ndev) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_STATE; + rep_cfg.ifidx = vf_rep->ifidx; + rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP; + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + + if (ret) { + dev_err(&oct->pci_dev->dev, + "VF_REP open failed with err %d\n", ret); + return -EIO; + } + + atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) | + LIO_IFSTATE_RUNNING)); + + netif_carrier_on(ndev); + netif_start_queue(ndev); + + return 0; +} + +static int +lio_vf_rep_stop(struct net_device *ndev) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_STATE; + rep_cfg.ifidx = vf_rep->ifidx; + rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN; + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + + if (ret) { + dev_err(&oct->pci_dev->dev, + "VF_REP dev stop failed with err %d\n", ret); + return -EIO; + } + + atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) & + ~LIO_IFSTATE_RUNNING)); + + netif_tx_disable(ndev); + netif_carrier_off(ndev); + + return 0; +} + +static void +lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue) +{ + netif_trans_update(ndev); + + netif_wake_queue(ndev); +} + +static void +lio_vf_rep_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *stats64) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); + + /* Swap tx and rx stats as VF rep is a switch port */ + stats64->tx_packets = vf_rep->stats.rx_packets; + stats64->tx_bytes = vf_rep->stats.rx_bytes; + stats64->tx_dropped = vf_rep->stats.rx_dropped; + + stats64->rx_packets = vf_rep->stats.tx_packets; + stats64->rx_bytes = vf_rep->stats.tx_bytes; + stats64->rx_dropped = vf_rep->stats.tx_dropped; +} + +static int +lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_MTU; + rep_cfg.ifidx = vf_rep->ifidx; + rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu); + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + if (ret) { + dev_err(&oct->pci_dev->dev, + "Change MTU failed with err %d\n", ret); + return -EIO; + } + + ndev->mtu = new_mtu; + + return 0; +} + +static int +lio_vf_rep_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); + struct octeon_device *oct = vf_rep->oct; + int ret; + + ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num, + vf_rep->ifidx - oct->pf_num * 64 - 1); + if (ret >= len) + return -EOPNOTSUPP; + + return 0; +} + +static struct net_device * +lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx) +{ + int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1; + int vfid_mask = max_vfs - 1; + + if (ifidx 
<= oct->pf_num * max_vfs || + ifidx >= oct->pf_num * max_vfs + max_vfs) + return NULL; + + /* ifidx 1-63 for PF0 VFs + * ifidx 65-127 for PF1 VFs + */ + vf_id = (ifidx & vfid_mask) - 1; + + return oct->vf_rep_list.ndev[vf_id]; +} + +static void +lio_vf_rep_copy_packet(struct octeon_device *oct, + struct sk_buff *skb, + int len) +{ + if (likely(len > MIN_SKB_SIZE)) { + struct octeon_skb_page_info *pg_info; + unsigned char *va; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (pg_info->page) { + va = page_address(pg_info->page) + + pg_info->page_offset; + memcpy(skb->data, va, MIN_SKB_SIZE); + skb_put(skb, MIN_SKB_SIZE); + } + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + pg_info->page, + pg_info->page_offset + MIN_SKB_SIZE, + len - MIN_SKB_SIZE, + LIO_RXBUFFER_SZ); + } else { + struct octeon_skb_page_info *pg_info = + ((struct octeon_skb_page_info *)(skb->cb)); + + skb_copy_to_linear_data(skb, page_address(pg_info->page) + + pg_info->page_offset, len); + skb_put(skb, len); + put_page(pg_info->page); + } +} + +static int +lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf) +{ + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + struct lio_vf_rep_desc *vf_rep; + struct net_device *vf_ndev; + struct octeon_device *oct; + union octeon_rh *rh; + struct sk_buff *skb; + int i, ifidx; + + oct = lio_get_device(recv_pkt->octeon_id); + if (!oct) + goto free_buffers; + + skb = recv_pkt->buffer_ptr[0]; + rh = &recv_pkt->rh; + ifidx = rh->r.ossp; + + vf_ndev = lio_vf_rep_get_ndev(oct, ifidx); + if (!vf_ndev) + goto free_buffers; + + vf_rep = netdev_priv(vf_ndev); + if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) || + recv_pkt->buffer_count > 1) + goto free_buffers; + + skb->dev = vf_ndev; + + /* Multiple buffers are not used for vf_rep packets. + * So just buffer_size[0] is valid. 
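+ * (Packets with buffer_count > 1 were already dropped by the check above,
+ * so only the first buffer is copied here.)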
+ */ + lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]); + + skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT); + skb->protocol = eth_type_trans(skb, skb->dev); + skb->ip_summed = CHECKSUM_NONE; + + netif_rx(skb); + + octeon_free_recv_info(recv_info); + + return 0; + +free_buffers: + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + + octeon_free_recv_info(recv_info); + + return 0; +} + +static void +lio_vf_rep_packet_sent_callback(struct octeon_device *oct, + u32 status, void *buf) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)buf; + struct sk_buff *skb = sc->ctxptr; + struct net_device *ndev = skb->dev; + u32 iq_no; + + dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, + sc->datasize, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + iq_no = sc->iq_no; + octeon_free_soft_command(oct, sc); + + if (octnet_iq_is_full(oct, iq_no)) + return; + + if (netif_queue_stopped(ndev)) + netif_wake_queue(ndev); +} + +static netdev_tx_t +lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev); + struct net_device *parent_ndev = vf_rep->parent_ndev; + struct octeon_device *oct = vf_rep->oct; + struct octeon_instr_pki_ih3 *pki_ih3; + struct octeon_soft_command *sc; + struct lio *parent_lio; + int status; + + parent_lio = GET_LIO(parent_ndev); + + if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) || + skb->len <= 0) + goto xmit_failed; + + if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) { + dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n"); + netif_stop_queue(ndev); + return NETDEV_TX_BUSY; + } + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, 0, 16, 0); + if (!sc) { + dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n"); + goto xmit_failed; + } + + /* Multiple buffers are not used for vf_rep packets. */ + if (skb_shinfo(skb)->nr_frags != 0) { + dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. 
Dropping packet\n"); + octeon_free_soft_command(oct, sc); + goto xmit_failed; + } + + sc->dmadptr = dma_map_single(&oct->pci_dev->dev, + skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) { + dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n"); + octeon_free_soft_command(oct, sc); + goto xmit_failed; + } + + sc->virtdptr = skb->data; + sc->datasize = skb->len; + sc->ctxptr = skb; + sc->iq_no = parent_lio->txq; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT, + vf_rep->ifidx, 0, 0); + pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3; + pki_ih3->tagtype = ORDERED_TAG; + + sc->callback = lio_vf_rep_packet_sent_callback; + sc->callback_arg = sc; + + status = octeon_send_soft_command(oct, sc); + if (status == IQ_SEND_FAILED) { + dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr, + sc->datasize, DMA_TO_DEVICE); + octeon_free_soft_command(oct, sc); + goto xmit_failed; + } + + if (status == IQ_SEND_STOP) + netif_stop_queue(ndev); + + netif_trans_update(ndev); + + return NETDEV_TX_OK; + +xmit_failed: + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static int lio_vf_get_port_parent_id(struct net_device *dev, + struct netdev_phys_item_id *ppid) +{ + struct lio_vf_rep_desc *vf_rep = netdev_priv(dev); + struct net_device *parent_ndev = vf_rep->parent_ndev; + struct lio *lio = GET_LIO(parent_ndev); + + ppid->id_len = ETH_ALEN; + ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); + + return 0; +} + +static void +lio_vf_rep_fetch_stats(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio_vf_rep_desc *vf_rep = wk->ctxptr; + struct lio_vf_rep_stats stats; + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + oct = vf_rep->oct; + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_STATS; + rep_cfg.ifidx = vf_rep->ifidx; + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg), + &stats, sizeof(stats)); + + if (!ret) { + octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3)); + memcpy(&vf_rep->stats, &stats, sizeof(stats)); + } + + schedule_delayed_work(&vf_rep->stats_wk.work, + msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS)); +} + +int +lio_vf_rep_create(struct octeon_device *oct) +{ + struct lio_vf_rep_desc *vf_rep; + struct net_device *ndev; + int i, num_vfs; + + if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return 0; + + if (!oct->sriov_info.sriov_enabled) + return 0; + + num_vfs = oct->sriov_info.num_vfs_alloced; + + oct->vf_rep_list.num_vfs = 0; + for (i = 0; i < num_vfs; i++) { + ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc)); + + if (!ndev) { + dev_err(&oct->pci_dev->dev, + "VF rep device %d creation failed\n", i); + goto cleanup; + } + + ndev->min_mtu = LIO_MIN_MTU_SIZE; + ndev->max_mtu = LIO_MAX_MTU_SIZE; + ndev->netdev_ops = &lio_vf_rep_ndev_ops; + + vf_rep = netdev_priv(ndev); + memset(vf_rep, 0, sizeof(*vf_rep)); + + vf_rep->ndev = ndev; + vf_rep->oct = oct; + vf_rep->parent_ndev = oct->props[0].netdev; + vf_rep->ifidx = (oct->pf_num * 64) + i + 1; + + eth_hw_addr_random(ndev); + + if (register_netdev(ndev)) { + dev_err(&oct->pci_dev->dev, "VF rep nerdev registration failed\n"); + + free_netdev(ndev); + goto cleanup; + } + + netif_carrier_off(ndev); + + INIT_DELAYED_WORK(&vf_rep->stats_wk.work, + lio_vf_rep_fetch_stats); + vf_rep->stats_wk.ctxptr = (void *)vf_rep; + schedule_delayed_work(&vf_rep->stats_wk.work, + msecs_to_jiffies + (LIO_VF_REP_STATS_POLL_TIME_MS)); + 
oct->vf_rep_list.num_vfs++; + oct->vf_rep_list.ndev[i] = ndev; + } + + if (octeon_register_dispatch_fn(oct, OPCODE_NIC, + OPCODE_NIC_VF_REP_PKT, + lio_vf_rep_pkt_recv, oct)) { + dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n"); + + goto cleanup; + } + + return 0; + +cleanup: + for (i = 0; i < oct->vf_rep_list.num_vfs; i++) { + ndev = oct->vf_rep_list.ndev[i]; + oct->vf_rep_list.ndev[i] = NULL; + if (ndev) { + vf_rep = netdev_priv(ndev); + cancel_delayed_work_sync + (&vf_rep->stats_wk.work); + unregister_netdev(ndev); + free_netdev(ndev); + } + } + + oct->vf_rep_list.num_vfs = 0; + + return -1; +} + +void +lio_vf_rep_destroy(struct octeon_device *oct) +{ + struct lio_vf_rep_desc *vf_rep; + struct net_device *ndev; + int i; + + if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) + return; + + if (!oct->sriov_info.sriov_enabled) + return; + + for (i = 0; i < oct->vf_rep_list.num_vfs; i++) { + ndev = oct->vf_rep_list.ndev[i]; + oct->vf_rep_list.ndev[i] = NULL; + if (ndev) { + vf_rep = netdev_priv(ndev); + cancel_delayed_work_sync + (&vf_rep->stats_wk.work); + netif_tx_disable(ndev); + netif_carrier_off(ndev); + + unregister_netdev(ndev); + free_netdev(ndev); + } + } + + oct->vf_rep_list.num_vfs = 0; +} + +static int +lio_vf_rep_netdev_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct lio_vf_rep_desc *vf_rep; + struct lio_vf_rep_req rep_cfg; + struct octeon_device *oct; + int ret; + + switch (event) { + case NETDEV_REGISTER: + case NETDEV_CHANGENAME: + break; + + default: + return NOTIFY_DONE; + } + + if (ndev->netdev_ops != &lio_vf_rep_ndev_ops) + return NOTIFY_DONE; + + vf_rep = netdev_priv(ndev); + oct = vf_rep->oct; + + if (strlen(ndev->name) > LIO_IF_NAME_SIZE) { + dev_err(&oct->pci_dev->dev, + "Device name change sync failed as the size is > %d\n", + LIO_IF_NAME_SIZE); + return NOTIFY_DONE; + } + + memset(&rep_cfg, 0, sizeof(rep_cfg)); + rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME; + rep_cfg.ifidx = vf_rep->ifidx; + strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE); + + ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, + sizeof(rep_cfg), NULL, 0); + if (ret) + dev_err(&oct->pci_dev->dev, + "vf_rep netdev name change failed with err %d\n", ret); + + return NOTIFY_DONE; +} + +static struct notifier_block lio_vf_rep_netdev_notifier = { + .notifier_call = lio_vf_rep_netdev_event, +}; + +int +lio_vf_rep_modinit(void) +{ + if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) { + pr_err("netdev notifier registration failed\n"); + return -EFAULT; + } + + return 0; +} + +void +lio_vf_rep_modexit(void) +{ + if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier)) + pr_err("netdev notifier unregister failed\n"); +} diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h new file mode 100644 index 000000000..bb3cedc63 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h @@ -0,0 +1,49 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2017 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium, Inc. for more information + **********************************************************************/ + +/*! \file octeon_vf_main.h + * \brief Host Driver: This file defines vf_rep related macros and structures + */ +#ifndef __LIO_VF_REP_H__ +#define __LIO_VF_REP_H__ +#define LIO_VF_REP_REQ_TMO_MS 5000 +#define LIO_VF_REP_STATS_POLL_TIME_MS 200 + +struct lio_vf_rep_desc { + struct net_device *parent_ndev; + struct net_device *ndev; + struct octeon_device *oct; + struct lio_vf_rep_stats stats; + struct cavium_wk stats_wk; + atomic_t ifstate; + int ifidx; +}; + +struct lio_vf_rep_sc_ctx { + struct completion complete; +}; + +int lio_vf_rep_create(struct octeon_device *oct); +void lio_vf_rep_destroy(struct octeon_device *oct); +int lio_vf_rep_modinit(void); +void lio_vf_rep_modexit(void); +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h new file mode 100644 index 000000000..4da90757c --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -0,0 +1,1039 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file liquidio_common.h + * \brief Common: Structures and macros used in PCI-NIC package by core and + * host driver. + */ + +#ifndef __LIQUIDIO_COMMON_H__ +#define __LIQUIDIO_COMMON_H__ + +#include "octeon_config.h" + +#define LIQUIDIO_BASE_MAJOR_VERSION 1 +#define LIQUIDIO_BASE_MINOR_VERSION 7 +#define LIQUIDIO_BASE_MICRO_VERSION 2 +#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \ + __stringify(LIQUIDIO_BASE_MINOR_VERSION) + +struct lio_version { + u16 major; + u16 minor; + u16 micro; + u16 reserved; +}; + +#define CONTROL_IQ 0 +/** Tag types used by Octeon cores in its work. */ +enum octeon_tag_type { + ORDERED_TAG = 0, + ATOMIC_TAG = 1, + NULL_TAG = 2, + NULL_NULL_TAG = 3 +}; + +/* pre-defined host->NIC tag values */ +#define LIO_CONTROL (0x11111110) +#define LIO_DATA(i) (0x11111111 + (i)) + +/* Opcodes used by host driver/apps to perform operations on the core. + * These are used to identify the major subsystem that the operation + * is for. + */ +#define OPCODE_CORE 0 /* used for generic core operations */ +#define OPCODE_NIC 1 /* used for NIC operations */ +/* Subcodes are used by host driver/apps to identify the sub-operation + * for the core. They only need to by unique for a given subsystem. 
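+ * The OPCODE_SUBCODE() macro below packs the 4-bit opcode into bits 8-11
+ * and the 7-bit subcode into bits 0-6; for example,
+ * OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_INFO) evaluates to (1 << 8) | 0x04,
+ * i.e. 0x104.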
+ */ +#define OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f)) + +/** OPCODE_CORE subcodes. For future use. */ + +/** OPCODE_NIC subcodes */ + +/* This subcode is sent by core PCI driver to indicate cores are ready. */ +#define OPCODE_NIC_CORE_DRV_ACTIVE 0x01 +#define OPCODE_NIC_NW_DATA 0x02 /* network packet data */ +#define OPCODE_NIC_CMD 0x03 +#define OPCODE_NIC_INFO 0x04 +#define OPCODE_NIC_PORT_STATS 0x05 +#define OPCODE_NIC_MDIO45 0x06 +#define OPCODE_NIC_TIMESTAMP 0x07 +#define OPCODE_NIC_INTRMOD_CFG 0x08 +#define OPCODE_NIC_IF_CFG 0x09 +#define OPCODE_NIC_VF_DRV_NOTICE 0x0A +#define OPCODE_NIC_INTRMOD_PARAMS 0x0B +#define OPCODE_NIC_QCOUNT_UPDATE 0x12 +#define OPCODE_NIC_SET_TRUSTED_VF 0x13 +#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14 +#define VF_DRV_LOADED 1 +#define VF_DRV_REMOVED -1 +#define VF_DRV_MACADDR_CHANGED 2 + +#define OPCODE_NIC_VF_REP_PKT 0x15 +#define OPCODE_NIC_VF_REP_CMD 0x16 +#define OPCODE_NIC_UBOOT_CTL 0x17 + +#define CORE_DRV_TEST_SCATTER_OP 0xFFF5 + +/* Application codes advertised by the core driver initialization packet. */ +#define CVM_DRV_APP_START 0x0 +#define CVM_DRV_NO_APP 0 +#define CVM_DRV_APP_COUNT 0x2 +#define CVM_DRV_BASE_APP (CVM_DRV_APP_START + 0x0) +#define CVM_DRV_NIC_APP (CVM_DRV_APP_START + 0x1) +#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2) +#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1) + +#define BYTES_PER_DHLEN_UNIT 8 +#define MAX_REG_CNT 2000000U +#define INTRNAMSIZ 32 +#define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ) +#define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2) +#define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2) + +#define SCR2_BIT_FW_LOADED 63 + +/* App specific capabilities from firmware to pf driver */ +#define LIQUIDIO_TIME_SYNC_CAP 0x1 +#define LIQUIDIO_SWITCHDEV_CAP 0x2 +#define LIQUIDIO_SPOOFCHK_CAP 0x4 + +/* error status return from firmware */ +#define OCTEON_REQUEST_NO_PERMISSION 0xc + +static inline u32 incr_index(u32 index, u32 count, u32 max) +{ + if ((index + count) >= max) + index = index + count - max; + else + index += count; + + return index; +} + +#define OCT_BOARD_NAME 32 +#define OCT_SERIAL_LEN 64 + +/* Structure used by core driver to send indication that the Octeon + * application is ready. + */ +struct octeon_core_setup { + u64 corefreq; + + char boardname[OCT_BOARD_NAME]; + + char board_serial_number[OCT_SERIAL_LEN]; + + u64 board_rev_major; + + u64 board_rev_minor; + +}; + +/*--------------------------- SCATTER GATHER ENTRY -----------------------*/ + +/* The Scatter-Gather List Entry. The scatter or gather component used with + * a Octeon input instruction has this format. + */ +struct octeon_sg_entry { + /** The first 64 bit gives the size of data in each dptr.*/ + union { + u16 size[4]; + u64 size64; + } u; + + /** The 4 dptr pointers for this entry. */ + u64 ptr[4]; + +}; + +#define OCT_SG_ENTRY_SIZE (sizeof(struct octeon_sg_entry)) + +/* \brief Add size to gather list + * @param sg_entry scatter/gather entry + * @param size size to add + * @param pos position to add it. 
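+ *
+ * Example: add_sg_size(&sg, len, 0) records the length of the first data
+ * pointer. On little-endian hosts the slot index is mirrored (3 - pos) so
+ * the packed 64-bit size word keeps the layout the big-endian Octeon
+ * expects.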
+ */ +static inline void add_sg_size(struct octeon_sg_entry *sg_entry, + u16 size, + u32 pos) +{ +#ifdef __BIG_ENDIAN_BITFIELD + sg_entry->u.size[pos] = size; +#else + sg_entry->u.size[3 - pos] = size; +#endif +} + +/*------------------------- End Scatter/Gather ---------------------------*/ + +#define OCTNET_FRM_LENGTH_SIZE 8 + +#define OCTNET_FRM_PTP_HEADER_SIZE 8 + +#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */ + +#define OCTNET_MIN_FRM_SIZE 64 + +#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE) + +#define OCTNET_DEFAULT_MTU (1500) +#define OCTNET_DEFAULT_FRM_SIZE (OCTNET_DEFAULT_MTU + OCTNET_FRM_HEADER_SIZE) + +/** NIC Commands are sent using this Octeon Input Queue */ +#define OCTNET_CMD_Q 0 + +/* NIC Command types */ +#define OCTNET_CMD_CHANGE_MTU 0x1 +#define OCTNET_CMD_CHANGE_MACADDR 0x2 +#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3 +#define OCTNET_CMD_RX_CTL 0x4 + +#define OCTNET_CMD_SET_MULTI_LIST 0x5 +#define OCTNET_CMD_CLEAR_STATS 0x6 + +/* command for setting the speed, duplex & autoneg */ +#define OCTNET_CMD_SET_SETTINGS 0x7 +#define OCTNET_CMD_SET_FLOW_CTL 0x8 + +#define OCTNET_CMD_MDIO_READ_WRITE 0x9 +#define OCTNET_CMD_GPIO_ACCESS 0xA +#define OCTNET_CMD_LRO_ENABLE 0xB +#define OCTNET_CMD_LRO_DISABLE 0xC +#define OCTNET_CMD_SET_RSS 0xD +#define OCTNET_CMD_WRITE_SA 0xE +#define OCTNET_CMD_DELETE_SA 0xF +#define OCTNET_CMD_UPDATE_SA 0x12 + +#define OCTNET_CMD_TNL_RX_CSUM_CTL 0x10 +#define OCTNET_CMD_TNL_TX_CSUM_CTL 0x11 +#define OCTNET_CMD_IPSECV2_AH_ESP_CTL 0x13 +#define OCTNET_CMD_VERBOSE_ENABLE 0x14 +#define OCTNET_CMD_VERBOSE_DISABLE 0x15 + +#define OCTNET_CMD_VLAN_FILTER_CTL 0x16 +#define OCTNET_CMD_ADD_VLAN_FILTER 0x17 +#define OCTNET_CMD_DEL_VLAN_FILTER 0x18 +#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19 + +#define OCTNET_CMD_ID_ACTIVE 0x1a + +#define OCTNET_CMD_SET_UC_LIST 0x1b +#define OCTNET_CMD_SET_VF_LINKSTATE 0x1c + +#define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f + +#define OCTNET_CMD_GROUP1 1 +#define OCTNET_CMD_SET_VF_SPOOFCHK 0x1 +#define OCTNET_GROUP1_LAST_CMD OCTNET_CMD_SET_VF_SPOOFCHK + +#define OCTNET_CMD_VXLAN_PORT_ADD 0x0 +#define OCTNET_CMD_VXLAN_PORT_DEL 0x1 +#define OCTNET_CMD_RXCSUM_ENABLE 0x0 +#define OCTNET_CMD_RXCSUM_DISABLE 0x1 +#define OCTNET_CMD_TXCSUM_ENABLE 0x0 +#define OCTNET_CMD_TXCSUM_DISABLE 0x1 +#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1 +#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0 + +#define OCTNET_CMD_FAIL 0x1 + +#define SEAPI_CMD_FEC_SET 0x0 +#define SEAPI_CMD_FEC_SET_DISABLE 0x0 +#define SEAPI_CMD_FEC_SET_RS 0x1 +#define SEAPI_CMD_FEC_GET 0x1 + +#define SEAPI_CMD_SPEED_SET 0x2 +#define SEAPI_CMD_SPEED_GET 0x3 + +#define OPCODE_NIC_VF_PORT_STATS 0x22 + +#define LIO_CMD_WAIT_TM 100 + +/* RX(packets coming from wire) Checksum verification flags */ +/* TCP/UDP csum */ +#define CNNIC_L4SUM_VERIFIED 0x1 +#define CNNIC_IPSUM_VERIFIED 0x2 +#define CNNIC_TUN_CSUM_VERIFIED 0x4 +#define CNNIC_CSUM_VERIFIED (CNNIC_IPSUM_VERIFIED | CNNIC_L4SUM_VERIFIED) + +/*LROIPV4 and LROIPV6 Flags*/ +#define OCTNIC_LROIPV4 0x1 +#define OCTNIC_LROIPV6 0x2 + +/* Interface flags communicated between host driver and core app. 
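+ * These are individual bit flags that may be OR-ed together into a single
+ * flags word.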
*/ +enum octnet_ifflags { + OCTNET_IFFLAG_PROMISC = 0x01, + OCTNET_IFFLAG_ALLMULTI = 0x02, + OCTNET_IFFLAG_MULTICAST = 0x04, + OCTNET_IFFLAG_BROADCAST = 0x08, + OCTNET_IFFLAG_UNICAST = 0x10 +}; + +/* wqe + * --------------- 0 + * | wqe word0-3 | + * --------------- 32 + * | PCI IH | + * --------------- 40 + * | RPTR | + * --------------- 48 + * | PCI IRH | + * --------------- 56 + * | OCT_NET_CMD | + * --------------- 64 + * | Addtl 8-BData | + * | | + * --------------- + */ + +union octnet_cmd { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 cmd:5; + + u64 more:6; /* How many udd words follow the command */ + + u64 cmdgroup:8; + u64 reserved:21; + + u64 param1:16; + + u64 param2:8; + +#else + + u64 param2:8; + + u64 param1:16; + + u64 reserved:21; + u64 cmdgroup:8; + + u64 more:6; + + u64 cmd:5; + +#endif + } s; + +}; + +#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd)) + +/*pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ +#define LIO_SOFTCMDRESP_IH2 40 +#define LIO_SOFTCMDRESP_IH3 (40 + 8) + +#define LIO_PCICMD_O2 24 +#define LIO_PCICMD_O3 (24 + 8) + +/* Instruction Header(DPI) - for OCTEON-III models */ +struct octeon_instr_ih3 { +#ifdef __BIG_ENDIAN_BITFIELD + + /** Reserved3 */ + u64 reserved3:1; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Front Data size */ + u64 fsz:6; + + /** Reserved2 */ + u64 reserved2:4; + + /** PKI port kind - PKIND */ + u64 pkind:6; + + /** Reserved1 */ + u64 reserved1:32; + +#else + /** Reserved1 */ + u64 reserved1:32; + + /** PKI port kind - PKIND */ + u64 pkind:6; + + /** Reserved2 */ + u64 reserved2:4; + + /** Front Data size */ + u64 fsz:6; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Reserved3 */ + u64 reserved3:1; + +#endif +}; + +/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */ +/** BIG ENDIAN format. */ +struct octeon_instr_pki_ih3 { +#ifdef __BIG_ENDIAN_BITFIELD + + /** Wider bit */ + u64 w:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Use Tag */ + u64 utag:1; + + /** Use QPG */ + u64 uqpg:1; + + /** Reserved2 */ + u64 reserved2:1; + + /** Parse Mode */ + u64 pm:3; + + /** Skip Length */ + u64 sl:8; + + /** Use Tag Type */ + u64 utt:1; + + /** Tag type */ + u64 tagtype:2; + + /** Reserved1 */ + u64 reserved1:2; + + /** QPG Value */ + u64 qpg:11; + + /** Tag Value */ + u64 tag:32; + +#else + + /** Tag Value */ + u64 tag:32; + + /** QPG Value */ + u64 qpg:11; + + /** Reserved1 */ + u64 reserved1:2; + + /** Tag type */ + u64 tagtype:2; + + /** Use Tag Type */ + u64 utt:1; + + /** Skip Length */ + u64 sl:8; + + /** Parse Mode */ + u64 pm:3; + + /** Reserved2 */ + u64 reserved2:1; + + /** Use QPG */ + u64 uqpg:1; + + /** Use Tag */ + u64 utag:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Wider bit */ + u64 w:1; +#endif + +}; + +/** Instruction Header */ +struct octeon_instr_ih2 { +#ifdef __BIG_ENDIAN_BITFIELD + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Data length OR no. 
of entries in gather list */ + u64 dlengsz:14; + + /** Front Data size */ + u64 fsz:6; + + /** Packet Order / Work Unit selection (1 of 8)*/ + u64 qos:3; + + /** Core group selection (1 of 16) */ + u64 grp:4; + + /** Short Raw Packet Indicator 1=short raw pkt */ + u64 rs:1; + + /** Tag type */ + u64 tagtype:2; + + /** Tag Value */ + u64 tag:32; +#else + /** Tag Value */ + u64 tag:32; + + /** Tag type */ + u64 tagtype:2; + + /** Short Raw Packet Indicator 1=short raw pkt */ + u64 rs:1; + + /** Core group selection (1 of 16) */ + u64 grp:4; + + /** Packet Order / Work Unit selection (1 of 8)*/ + u64 qos:3; + + /** Front Data size */ + u64 fsz:6; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; +#endif +}; + +/** Input Request Header */ +struct octeon_instr_irh { +#ifdef __BIG_ENDIAN_BITFIELD + u64 opcode:4; + u64 rflag:1; + u64 subcode:7; + u64 vlan:12; + u64 priority:3; + u64 reserved:5; + u64 ossp:32; /* opcode/subcode specific parameters */ +#else + u64 ossp:32; /* opcode/subcode specific parameters */ + u64 reserved:5; + u64 priority:3; + u64 vlan:12; + u64 subcode:7; + u64 rflag:1; + u64 opcode:4; +#endif +}; + +/** Return Data Parameters */ +struct octeon_instr_rdp { +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved:49; + u64 pcie_port:3; + u64 rlen:12; +#else + u64 rlen:12; + u64 pcie_port:3; + u64 reserved:49; +#endif +}; + +/** Receive Header */ +union octeon_rh { +#ifdef __BIG_ENDIAN_BITFIELD + u64 u64; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 reserved:17; + u64 ossp:32; /** opcode/subcode specific parameters */ + } r; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 extra:28; + u64 vlan:12; + u64 priority:3; + u64 csum_verified:3; /** checksum verified. */ + u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */ + u64 encap_on:1; + u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */ + } r_dh; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 reserved:11; + u64 num_gmx_ports:8; + u64 max_nic_ports:10; + u64 app_cap_flags:4; + u64 app_mode:8; + u64 pkind:8; + } r_core_drv_init; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 reserved:8; + u64 extra:25; + u64 gmxport:16; + } r_nic_info; +#else + u64 u64; + struct { + u64 ossp:32; /** opcode/subcode specific parameters */ + u64 reserved:17; + u64 len:3; /** additional 64-bit words */ + u64 subcode:8; + u64 opcode:4; + } r; + struct { + u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */ + u64 encap_on:1; + u64 has_hwtstamp:1; /** 1 = has hwtstamp */ + u64 csum_verified:3; /** checksum verified. 
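+ * (holds the CNNIC_*SUM_VERIFIED bits defined above)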
*/ + u64 priority:3; + u64 vlan:12; + u64 extra:28; + u64 len:3; /** additional 64-bit words */ + u64 subcode:8; + u64 opcode:4; + } r_dh; + struct { + u64 pkind:8; + u64 app_mode:8; + u64 app_cap_flags:4; + u64 max_nic_ports:10; + u64 num_gmx_ports:8; + u64 reserved:11; + u64 len:3; /** additional 64-bit words */ + u64 subcode:8; + u64 opcode:4; + } r_core_drv_init; + struct { + u64 gmxport:16; + u64 extra:25; + u64 reserved:8; + u64 len:3; /** additional 64-bit words */ + u64 subcode:8; + u64 opcode:4; + } r_nic_info; +#endif +}; + +#define OCT_RH_SIZE (sizeof(union octeon_rh)) + +union octnic_packet_params { + u32 u32; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u32 reserved:24; + u32 ip_csum:1; /* Perform IP header checksum(s) */ + /* Perform Outer transport header checksum */ + u32 transport_csum:1; + /* Find tunnel, and perform transport csum. */ + u32 tnl_csum:1; + u32 tsflag:1; /* Timestamp this packet */ + u32 ipsec_ops:4; /* IPsec operation */ +#else + u32 ipsec_ops:4; + u32 tsflag:1; + u32 tnl_csum:1; + u32 transport_csum:1; + u32 ip_csum:1; + u32 reserved:24; +#endif + } s; +}; + +/** Status of a RGMII Link on Octeon as seen by core driver. */ +union oct_link_status { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 duplex:8; + u64 mtu:16; + u64 speed:16; + u64 link_up:1; + u64 autoneg:1; + u64 if_mode:5; + u64 pause:1; + u64 flashing:1; + u64 phy_type:5; + u64 reserved:10; +#else + u64 reserved:10; + u64 phy_type:5; + u64 flashing:1; + u64 pause:1; + u64 if_mode:5; + u64 autoneg:1; + u64 link_up:1; + u64 speed:16; + u64 mtu:16; + u64 duplex:8; +#endif + } s; +}; + +enum lio_phy_type { + LIO_PHY_PORT_TP = 0x0, + LIO_PHY_PORT_FIBRE = 0x1, + LIO_PHY_PORT_UNKNOWN, +}; + +/** The txpciq info passed to host from the firmware */ + +union oct_txpciq { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 q_no:8; + u64 port:8; + u64 pkind:6; + u64 use_qpg:1; + u64 qpg:11; + u64 reserved0:10; + u64 ctrl_qpg:11; + u64 reserved:9; +#else + u64 reserved:9; + u64 ctrl_qpg:11; + u64 reserved0:10; + u64 qpg:11; + u64 use_qpg:1; + u64 pkind:6; + u64 port:8; + u64 q_no:8; +#endif + } s; +}; + +/** The rxpciq info passed to host from the firmware */ + +union oct_rxpciq { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 q_no:8; + u64 reserved:56; +#else + u64 reserved:56; + u64 q_no:8; +#endif + } s; +}; + +/** Information for a OCTEON ethernet interface shared between core & host. */ +struct oct_link_info { + union oct_link_status link; + u64 hw_addr; + +#ifdef __BIG_ENDIAN_BITFIELD + u64 gmxport:16; + u64 macaddr_is_admin_asgnd:1; + u64 rsvd:13; + u64 macaddr_spoofchk:1; + u64 rsvd1:17; + u64 num_txpciq:8; + u64 num_rxpciq:8; +#else + u64 num_rxpciq:8; + u64 num_txpciq:8; + u64 rsvd1:17; + u64 macaddr_spoofchk:1; + u64 rsvd:13; + u64 macaddr_is_admin_asgnd:1; + u64 gmxport:16; +#endif + + union oct_txpciq txpciq[MAX_IOQS_PER_NICIF]; + union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF]; +}; + +#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info)) + +struct liquidio_if_cfg_info { + u64 iqmask; /** mask for IQs enabled for the port */ + u64 oqmask; /** mask for OQs enabled for the port */ + struct oct_link_info linfo; /** initial link information */ + char liquidio_firmware_version[32]; +}; + +/** Stats for each NIC port in RX direction. 
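+ * Link-level MAC counters are listed first, followed by the
+ * firmware-maintained counters.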
*/ +struct nic_rx_stats { + /* link-level stats */ + u64 total_rcvd; /* Received packets */ + u64 bytes_rcvd; /* Octets of received packets */ + u64 total_bcst; /* Number of non-dropped L2 broadcast packets */ + u64 total_mcst; /* Number of non-dropped L2 multicast packets */ + u64 runts; /* Packets shorter than allowed */ + u64 ctl_rcvd; /* Received PAUSE packets */ + u64 fifo_err; /* Packets dropped due to RX FIFO full */ + u64 dmac_drop; /* Packets dropped by the DMAC filter */ + u64 fcs_err; /* Sum of fragment, overrun, and FCS errors */ + u64 jabber_err; /* Packets larger than allowed */ + u64 l2_err; /* Sum of DMA, parity, PCAM access, no memory, + * buffer overflow, malformed L2 header or + * length, oversize errors + **/ + u64 frame_err; /* Sum of IPv4 and L4 checksum errors */ + u64 red_drops; /* Packets dropped by RED due to buffer + * exhaustion + **/ + + /* firmware stats */ + u64 fw_total_rcvd; + u64 fw_total_fwd; + u64 fw_total_fwd_bytes; + u64 fw_total_mcast; + u64 fw_total_bcast; + + u64 fw_err_pko; + u64 fw_err_link; + u64 fw_err_drop; + u64 fw_rx_vxlan; + u64 fw_rx_vxlan_err; + + /* LRO */ + u64 fw_lro_pkts; /* Number of packets that are LROed */ + u64 fw_lro_octs; /* Number of octets that are LROed */ + u64 fw_total_lro; /* Number of LRO packets formed */ + u64 fw_lro_aborts; /* Number of times LRO of packet aborted */ + u64 fw_lro_aborts_port; + u64 fw_lro_aborts_seq; + u64 fw_lro_aborts_tsval; + u64 fw_lro_aborts_timer; /* Timer setting error */ + /* intrmod: packet forward rate */ + u64 fwd_rate; +}; + +/** Stats for each NIC port in RX direction. */ +struct nic_tx_stats { + /* link-level stats */ + u64 total_pkts_sent; /* Total frames sent on the interface */ + u64 total_bytes_sent; /* Total octets sent on the interface */ + u64 mcast_pkts_sent; /* Packets sent to the multicast DMAC */ + u64 bcast_pkts_sent; /* Packets sent to a broadcast DMAC */ + u64 ctl_sent; /* Control/PAUSE packets sent */ + u64 one_collision_sent; /* Packets sent that experienced a + * single collision before successful + * transmission + **/ + u64 multi_collision_sent; /* Packets sent that experienced + * multiple collisions before successful + * transmission + **/ + u64 max_collision_fail; /* Packets dropped due to excessive + * collisions + **/ + u64 max_deferral_fail; /* Packets not sent due to max + * deferrals + **/ + u64 fifo_err; /* Packets sent that experienced a + * transmit underflow and were + * truncated + **/ + u64 runts; /* Packets sent with an octet count + * lessthan 64 + **/ + u64 total_collisions; /* Packets dropped due to excessive + * collisions + **/ + + /* firmware stats */ + u64 fw_total_sent; + u64 fw_total_fwd; + u64 fw_total_fwd_bytes; + u64 fw_total_mcast_sent; + u64 fw_total_bcast_sent; + u64 fw_err_pko; + u64 fw_err_link; + u64 fw_err_drop; + u64 fw_err_tso; + u64 fw_tso; /* number of tso requests */ + u64 fw_tso_fwd; /* number of packets segmented in tso */ + u64 fw_tx_vxlan; + u64 fw_err_pki; +}; + +struct oct_link_stats { + struct nic_rx_stats fromwire; + struct nic_tx_stats fromhost; + +}; + +static inline int opcode_slow_path(union octeon_rh *rh) +{ + u16 subcode1, subcode2; + + subcode1 = OPCODE_SUBCODE((rh)->r.opcode, (rh)->r.subcode); + subcode2 = OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA); + + return (subcode2 != subcode1); +} + +#define LIO68XX_LED_CTRL_ADDR 0x3501 +#define LIO68XX_LED_CTRL_CFGON 0x1f +#define LIO68XX_LED_CTRL_CFGOFF 0x100 +#define LIO68XX_LED_BEACON_ADDR 0x3508 +#define LIO68XX_LED_BEACON_CFGON 0x47fd +#define 
LIO68XX_LED_BEACON_CFGOFF 0x11fc +#define VITESSE_PHY_GPIO_DRIVEON 0x1 +#define VITESSE_PHY_GPIO_CFG 0x8 +#define VITESSE_PHY_GPIO_DRIVEOFF 0x4 +#define VITESSE_PHY_GPIO_HIGH 0x2 +#define VITESSE_PHY_GPIO_LOW 0x3 +#define LED_IDENTIFICATION_ON 0x1 +#define LED_IDENTIFICATION_OFF 0x0 +#define LIO23XX_COPPERHEAD_LED_GPIO 0x2 + +struct oct_mdio_cmd { + u64 op; + u64 mdio_addr; + u64 value1; + u64 value2; + u64 value3; +}; + +#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats)) + +struct oct_intrmod_cfg { + u64 rx_enable; + u64 tx_enable; + u64 check_intrvl; + u64 maxpkt_ratethr; + u64 minpkt_ratethr; + u64 rx_maxcnt_trigger; + u64 rx_mincnt_trigger; + u64 rx_maxtmr_trigger; + u64 rx_mintmr_trigger; + u64 tx_mincnt_trigger; + u64 tx_maxcnt_trigger; + u64 rx_frames; + u64 tx_frames; + u64 rx_usecs; +}; + +#define BASE_QUEUE_NOT_REQUESTED 65535 + +union oct_nic_if_cfg { + u64 u64; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 base_queue:16; + u64 num_iqueues:16; + u64 num_oqueues:16; + u64 gmx_port_id:8; + u64 vf_id:8; +#else + u64 vf_id:8; + u64 gmx_port_id:8; + u64 num_oqueues:16; + u64 num_iqueues:16; + u64 base_queue:16; +#endif + } s; +}; + +struct lio_trusted_vf { + uint64_t active: 1; + uint64_t id : 8; + uint64_t reserved: 55; +}; + +struct lio_time { + s64 sec; /* seconds */ + s64 nsec; /* nanoseconds */ +}; + +struct lio_vf_rep_stats { + u64 tx_packets; + u64 tx_bytes; + u64 tx_dropped; + + u64 rx_packets; + u64 rx_bytes; + u64 rx_dropped; +}; + +enum lio_vf_rep_req_type { + LIO_VF_REP_REQ_NONE, + LIO_VF_REP_REQ_STATE, + LIO_VF_REP_REQ_MTU, + LIO_VF_REP_REQ_STATS, + LIO_VF_REP_REQ_DEVNAME +}; + +enum { + LIO_VF_REP_STATE_DOWN, + LIO_VF_REP_STATE_UP +}; + +#define LIO_IF_NAME_SIZE 16 +struct lio_vf_rep_req { + u8 req_type; + u8 ifidx; + u8 rsvd[6]; + + union { + struct lio_vf_rep_name { + char name[LIO_IF_NAME_SIZE]; + } rep_name; + + struct lio_vf_rep_mtu { + u32 mtu; + u32 rsvd; + } rep_mtu; + + struct lio_vf_rep_state { + u8 state; + u8 rsvd[7]; + } rep_state; + }; +}; + +struct lio_vf_rep_resp { + u64 rh; + u8 status; + u8 rsvd[7]; +}; +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h new file mode 100644 index 000000000..5bf5e8791 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h @@ -0,0 +1,54 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#ifndef _LIQUIDIO_IMAGE_H_ +#define _LIQUIDIO_IMAGE_H_ + +#define LIO_MAX_FW_TYPE_LEN (8) +#define LIO_MAX_FW_FILENAME_LEN (256) +#define LIO_FW_DIR "liquidio/" +#define LIO_FW_BASE_NAME "lio_" +#define LIO_FW_NAME_SUFFIX ".bin" +#define LIO_FW_NAME_TYPE_NIC "nic" +#define LIO_FW_NAME_TYPE_AUTO "auto" +#define LIO_FW_NAME_TYPE_NONE "none" +#define LIO_MAX_FIRMWARE_VERSION_LEN 16 + +#define LIO_MAX_BOOTCMD_LEN 1024 +#define LIO_MAX_IMAGES 16 +#define LIO_NIC_MAGIC 0x434E4943 /* "CNIC" */ +struct octeon_firmware_desc { + __be64 addr; + __be32 len; + __be32 crc32; /* crc32 of image */ +}; + +/* Following the header is a list of 64-bit aligned binary images, + * as described by the desc field. + * Numeric fields are in network byte order. + */ +struct octeon_firmware_file_header { + __be32 magic; + char version[LIO_MAX_FIRMWARE_VERSION_LEN]; + char bootcmd[LIO_MAX_BOOTCMD_LEN]; + __be32 num_images; + struct octeon_firmware_desc desc[LIO_MAX_IMAGES]; + __be32 pad; + __be32 crc32; /* header checksum */ +}; + +#endif /* _LIQUIDIO_IMAGE_H_ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h new file mode 100644 index 000000000..24c212001 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -0,0 +1,472 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file octeon_config.h + * \brief Host Driver: Configuration data structures for the host driver. + */ + +#ifndef __OCTEON_CONFIG_H__ +#define __OCTEON_CONFIG_H__ + +/*--------------------------CONFIG VALUES------------------------*/ + +/* The following macros affect the way the driver data structures + * are generated for Octeon devices. + * They can be modified. + */ + +/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support + * multiple(<= MAX_OCTEON_NICIF) Miniports + */ +#define MAX_OCTEON_NICIF 128 +#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF +#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF +#define MAX_OCTEON_MULTICAST_ADDR 32 + +#define MAX_OCTEON_FILL_COUNT 8 + +/* CN6xxx IQ configuration macros */ +#define CN6XXX_MAX_INPUT_QUEUES 32 +#define CN6XXX_MAX_IQ_DESCRIPTORS 2048 +#define CN6XXX_DB_MIN 1 +#define CN6XXX_DB_MAX 8 +#define CN6XXX_DB_TIMEOUT 1 + +/* CN6xxx OQ configuration macros */ +#define CN6XXX_MAX_OUTPUT_QUEUES 32 +#define CN6XXX_MAX_OQ_DESCRIPTORS 2048 +#define CN6XXX_OQ_BUF_SIZE 1664 +#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \ + (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128) +#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? 
\ + (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128) + +#define CN6XXX_OQ_INTR_PKT 64 +#define CN6XXX_OQ_INTR_TIME 100 +#define DEFAULT_NUM_NIC_PORTS_66XX 2 +#define DEFAULT_NUM_NIC_PORTS_68XX 4 +#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2 + +/* CN23xx IQ configuration macros */ +#define CN23XX_MAX_VFS_PER_PF_PASS_1_0 8 +#define CN23XX_MAX_VFS_PER_PF_PASS_1_1 31 +#define CN23XX_MAX_VFS_PER_PF 63 +#define CN23XX_MAX_RINGS_PER_VF 8 + +#define CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12 +#define CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32 +#define CN23XX_MAX_RINGS_PER_PF 64 +#define CN23XX_MAX_RINGS_PER_VF 8 + +#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF +#define CN23XX_MAX_IQ_DESCRIPTORS 2048 +#define CN23XX_DEFAULT_IQ_DESCRIPTORS 512 +#define CN23XX_MIN_IQ_DESCRIPTORS 128 +#define CN23XX_DB_MIN 1 +#define CN23XX_DB_MAX 8 +#define CN23XX_DB_TIMEOUT 1 + +#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF +#define CN23XX_MAX_OQ_DESCRIPTORS 2048 +#define CN23XX_DEFAULT_OQ_DESCRIPTORS 512 +#define CN23XX_MIN_OQ_DESCRIPTORS 128 +#define CN23XX_OQ_BUF_SIZE 1664 +#define CN23XX_OQ_PKTSPER_INTR 128 +/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/ +#define CN23XX_OQ_REFIL_THRESHOLD 16 + +#define CN23XX_OQ_INTR_PKT 64 +#define CN23XX_OQ_INTR_TIME 100 +#define DEFAULT_NUM_NIC_PORTS_23XX 1 + +#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF +/* PEMs count */ +#define CN23XX_MAX_MACS 4 + +#define CN23XX_DEF_IQ_INTR_THRESHOLD 32 +#define CN23XX_DEF_IQ_INTR_BYTE_THRESHOLD (64 * 1024) +/* common OCTEON configuration macros */ +#define CN6XXX_CFG_IO_QUEUES 32 +#define OCTEON_32BYTE_INSTR 32 +#define OCTEON_64BYTE_INSTR 64 +#define OCTEON_MAX_BASE_IOQ 4 + +#define OCTEON_DMA_INTR_PKT 64 +#define OCTEON_DMA_INTR_TIME 1000 + +#define MAX_TXQS_PER_INTF 8 +#define MAX_RXQS_PER_INTF 8 +#define DEF_TXQS_PER_INTF 4 +#define DEF_RXQS_PER_INTF 4 + +#define INVALID_IOQ_NO 0xff + +#define DEFAULT_POW_GRP 0 + +/* Macros to get octeon config params */ +#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) +#define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs) +#define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size) +#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type) +#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min) +#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout) + +#define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt) +#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val + +#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs) +#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr) +#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold) +#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt) +#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time) +#define CFG_SET_OQ_INTR_PKT(cfg, val) (cfg)->oq.oq_intr_pkt = val +#define CFG_SET_OQ_INTR_TIME(cfg, val) (cfg)->oq.oq_intr_time = val + +#define CFG_GET_DMA_INTR_PKT(cfg) ((cfg)->dma.dma_intr_pkt) +#define CFG_GET_DMA_INTR_TIME(cfg) ((cfg)->dma.dma_intr_time) +#define CFG_GET_NUM_NIC_PORTS(cfg) ((cfg)->num_nic_ports) +#define CFG_GET_NUM_DEF_TX_DESCS(cfg) ((cfg)->num_def_tx_descs) +#define CFG_GET_NUM_DEF_RX_DESCS(cfg) ((cfg)->num_def_rx_descs) +#define CFG_GET_DEF_RX_BUF_SIZE(cfg) ((cfg)->def_rx_buf_size) + +#define CFG_GET_MAX_TXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].max_txqs) +#define CFG_GET_NUM_TXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_txqs) +#define CFG_GET_MAX_RXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].max_rxqs) +#define CFG_GET_NUM_RXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_rxqs) 
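/* Illustrative sketch only (not taken from the driver): how the
 * CFG_GET_* accessors above are typically used once a struct
 * octeon_config has been chosen for a device.  The function name and
 * the pr_info() reporting are assumptions for illustration; it
 * presumes the rest of this header plus the usual kernel headers
 * (linux/types.h, linux/printk.h).
 */
static void example_dump_queue_tunables(struct octeon_config *conf)
{
	u32 max_oqs    = CFG_GET_OQ_MAX_Q(conf);      /* (cfg)->oq.max_oqs */
	u32 intr_pkt   = CFG_GET_OQ_INTR_PKT(conf);   /* packet-count coalescing */
	u32 intr_time  = CFG_GET_OQ_INTR_TIME(conf);  /* time coalescing (usec) */
	u32 instr_type = CFG_GET_IQ_INSTR_TYPE(conf); /* 32- or 64-byte commands */

	pr_info("max_oqs=%u oq_intr_pkt=%u oq_intr_time=%u iq_instr=%u-byte\n",
		max_oqs, intr_pkt, intr_time, instr_type);
}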
+#define CFG_GET_NUM_RX_DESCS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_rx_descs) +#define CFG_GET_NUM_TX_DESCS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_tx_descs) +#define CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].rx_buf_size) +#define CFG_GET_BASE_QUE_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].base_queue) +#define CFG_GET_GMXID_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].gmx_port_id) + +#define CFG_GET_CTRL_Q_GRP(cfg) ((cfg)->misc.ctrlq_grp) +#define CFG_GET_HOST_LINK_QUERY_INTERVAL(cfg) \ + ((cfg)->misc.host_link_query_interval) +#define CFG_GET_OCT_LINK_QUERY_INTERVAL(cfg) \ + ((cfg)->misc.oct_link_query_interval) +#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) + +#define CFG_SET_NUM_RX_DESCS_NIC_IF(cfg, idx, value) \ + ((cfg)->nic_if_cfg[idx].num_rx_descs = value) +#define CFG_SET_NUM_TX_DESCS_NIC_IF(cfg, idx, value) \ + ((cfg)->nic_if_cfg[idx].num_tx_descs = value) + +/* Max IOQs per OCTEON Link */ +#define MAX_IOQS_PER_NICIF 64 + +enum lio_card_type { + LIO_210SV = 0, /* Two port, 66xx */ + LIO_210NV, /* Two port, 68xx */ + LIO_410NV, /* Four port, 68xx */ + LIO_23XX /* 23xx */ +}; + +#define LIO_210SV_NAME "210sv" +#define LIO_210NV_NAME "210nv" +#define LIO_410NV_NAME "410nv" +#define LIO_23XX_NAME "23xx" + +/** Structure to define the configuration attributes for each Input queue. + * Applicable to all Octeon processors + **/ +struct octeon_iq_config { +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved:16; + + /** Tx interrupt packets. Applicable to 23xx only */ + u64 iq_intr_pkt:16; + + /** Minimum ticks to wait before checking for pending instructions. */ + u64 db_timeout:16; + + /** Minimum number of commands pending to be posted to Octeon + * before driver hits the Input queue doorbell. + */ + u64 db_min:8; + + /** Command size - 32 or 64 bytes */ + u64 instr_type:32; + + /** Pending list size (usually set to the sum of the size of all Input + * queues) + */ + u64 pending_list_size:32; + + /* Max number of IQs available */ + u64 max_iqs:8; +#else + /* Max number of IQs available */ + u64 max_iqs:8; + + /** Pending list size (usually set to the sum of the size of all Input + * queues) + */ + u64 pending_list_size:32; + + /** Command size - 32 or 64 bytes */ + u64 instr_type:32; + + /** Minimum number of commands pending to be posted to Octeon + * before driver hits the Input queue doorbell. + */ + u64 db_min:8; + + /** Minimum ticks to wait before checking for pending instructions. */ + u64 db_timeout:16; + + /** Tx interrupt packets. Applicable to 23xx only */ + u64 iq_intr_pkt:16; + + u64 reserved:16; +#endif +}; + +/** Structure to define the configuration attributes for each Output queue. + * Applicable to all Octeon processors + **/ +struct octeon_oq_config { +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved:16; + + u64 pkts_per_intr:16; + + /** Interrupt Coalescing (Time Interval). Octeon will interrupt the + * host if atleast one packet was sent in the time interval specified + * by this field. The driver uses time interval interrupt coalescing + * by default. The time is specified in microseconds. + */ + u64 oq_intr_time:16; + + /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host + * only if it sent as many packets as specified by this field. + * The driver + * usually does not use packet count interrupt coalescing. 
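+ * (The stock configurations in octeon_device.c initialise this field
+ * from CN6XXX_OQ_INTR_PKT / CN23XX_OQ_INTR_PKT, i.e. 64 packets.)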
+ */ + u64 oq_intr_pkt:16; + + /** The number of buffers that were consumed during packet processing by + * the driver on this Output queue before the driver attempts to + * replenish + * the descriptor ring with new buffers. + */ + u64 refill_threshold:16; + + /* Max number of OQs available */ + u64 max_oqs:8; + +#else + /* Max number of OQs available */ + u64 max_oqs:8; + + /** The number of buffers that were consumed during packet processing by + * the driver on this Output queue before the driver attempts to + * replenish + * the descriptor ring with new buffers. + */ + u64 refill_threshold:16; + + /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host + * only if it sent as many packets as specified by this field. + * The driver + * usually does not use packet count interrupt coalescing. + */ + u64 oq_intr_pkt:16; + + /** Interrupt Coalescing (Time Interval). Octeon will interrupt the + * host if atleast one packet was sent in the time interval specified + * by this field. The driver uses time interval interrupt coalescing + * by default. The time is specified in microseconds. + */ + u64 oq_intr_time:16; + + u64 pkts_per_intr:16; + + u64 reserved:16; +#endif + +}; + +/** This structure conatins the NIC link configuration attributes, + * common for all the OCTEON Modles. + */ +struct octeon_nic_if_config { +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved:56; + + u64 base_queue:16; + + u64 gmx_port_id:8; + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + u64 rx_buf_size:16; + + /* Num of desc for tx rings */ + u64 num_tx_descs:16; + + /* Num of desc for rx rings */ + u64 num_rx_descs:16; + + /* Actual configured value. Range could be: 1...max_rxqs */ + u64 num_rxqs:16; + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + u64 max_rxqs:16; + + /* Actual configured value. Range could be: 1...max_txqs */ + u64 num_txqs:16; + + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + u64 max_txqs:16; +#else + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + u64 max_txqs:16; + + /* Actual configured value. Range could be: 1...max_txqs */ + u64 num_txqs:16; + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + u64 max_rxqs:16; + + /* Actual configured value. Range could be: 1...max_rxqs */ + u64 num_rxqs:16; + + /* Num of desc for rx rings */ + u64 num_rx_descs:16; + + /* Num of desc for tx rings */ + u64 num_tx_descs:16; + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + u64 rx_buf_size:16; + + u64 gmx_port_id:8; + + u64 base_queue:16; + + u64 reserved:56; +#endif + +}; + +/** Structure to define the configuration attributes for meta data. + * Applicable to all Octeon processors. + */ + +struct octeon_misc_config { +#ifdef __BIG_ENDIAN_BITFIELD + /** Host link status polling period */ + u64 host_link_query_interval:32; + /** Oct link status polling period */ + u64 oct_link_query_interval:32; + + u64 enable_sli_oq_bp:1; + /** Control IQ Group */ + u64 ctrlq_grp:4; +#else + /** Control IQ Group */ + u64 ctrlq_grp:4; + /** BP for SLI OQ */ + u64 enable_sli_oq_bp:1; + /** Host link status polling period */ + u64 oct_link_query_interval:32; + /** Oct link status polling period */ + u64 host_link_query_interval:32; +#endif +}; + +/** Structure to define the configuration for all OCTEON processors. 
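+ * One of the per-chip defaults in octeon_device.c (default_cn66xx_conf,
+ * default_cn68xx_conf, default_cn23xx_conf, ...) is selected for each
+ * device and is then read through the CFG_GET_* accessors above.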
*/ +struct octeon_config { + u16 card_type; + char *card_name; + + /** Input Queue attributes. */ + struct octeon_iq_config iq; + + /** Output Queue attributes. */ + struct octeon_oq_config oq; + + /** NIC Port Configuration */ + struct octeon_nic_if_config nic_if_cfg[MAX_OCTEON_NICIF]; + + /** Miscellaneous attributes */ + struct octeon_misc_config misc; + + int num_nic_ports; + + int num_def_tx_descs; + + /* Num of desc for rx rings */ + int num_def_rx_descs; + + int def_rx_buf_size; + +}; + +/* The following config values are fixed and should not be modified. */ + +#define BAR1_INDEX_DYNAMIC_MAP 2 +#define BAR1_INDEX_STATIC_MAP 15 +#define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024) + +#define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE) + +/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking + * 1 process done list, 1 zombie lists(timeouted sc list) + * NoResponse Lists are now maintained with each IQ. (Dec' 2007). + */ +#define MAX_RESPONSE_LISTS 6 + +/* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the + * dispatch table. + */ +#define OPCODE_MASK_BITS 6 + +/* Mask for the 6-bit lookup hash */ +#define OCTEON_OPCODE_MASK 0x3f + +/* Size of the dispatch table. The 6-bit hash can index into 2^6 entries */ +#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS) + +/* Maximum number of Octeon Instruction (command) queues */ +#define MAX_OCTEON_INSTR_QUEUES(oct) \ + (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_INPUT_QUEUES : \ + CN6XXX_MAX_INPUT_QUEUES) + +/* Maximum number of Octeon Instruction (command) queues */ +#define MAX_OCTEON_OUTPUT_QUEUES(oct) \ + (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_OUTPUT_QUEUES : \ + CN6XXX_MAX_OUTPUT_QUEUES) + +#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES +#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES + +#define MAX_POSSIBLE_VFS 64 + +#endif /* __OCTEON_CONFIG_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c new file mode 100644 index 000000000..28feabec8 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -0,0 +1,920 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +/* + * @file octeon_console.c + */ +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/crc32.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +static void octeon_remote_lock(void); +static void octeon_remote_unlock(void); +static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, + const char *name, + u32 flags); +static int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size); + +#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008 +#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004 +#define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000 +#define BOOTLOADER_PCI_READ_DESC_ADDR 0x0006c100 +#define BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN 248 + +#define OCTEON_PCI_IO_BUF_OWNER_OCTEON 0x00000001 +#define OCTEON_PCI_IO_BUF_OWNER_HOST 0x00000002 + +/** Can change without breaking ABI */ +#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64 + +/** minimum alignment of bootmem alloced blocks */ +#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull) + +/** CVMX bootmem descriptor major version */ +#define CVMX_BOOTMEM_DESC_MAJ_VER 3 +/* CVMX bootmem descriptor minor version */ +#define CVMX_BOOTMEM_DESC_MIN_VER 0 + +/* Current versions */ +#define OCTEON_PCI_CONSOLE_MAJOR_VERSION 1 +#define OCTEON_PCI_CONSOLE_MINOR_VERSION 0 +#define OCTEON_PCI_CONSOLE_BLOCK_NAME "__pci_console" +#define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */ + +/* First three members of cvmx_bootmem_desc are left in original + * positions for backwards compatibility. + * Assumes big endian target + */ +struct cvmx_bootmem_desc { + /** spinlock to control access to list */ + u32 lock; + + /** flags for indicating various conditions */ + u32 flags; + + u64 head_addr; + + /** incremented changed when incompatible changes made */ + u32 major_version; + + /** incremented changed when compatible changes made, + * reset to zero when major incremented + */ + u32 minor_version; + + u64 app_data_addr; + u64 app_data_size; + + /** number of elements in named blocks array */ + u32 nb_num_blocks; + + /** length of name array in bootmem blocks */ + u32 named_block_name_len; + + /** address of named memory block descriptors */ + u64 named_block_array_addr; +}; + +/* Structure that defines a single console. + * + * Note: when read_index == write_index, the buffer is empty. + * The actual usable size of each console is console_buf_size -1; + */ +struct octeon_pci_console { + u64 input_base_addr; + u32 input_read_index; + u32 input_write_index; + u64 output_base_addr; + u32 output_read_index; + u32 output_write_index; + u32 lock; + u32 buf_size; +}; + +/* This is the main container structure that contains all the information + * about all PCI consoles. The address of this structure is passed to various + * routines that operation on PCI consoles. + */ +struct octeon_pci_console_desc { + u32 major_version; + u32 minor_version; + u32 lock; + u32 flags; + u32 num_consoles; + u32 pad; + /* must be 64 bit aligned here... */ + /* Array of addresses of octeon_pci_console structures */ + u64 console_addr_array[]; + /* Implicit storage for console_addr_array */ +}; + +/* + * This function is the implementation of the get macros defined + * for individual structure members. 
The argument are generated + * by the macros inorder to read only the needed memory. + * + * @param oct Pointer to current octeon device + * @param base 64bit physical address of the complete structure + * @param offset Offset from the beginning of the structure to the member being + * accessed. + * @param size Size of the structure member. + * + * @return Value of the structure member promoted into a u64. + */ +static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct, + u64 base, + u32 offset, + u32 size) +{ + base = (1ull << 63) | (base + offset); + switch (size) { + case 4: + return octeon_read_device_mem32(oct, base); + case 8: + return octeon_read_device_mem64(oct, base); + default: + return 0; + } +} + +/* + * This function retrieves the string name of a named block. It is + * more complicated than a simple memcpy() since the named block + * descriptor may not be directly accessible. + * + * @param addr Physical address of the named block descriptor + * @param str String to receive the named block string name + * @param len Length of the string buffer, which must match the length + * stored in the bootmem descriptor. + */ +static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct, + u64 addr, + char *str, + u32 len) +{ + addr += offsetof(struct cvmx_bootmem_named_block_desc, name); + octeon_pci_read_core_mem(oct, addr, (u8 *)str, len); + str[len] = 0; +} + +/* See header file for descriptions of functions */ + +/* + * Check the version information on the bootmem descriptor + * + * @param exact_match + * Exact major version to check against. A zero means + * check that the version supports named blocks. + * + * @return Zero if the version is correct. Negative if the version is + * incorrect. Failures also cause a message to be displayed. 
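+ * For example, a descriptor reporting the current layout (major
+ * version 3) passes both the generic check and an exact_match of 3,
+ * while a major version above 3 is always rejected.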
+ */ +static int __cvmx_bootmem_check_version(struct octeon_device *oct, + u32 exact_match) +{ + u32 major_version; + u32 minor_version; + + if (!oct->bootmem_desc_addr) + oct->bootmem_desc_addr = + octeon_read_device_mem64(oct, + BOOTLOADER_PCI_READ_DESC_ADDR); + major_version = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, major_version), + sizeof_field(struct cvmx_bootmem_desc, major_version)); + minor_version = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, minor_version), + sizeof_field(struct cvmx_bootmem_desc, minor_version)); + + dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__, + major_version); + if ((major_version > 3) || + (exact_match && major_version != exact_match)) { + dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n", + major_version, minor_version, + (long long)oct->bootmem_desc_addr); + return -1; + } else { + return 0; + } +} + +static const struct cvmx_bootmem_named_block_desc +*__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct, + const char *name, u32 flags) +{ + struct cvmx_bootmem_named_block_desc *desc = + &oct->bootmem_named_block_desc; + u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags); + + if (named_addr) { + desc->base_addr = __cvmx_bootmem_desc_get( + oct, named_addr, + offsetof(struct cvmx_bootmem_named_block_desc, + base_addr), + sizeof_field( + struct cvmx_bootmem_named_block_desc, + base_addr)); + desc->size = __cvmx_bootmem_desc_get(oct, named_addr, + offsetof(struct cvmx_bootmem_named_block_desc, + size), + sizeof_field( + struct cvmx_bootmem_named_block_desc, + size)); + + strncpy(desc->name, name, sizeof(desc->name)); + desc->name[sizeof(desc->name) - 1] = 0; + return &oct->bootmem_named_block_desc; + } else { + return NULL; + } +} + +static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, + const char *name, + u32 flags) +{ + u64 result = 0; + + if (!__cvmx_bootmem_check_version(oct, 3)) { + u32 i; + + u64 named_block_array_addr = __cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, + named_block_array_addr), + sizeof_field(struct cvmx_bootmem_desc, + named_block_array_addr)); + u32 num_blocks = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, + nb_num_blocks), + sizeof_field(struct cvmx_bootmem_desc, + nb_num_blocks)); + + u32 name_length = (u32)__cvmx_bootmem_desc_get( + oct, oct->bootmem_desc_addr, + offsetof(struct cvmx_bootmem_desc, + named_block_name_len), + sizeof_field(struct cvmx_bootmem_desc, + named_block_name_len)); + + u64 named_addr = named_block_array_addr; + + for (i = 0; i < num_blocks; i++) { + u64 named_size = __cvmx_bootmem_desc_get( + oct, named_addr, + offsetof( + struct cvmx_bootmem_named_block_desc, + size), + sizeof_field( + struct cvmx_bootmem_named_block_desc, + size)); + + if (name && named_size) { + char *name_tmp = + kmalloc(name_length + 1, GFP_KERNEL); + if (!name_tmp) + break; + + CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr, + name_tmp, + name_length); + if (!strncmp(name, name_tmp, name_length)) { + result = named_addr; + kfree(name_tmp); + break; + } + kfree(name_tmp); + } else if (!name && !named_size) { + result = named_addr; + break; + } + + named_addr += + sizeof(struct cvmx_bootmem_named_block_desc); + } + } + return result; +} + +/* + * Find a named block on the remote Octeon + * + * @param name Name of block to find + * @param base_addr 
Address the block is at (OUTPUT) + * @param size The size of the block (OUTPUT) + * + * @return Zero on success, One on failure. + */ +static int octeon_named_block_find(struct octeon_device *oct, const char *name, + u64 *base_addr, u64 *size) +{ + const struct cvmx_bootmem_named_block_desc *named_block; + + octeon_remote_lock(); + named_block = __cvmx_bootmem_find_named_block_flags(oct, name, 0); + octeon_remote_unlock(); + if (named_block) { + *base_addr = named_block->base_addr; + *size = named_block->size; + return 0; + } + return 1; +} + +static void octeon_remote_lock(void) +{ + /* fill this in if any sharing is needed */ +} + +static void octeon_remote_unlock(void) +{ + /* fill this in if any sharing is needed */ +} + +int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str, + u32 wait_hundredths) +{ + u32 len = (u32)strlen(cmd_str); + + dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str); + + if (len > BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) { + dev_err(&oct->pci_dev->dev, "Command string too long, max length is: %d\n", + BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1); + return -1; + } + + if (octeon_wait_for_bootloader(oct, wait_hundredths) != 0) { + dev_err(&oct->pci_dev->dev, "Bootloader not ready for command.\n"); + return -1; + } + + /* Write command to bootloader */ + octeon_remote_lock(); + octeon_pci_write_core_mem(oct, BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR, + (u8 *)cmd_str, len); + octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR, + len); + octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR, + OCTEON_PCI_IO_BUF_OWNER_OCTEON); + + /* Bootloader should accept command very quickly + * if it really was ready + */ + if (octeon_wait_for_bootloader(oct, 200) != 0) { + octeon_remote_unlock(); + dev_err(&oct->pci_dev->dev, "Bootloader did not accept command.\n"); + return -1; + } + octeon_remote_unlock(); + return 0; +} + +int octeon_wait_for_bootloader(struct octeon_device *oct, + u32 wait_time_hundredths) +{ + dev_dbg(&oct->pci_dev->dev, "waiting %d0 ms for bootloader\n", + wait_time_hundredths); + + if (octeon_mem_access_ok(oct)) + return -1; + + while (wait_time_hundredths > 0 && + octeon_read_device_mem32(oct, + BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR) + != OCTEON_PCI_IO_BUF_OWNER_HOST) { + if (--wait_time_hundredths <= 0) + return -1; + schedule_timeout_uninterruptible(HZ / 100); + } + return 0; +} + +static void octeon_console_handle_result(struct octeon_device *oct, + size_t console_num) +{ + struct octeon_console *console; + + console = &oct->console[console_num]; + + console->waiting = 0; +} + +static char console_buffer[OCTEON_CONSOLE_MAX_READ_BYTES]; + +static void output_console_line(struct octeon_device *oct, + struct octeon_console *console, + size_t console_num, + char *console_buffer, + s32 bytes_read) +{ + char *line; + s32 i; + size_t len; + + line = console_buffer; + for (i = 0; i < bytes_read; i++) { + /* Output a line at a time, prefixed */ + if (console_buffer[i] == '\n') { + console_buffer[i] = '\0'; + /* We need to output 'line', prefaced by 'leftover'. + * However, it is possible we're being called to + * output 'leftover' by itself (in the case of nothing + * having been read from the console). + * + * To avoid duplication, check for this condition. 
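+ * (e.g. a previous poll may have left "boot" in 'leftover' while this
+ * read supplies "ing PCI console\n": both pieces are handed to
+ * console->print() in one call and 'leftover' is then cleared.  When a
+ * dangling 'leftover' is later flushed on its own, 'line' points at
+ * 'leftover' itself and is passed with a NULL second string.)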
+ */ + if (console->leftover[0] && + (line != console->leftover)) { + if (console->print) + (*console->print)(oct, (u32)console_num, + console->leftover, + line); + console->leftover[0] = '\0'; + } else { + if (console->print) + (*console->print)(oct, (u32)console_num, + line, NULL); + } + line = &console_buffer[i + 1]; + } + } + + /* Save off any leftovers */ + if (line != &console_buffer[bytes_read]) { + console_buffer[bytes_read] = '\0'; + len = strlen(console->leftover); + strncpy(&console->leftover[len], line, + sizeof(console->leftover) - len); + } +} + +static void check_console(struct work_struct *work) +{ + s32 bytes_read, tries, total_read; + size_t len; + struct octeon_console *console; + struct cavium_wk *wk = (struct cavium_wk *)work; + struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; + u32 console_num = (u32)wk->ctxul; + u32 delay; + + console = &oct->console[console_num]; + tries = 0; + total_read = 0; + + do { + /* Take console output regardless of whether it will + * be logged + */ + bytes_read = + octeon_console_read(oct, console_num, console_buffer, + sizeof(console_buffer) - 1); + if (bytes_read > 0) { + total_read += bytes_read; + if (console->waiting) + octeon_console_handle_result(oct, console_num); + if (console->print) { + output_console_line(oct, console, console_num, + console_buffer, bytes_read); + } + } else if (bytes_read < 0) { + dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n", + console_num, bytes_read); + } + + tries++; + } while ((bytes_read > 0) && (tries < 16)); + + /* If nothing is read after polling the console, + * output any leftovers if any + */ + if (console->print && (total_read == 0) && + (console->leftover[0])) { + /* append '\n' as terminator for 'output_console_line' */ + len = strlen(console->leftover); + console->leftover[len] = '\n'; + output_console_line(oct, console, console_num, + console->leftover, (s32)(len + 1)); + console->leftover[0] = '\0'; + } + + delay = OCTEON_CONSOLE_POLL_INTERVAL_MS; + + schedule_delayed_work(&wk->work, msecs_to_jiffies(delay)); +} + +int octeon_init_consoles(struct octeon_device *oct) +{ + int ret = 0; + u64 addr, size; + + ret = octeon_mem_access_ok(oct); + if (ret) { + dev_err(&oct->pci_dev->dev, "Memory access not okay'\n"); + return ret; + } + + ret = octeon_named_block_find(oct, OCTEON_PCI_CONSOLE_BLOCK_NAME, &addr, + &size); + if (ret) { + dev_err(&oct->pci_dev->dev, "Could not find console '%s'\n", + OCTEON_PCI_CONSOLE_BLOCK_NAME); + return ret; + } + + /* Dedicate one of Octeon's BAR1 index registers to create a static + * mapping to a region of Octeon DRAM that contains the PCI console + * named block. + */ + oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP; + oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index, + true); + oct->console_nb_info.dram_region_base = addr + & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL); + + /* num_consoles > 0, is an indication that the consoles + * are accessible + */ + oct->num_consoles = octeon_read_device_mem32(oct, + addr + offsetof(struct octeon_pci_console_desc, + num_consoles)); + oct->console_desc_addr = addr; + + dev_dbg(&oct->pci_dev->dev, "Initialized consoles. 
%d available\n", + oct->num_consoles); + + return ret; +} + +static void octeon_get_uboot_version(struct octeon_device *oct) +{ + s32 bytes_read, tries, total_read; + struct octeon_console *console; + u32 console_num = 0; + char *uboot_ver; + char *buf; + char *p; + +#define OCTEON_UBOOT_VER_BUF_SIZE 512 + buf = kmalloc(OCTEON_UBOOT_VER_BUF_SIZE, GFP_KERNEL); + if (!buf) + return; + + if (octeon_console_send_cmd(oct, "setenv stdout pci\n", 50)) { + kfree(buf); + return; + } + + if (octeon_console_send_cmd(oct, "version\n", 1)) { + kfree(buf); + return; + } + + console = &oct->console[console_num]; + tries = 0; + total_read = 0; + + do { + /* Take console output regardless of whether it will + * be logged + */ + bytes_read = + octeon_console_read(oct, + console_num, buf + total_read, + OCTEON_UBOOT_VER_BUF_SIZE - 1 - + total_read); + if (bytes_read > 0) { + buf[bytes_read] = '\0'; + + total_read += bytes_read; + if (console->waiting) + octeon_console_handle_result(oct, console_num); + } else if (bytes_read < 0) { + dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n", + console_num, bytes_read); + } + + tries++; + } while ((bytes_read > 0) && (tries < 16)); + + /* If nothing is read after polling the console, + * output any leftovers if any + */ + if ((total_read == 0) && (console->leftover[0])) { + dev_dbg(&oct->pci_dev->dev, "%u: %s\n", + console_num, console->leftover); + console->leftover[0] = '\0'; + } + + buf[OCTEON_UBOOT_VER_BUF_SIZE - 1] = '\0'; + + uboot_ver = strstr(buf, "U-Boot"); + if (uboot_ver) { + p = strstr(uboot_ver, "mips"); + if (p) { + p--; + *p = '\0'; + dev_info(&oct->pci_dev->dev, "%s\n", uboot_ver); + } + } + + kfree(buf); + octeon_console_send_cmd(oct, "setenv stdout serial\n", 50); +} + +int octeon_add_console(struct octeon_device *oct, u32 console_num, + char *dbg_enb) +{ + int ret = 0; + u32 delay; + u64 coreaddr; + struct delayed_work *work; + struct octeon_console *console; + + if (console_num >= oct->num_consoles) { + dev_err(&oct->pci_dev->dev, + "trying to read from console number %d when only 0 to %d exist\n", + console_num, oct->num_consoles); + } else { + console = &oct->console[console_num]; + + console->waiting = 0; + + coreaddr = oct->console_desc_addr + console_num * 8 + + offsetof(struct octeon_pci_console_desc, + console_addr_array); + console->addr = octeon_read_device_mem64(oct, coreaddr); + coreaddr = console->addr + offsetof(struct octeon_pci_console, + buf_size); + console->buffer_size = octeon_read_device_mem32(oct, coreaddr); + coreaddr = console->addr + offsetof(struct octeon_pci_console, + input_base_addr); + console->input_base_addr = + octeon_read_device_mem64(oct, coreaddr); + coreaddr = console->addr + offsetof(struct octeon_pci_console, + output_base_addr); + console->output_base_addr = + octeon_read_device_mem64(oct, coreaddr); + console->leftover[0] = '\0'; + + work = &oct->console_poll_work[console_num].work; + + octeon_get_uboot_version(oct); + + INIT_DELAYED_WORK(work, check_console); + oct->console_poll_work[console_num].ctxptr = (void *)oct; + oct->console_poll_work[console_num].ctxul = console_num; + delay = OCTEON_CONSOLE_POLL_INTERVAL_MS; + schedule_delayed_work(work, msecs_to_jiffies(delay)); + + /* an empty string means use default debug console enablement */ + if (dbg_enb && !dbg_enb[0]) + dbg_enb = "setenv pci_console_active 1"; + if (dbg_enb) + ret = octeon_console_send_cmd(oct, dbg_enb, 2000); + + console->active = 1; + } + + return ret; +} + +/* + * Removes all consoles + * + * @param oct octeon device + */ 
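/* Illustrative sketch (the sizes and indices are made-up values): the
 * output-ring arithmetic used by the console helpers further down in
 * this file.  With buf_size = 1024, rd_idx = 1000 and wr_idx = 10 the
 * pending console text wraps around the end of the ring.
 */
static u32 example_console_avail_bytes(u32 buf_size, u32 wr_idx, u32 rd_idx)
{
	/* Same formulas as octeon_console_free_bytes()/_avail_bytes() */
	u32 free  = ((buf_size - 1) - (wr_idx - rd_idx)) % buf_size; /* 989 */
	u32 avail = buf_size - 1 - free;                             /* 34  */

	/* octeon_console_read() additionally clips a read to the
	 * contiguous run ending at the buffer boundary, here
	 * buf_size - rd_idx = 24 bytes; the wrapped remainder is
	 * returned by a subsequent read.
	 */
	return avail;
}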
+void octeon_remove_consoles(struct octeon_device *oct) +{ + u32 i; + struct octeon_console *console; + + for (i = 0; i < oct->num_consoles; i++) { + console = &oct->console[i]; + + if (!console->active) + continue; + + cancel_delayed_work_sync(&oct->console_poll_work[i]. + work); + console->addr = 0; + console->buffer_size = 0; + console->input_base_addr = 0; + console->output_base_addr = 0; + } + + oct->num_consoles = 0; +} + +static inline int octeon_console_free_bytes(u32 buffer_size, + u32 wr_idx, + u32 rd_idx) +{ + if (rd_idx >= buffer_size || wr_idx >= buffer_size) + return -1; + + return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size; +} + +static inline int octeon_console_avail_bytes(u32 buffer_size, + u32 wr_idx, + u32 rd_idx) +{ + if (rd_idx >= buffer_size || wr_idx >= buffer_size) + return -1; + + return buffer_size - 1 - + octeon_console_free_bytes(buffer_size, wr_idx, rd_idx); +} + +static int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size) +{ + int bytes_to_read; + u32 rd_idx, wr_idx; + struct octeon_console *console; + + if (console_num >= oct->num_consoles) { + dev_err(&oct->pci_dev->dev, "Attempted to read from disabled console %d\n", + console_num); + return 0; + } + + console = &oct->console[console_num]; + + /* Check to see if any data is available. + * Maybe optimize this with 64-bit read. + */ + rd_idx = octeon_read_device_mem32(oct, console->addr + + offsetof(struct octeon_pci_console, output_read_index)); + wr_idx = octeon_read_device_mem32(oct, console->addr + + offsetof(struct octeon_pci_console, output_write_index)); + + bytes_to_read = octeon_console_avail_bytes(console->buffer_size, + wr_idx, rd_idx); + if (bytes_to_read <= 0) + return bytes_to_read; + + bytes_to_read = min_t(s32, bytes_to_read, buf_size); + + /* Check to see if what we want to read is not contiguous, and limit + * ourselves to the contiguous block + */ + if (rd_idx + bytes_to_read >= console->buffer_size) + bytes_to_read = console->buffer_size - rd_idx; + + octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx, + (u8 *)buffer, bytes_to_read); + octeon_write_device_mem32(oct, console->addr + + offsetof(struct octeon_pci_console, + output_read_index), + (rd_idx + bytes_to_read) % + console->buffer_size); + + return bytes_to_read; +} + +#define FBUF_SIZE (4 * 1024 * 1024) +#define MAX_BOOTTIME_SIZE 80 + +int octeon_download_firmware(struct octeon_device *oct, const u8 *data, + size_t size) +{ + struct octeon_firmware_file_header *h; + char boottime[MAX_BOOTTIME_SIZE]; + struct timespec64 ts; + u32 crc32_result; + u64 load_addr; + u32 image_len; + int ret = 0; + u32 i, rem; + + if (size < sizeof(struct octeon_firmware_file_header)) { + dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n", + (u32)size, + (u32)sizeof(struct octeon_firmware_file_header)); + return -EINVAL; + } + + h = (struct octeon_firmware_file_header *)data; + + if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) { + dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n"); + return -EINVAL; + } + + crc32_result = crc32((unsigned int)~0, data, + sizeof(struct octeon_firmware_file_header) - + sizeof(u32)) ^ ~0U; + if (crc32_result != be32_to_cpu(h->crc32)) { + dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n", + crc32_result, be32_to_cpu(h->crc32)); + return -EINVAL; + } + + if (memcmp(LIQUIDIO_BASE_VERSION, h->version, + strlen(LIQUIDIO_BASE_VERSION))) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware version. 
Expected %s.x, got %s.\n", + LIQUIDIO_BASE_VERSION, + h->version); + return -EINVAL; + } + + if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) { + dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n", + be32_to_cpu(h->num_images)); + return -EINVAL; + } + + dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version); + snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s", + h->version); + + data += sizeof(struct octeon_firmware_file_header); + + dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__, + be32_to_cpu(h->num_images)); + /* load all images */ + for (i = 0; i < be32_to_cpu(h->num_images); i++) { + load_addr = be64_to_cpu(h->desc[i].addr); + image_len = be32_to_cpu(h->desc[i].len); + + dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n", + image_len, load_addr); + + /* Write in 4MB chunks*/ + rem = image_len; + + while (rem) { + if (rem < FBUF_SIZE) + size = rem; + else + size = FBUF_SIZE; + + /* download the image */ + octeon_pci_write_core_mem(oct, load_addr, data, (u32)size); + + data += size; + rem -= (u32)size; + load_addr += size; + } + } + + /* Pass date and time information to NIC at the time of loading + * firmware and periodically update the host time to NIC firmware. + * This is to make NIC firmware use the same time reference as Host, + * so that it is easy to correlate logs from firmware and host for + * debugging. + * + * Octeon always uses UTC time. so timezone information is not sent. + */ + ktime_get_real_ts64(&ts); + ret = snprintf(boottime, MAX_BOOTTIME_SIZE, + " time_sec=%lld time_nsec=%ld", + (s64)ts.tv_sec, ts.tv_nsec); + if ((sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))) < + ret) { + dev_err(&oct->pci_dev->dev, "Boot command buffer too small\n"); + return -EINVAL; + } + strncat(h->bootcmd, boottime, + sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))); + + dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n", + h->bootcmd); + + /* Invoke the bootcmd */ + ret = octeon_console_send_cmd(oct, h->bootcmd, 50); + if (ret) + dev_info(&oct->pci_dev->dev, "Boot command send failed\n"); + + return ret; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c new file mode 100644 index 000000000..e159194d0 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -0,0 +1,1464 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn23xx_pf_device.h" +#include "cn23xx_vf_device.h" + +/** Default configuration + * for CN66XX OCTEON Models. + */ +static struct octeon_config default_cn66xx_conf = { + .card_type = LIO_210SV, + .card_name = LIO_210SV_NAME, + + /** IQ attributes */ + .iq = { + .max_iqs = CN6XXX_CFG_IO_QUEUES, + .pending_list_size = + (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + .db_min = CN6XXX_DB_MIN, + .db_timeout = CN6XXX_DB_TIMEOUT, + } + , + + /** OQ attributes */ + .oq = { + .max_oqs = CN6XXX_CFG_IO_QUEUES, + .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD, + .oq_intr_pkt = CN6XXX_OQ_INTR_PKT, + .oq_intr_time = CN6XXX_OQ_INTR_TIME, + .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR, + } + , + + .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX, + .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + /* For ethernet interface 0: Port cfg Attributes */ + .nic_if_cfg[0] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 0, + }, + + .nic_if_cfg[1] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 1, + }, + + /** Miscellaneous attributes */ + .misc = { + /* Host driver link query interval */ + .oct_link_query_interval = 100, + + /* Octeon link query interval */ + .host_link_query_interval = 500, + + .enable_sli_oq_bp = 0, + + /* Control queue group */ + .ctrlq_grp = 1, + } + , +}; + +/** Default configuration + * for CN68XX OCTEON Model. 
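+ * (LIO_410NV: the four-port 68xx card; the two-port 210NV variant has
+ * its own configuration further below.)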
+ */ + +static struct octeon_config default_cn68xx_conf = { + .card_type = LIO_410NV, + .card_name = LIO_410NV_NAME, + + /** IQ attributes */ + .iq = { + .max_iqs = CN6XXX_CFG_IO_QUEUES, + .pending_list_size = + (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + .db_min = CN6XXX_DB_MIN, + .db_timeout = CN6XXX_DB_TIMEOUT, + } + , + + /** OQ attributes */ + .oq = { + .max_oqs = CN6XXX_CFG_IO_QUEUES, + .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD, + .oq_intr_pkt = CN6XXX_OQ_INTR_PKT, + .oq_intr_time = CN6XXX_OQ_INTR_TIME, + .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR, + } + , + + .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX, + .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .nic_if_cfg[0] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 0, + }, + + .nic_if_cfg[1] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 1, + }, + + .nic_if_cfg[2] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 2, + }, + + .nic_if_cfg[3] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. 
Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 3, + }, + + /** Miscellaneous attributes */ + .misc = { + /* Host driver link query interval */ + .oct_link_query_interval = 100, + + /* Octeon link query interval */ + .host_link_query_interval = 500, + + .enable_sli_oq_bp = 0, + + /* Control queue group */ + .ctrlq_grp = 1, + } + , +}; + +/** Default configuration + * for CN68XX OCTEON Model. + */ +static struct octeon_config default_cn68xx_210nv_conf = { + .card_type = LIO_210NV, + .card_name = LIO_210NV_NAME, + + /** IQ attributes */ + + .iq = { + .max_iqs = CN6XXX_CFG_IO_QUEUES, + .pending_list_size = + (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + .db_min = CN6XXX_DB_MIN, + .db_timeout = CN6XXX_DB_TIMEOUT, + } + , + + /** OQ attributes */ + .oq = { + .max_oqs = CN6XXX_CFG_IO_QUEUES, + .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD, + .oq_intr_pkt = CN6XXX_OQ_INTR_PKT, + .oq_intr_time = CN6XXX_OQ_INTR_TIME, + .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR, + } + , + + .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV, + .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .nic_if_cfg[0] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 0, + }, + + .nic_if_cfg[1] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. 
+ * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 1, + }, + + /** Miscellaneous attributes */ + .misc = { + /* Host driver link query interval */ + .oct_link_query_interval = 100, + + /* Octeon link query interval */ + .host_link_query_interval = 500, + + .enable_sli_oq_bp = 0, + + /* Control queue group */ + .ctrlq_grp = 1, + } + , +}; + +static struct octeon_config default_cn23xx_conf = { + .card_type = LIO_23XX, + .card_name = LIO_23XX_NAME, + /** IQ attributes */ + .iq = { + .max_iqs = CN23XX_CFG_IO_QUEUES, + .pending_list_size = (CN23XX_DEFAULT_IQ_DESCRIPTORS * + CN23XX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + .db_min = CN23XX_DB_MIN, + .db_timeout = CN23XX_DB_TIMEOUT, + .iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD, + }, + + /** OQ attributes */ + .oq = { + .max_oqs = CN23XX_CFG_IO_QUEUES, + .pkts_per_intr = CN23XX_OQ_PKTSPER_INTR, + .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD, + .oq_intr_pkt = CN23XX_OQ_INTR_PKT, + .oq_intr_time = CN23XX_OQ_INTR_TIME, + }, + + .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX, + .num_def_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, + .num_def_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, + .def_rx_buf_size = CN23XX_OQ_BUF_SIZE, + + /* For ethernet interface 0: Port cfg Attributes */ + .nic_if_cfg[0] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN23XX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 0, + }, + + .nic_if_cfg[1] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. 
+ * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN23XX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 1, + }, + + .misc = { + /* Host driver link query interval */ + .oct_link_query_interval = 100, + + /* Octeon link query interval */ + .host_link_query_interval = 500, + + .enable_sli_oq_bp = 0, + + /* Control queue group */ + .ctrlq_grp = 1, + } +}; + +static struct octeon_config_ptr { + u32 conf_type; +} oct_conf_info[MAX_OCTEON_DEVICES] = { + { + OCTEON_CONFIG_TYPE_DEFAULT, + }, { + OCTEON_CONFIG_TYPE_DEFAULT, + }, { + OCTEON_CONFIG_TYPE_DEFAULT, + }, { + OCTEON_CONFIG_TYPE_DEFAULT, + }, +}; + +static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { + "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", + "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", + "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE", + "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", + "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", + "INVALID" +}; + +static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = { + "BASE", "NIC", "UNKNOWN"}; + +static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES]; +static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES]; +static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES]; + +static u32 octeon_device_count; +/* locks device array (i.e. octeon_device[]) */ +static DEFINE_SPINLOCK(octeon_devices_lock); + +static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES]; + +static void oct_set_config_info(int oct_id, int conf_type) +{ + if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1)) + conf_type = OCTEON_CONFIG_TYPE_DEFAULT; + oct_conf_info[oct_id].conf_type = conf_type; +} + +void octeon_init_device_list(int conf_type) +{ + int i; + + memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES)); + for (i = 0; i < MAX_OCTEON_DEVICES; i++) + oct_set_config_info(i, conf_type); +} + +static void *__retrieve_octeon_config_info(struct octeon_device *oct, + u16 card_type) +{ + u32 oct_id = oct->octeon_id; + void *ret = NULL; + + switch (oct_conf_info[oct_id].conf_type) { + case OCTEON_CONFIG_TYPE_DEFAULT: + if (oct->chip_id == OCTEON_CN66XX) { + ret = &default_cn66xx_conf; + } else if ((oct->chip_id == OCTEON_CN68XX) && + (card_type == LIO_210NV)) { + ret = &default_cn68xx_210nv_conf; + } else if ((oct->chip_id == OCTEON_CN68XX) && + (card_type == LIO_410NV)) { + ret = &default_cn68xx_conf; + } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { + ret = &default_cn23xx_conf; + } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) { + ret = &default_cn23xx_conf; + } + break; + default: + break; + } + return ret; +} + +static int __verify_octeon_config_info(struct octeon_device *oct, void *conf) +{ + switch (oct->chip_id) { + case OCTEON_CN66XX: + case OCTEON_CN68XX: + return lio_validate_cn6xxx_config_info(oct, conf); + case OCTEON_CN23XX_PF_VID: + case OCTEON_CN23XX_VF_VID: + return 0; + default: + break; + } + + return 1; +} + +void *oct_get_config_info(struct octeon_device *oct, u16 card_type) +{ + void *conf = NULL; + + conf = __retrieve_octeon_config_info(oct, card_type); + if (!conf) + return NULL; + + if (__verify_octeon_config_info(oct, conf)) { + dev_err(&oct->pci_dev->dev, "Configuration verification failed\n"); + return NULL; + } + + return conf; +} + +char *lio_get_state_string(atomic_t *state_ptr) +{ + s32 istate = (s32)atomic_read(state_ptr); + + if (istate > OCT_DEV_STATES || istate < 0) + return oct_dev_state_str[OCT_DEV_STATE_INVALID]; + return 
oct_dev_state_str[istate]; +} + +static char *get_oct_app_string(u32 app_mode) +{ + if (app_mode <= CVM_DRV_APP_END) + return oct_dev_app_str[app_mode - CVM_DRV_APP_START]; + return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; +} + +void octeon_free_device_mem(struct octeon_device *oct) +{ + int i; + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { + if (oct->io_qmask.oq & BIT_ULL(i)) + vfree(oct->droq[i]); + } + + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (oct->io_qmask.iq & BIT_ULL(i)) + vfree(oct->instr_queue[i]); + } + + i = oct->octeon_id; + vfree(oct); + + octeon_device[i] = NULL; + octeon_device_count--; +} + +static struct octeon_device *octeon_allocate_device_mem(u32 pci_id, + u32 priv_size) +{ + struct octeon_device *oct; + u8 *buf = NULL; + u32 octdevsize = 0, configsize = 0, size; + + switch (pci_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + configsize = sizeof(struct octeon_cn6xxx); + break; + + case OCTEON_CN23XX_PF_VID: + configsize = sizeof(struct octeon_cn23xx_pf); + break; + case OCTEON_CN23XX_VF_VID: + configsize = sizeof(struct octeon_cn23xx_vf); + break; + default: + pr_err("%s: Unknown PCI Device: 0x%x\n", + __func__, + pci_id); + return NULL; + } + + if (configsize & 0x7) + configsize += (8 - (configsize & 0x7)); + + octdevsize = sizeof(struct octeon_device); + if (octdevsize & 0x7) + octdevsize += (8 - (octdevsize & 0x7)); + + if (priv_size & 0x7) + priv_size += (8 - (priv_size & 0x7)); + + size = octdevsize + priv_size + configsize + + (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE); + + buf = vzalloc(size); + if (!buf) + return NULL; + + oct = (struct octeon_device *)buf; + oct->priv = (void *)(buf + octdevsize); + oct->chip = (void *)(buf + octdevsize + priv_size); + oct->dispatch.dlist = (struct octeon_dispatch *) + (buf + octdevsize + priv_size + configsize); + + return oct; +} + +struct octeon_device *octeon_allocate_device(u32 pci_id, + u32 priv_size) +{ + u32 oct_idx = 0; + struct octeon_device *oct = NULL; + + spin_lock(&octeon_devices_lock); + + for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++) + if (!octeon_device[oct_idx]) + break; + + if (oct_idx < MAX_OCTEON_DEVICES) { + oct = octeon_allocate_device_mem(pci_id, priv_size); + if (oct) { + octeon_device_count++; + octeon_device[oct_idx] = oct; + } + } + + spin_unlock(&octeon_devices_lock); + if (!oct) + return NULL; + + spin_lock_init(&oct->pci_win_lock); + spin_lock_init(&oct->mem_access_lock); + + oct->octeon_id = oct_idx; + snprintf(oct->device_name, sizeof(oct->device_name), + "LiquidIO%d", (oct->octeon_id)); + + return oct; +} + +/** Register a device's bus location at initialization time. + * @param octeon_dev - pointer to the octeon device structure. 
+ * @param bus - PCIe bus # + * @param dev - PCIe device # + * @param func - PCIe function # + * @param is_pf - TRUE for PF, FALSE for VF + * @return reference count of device's adapter + */ +int octeon_register_device(struct octeon_device *oct, + int bus, int dev, int func, int is_pf) +{ + int idx, refcount; + + oct->loc.bus = bus; + oct->loc.dev = dev; + oct->loc.func = func; + + oct->adapter_refcount = &adapter_refcounts[oct->octeon_id]; + atomic_set(oct->adapter_refcount, 0); + + /* Like the reference count, the f/w state is shared 'per-adapter' */ + oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id]; + atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED); + + spin_lock(&octeon_devices_lock); + for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) { + if (!octeon_device[idx]) { + dev_err(&oct->pci_dev->dev, + "%s: Internal driver error, missing dev", + __func__); + spin_unlock(&octeon_devices_lock); + atomic_inc(oct->adapter_refcount); + return 1; /* here, refcount is guaranteed to be 1 */ + } + /* If another device is at same bus/dev, use its refcounter + * (and f/w state variable). + */ + if ((octeon_device[idx]->loc.bus == bus) && + (octeon_device[idx]->loc.dev == dev)) { + oct->adapter_refcount = + octeon_device[idx]->adapter_refcount; + oct->adapter_fw_state = + octeon_device[idx]->adapter_fw_state; + break; + } + } + spin_unlock(&octeon_devices_lock); + + atomic_inc(oct->adapter_refcount); + refcount = atomic_read(oct->adapter_refcount); + + dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__, + oct->loc.bus, oct->loc.dev, oct->loc.func, refcount); + + return refcount; +} + +/** Deregister a device at de-initialization time. + * @param octeon_dev - pointer to the octeon device structure. + * @return reference count of device's adapter + */ +int octeon_deregister_device(struct octeon_device *oct) +{ + int refcount; + + atomic_dec(oct->adapter_refcount); + refcount = atomic_read(oct->adapter_refcount); + + dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__, + oct->loc.bus, oct->loc.dev, oct->loc.func, refcount); + + return refcount; +} + +int +octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs) +{ + struct octeon_ioq_vector *ioq_vector; + int cpu_num; + int size; + int i; + + size = sizeof(struct octeon_ioq_vector) * num_ioqs; + + oct->ioq_vector = vzalloc(size); + if (!oct->ioq_vector) + return -1; + for (i = 0; i < num_ioqs; i++) { + ioq_vector = &oct->ioq_vector[i]; + ioq_vector->oct_dev = oct; + ioq_vector->iq_index = i; + ioq_vector->droq_index = i; + ioq_vector->mbox = oct->mbox[i]; + + cpu_num = i % num_online_cpus(); + cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask); + + if (oct->chip_id == OCTEON_CN23XX_PF_VID) + ioq_vector->ioq_num = i + oct->sriov_info.pf_srn; + else + ioq_vector->ioq_num = i; + } + + return 0; +} + +void +octeon_free_ioq_vector(struct octeon_device *oct) +{ + vfree(oct->ioq_vector); +} + +/* this function is only for setting up the first queue */ +int octeon_setup_instr_queues(struct octeon_device *oct) +{ + u32 num_descs = 0; + u32 iq_no = 0; + union oct_txpciq txpciq; + int numa_node = dev_to_node(&oct->pci_dev->dev); + + if (OCTEON_CN6XXX(oct)) + num_descs = + CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx)); + else if (OCTEON_CN23XX_PF(oct)) + num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf)); + else if (OCTEON_CN23XX_VF(oct)) + num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf)); + + oct->num_iqs = 0; + + oct->instr_queue[0] = 
vzalloc_node(sizeof(*oct->instr_queue[0]), + numa_node); + if (!oct->instr_queue[0]) + oct->instr_queue[0] = + vzalloc(sizeof(struct octeon_instr_queue)); + if (!oct->instr_queue[0]) + return 1; + memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue)); + oct->instr_queue[0]->q_index = 0; + oct->instr_queue[0]->app_ctx = (void *)(size_t)0; + oct->instr_queue[0]->ifidx = 0; + txpciq.u64 = 0; + txpciq.s.q_no = iq_no; + txpciq.s.pkind = oct->pfvf_hsword.pkind; + txpciq.s.use_qpg = 0; + txpciq.s.qpg = 0; + if (octeon_init_instr_queue(oct, txpciq, num_descs)) { + /* prevent memory leak */ + vfree(oct->instr_queue[0]); + oct->instr_queue[0] = NULL; + return 1; + } + + oct->num_iqs++; + return 0; +} + +int octeon_setup_output_queues(struct octeon_device *oct) +{ + u32 num_descs = 0; + u32 desc_size = 0; + u32 oq_no = 0; + int numa_node = dev_to_node(&oct->pci_dev->dev); + + if (OCTEON_CN6XXX(oct)) { + num_descs = + CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx)); + desc_size = + CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx)); + } else if (OCTEON_CN23XX_PF(oct)) { + num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf)); + desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf)); + } else if (OCTEON_CN23XX_VF(oct)) { + num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf)); + desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf)); + } + oct->num_oqs = 0; + oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node); + if (!oct->droq[0]) + oct->droq[0] = vzalloc(sizeof(*oct->droq[0])); + if (!oct->droq[0]) + return 1; + + if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) { + vfree(oct->droq[oq_no]); + oct->droq[oq_no] = NULL; + return 1; + } + oct->num_oqs++; + + return 0; +} + +int octeon_set_io_queues_off(struct octeon_device *oct) +{ + int loop = BUSY_READING_REG_VF_LOOP_COUNT; + + if (OCTEON_CN6XXX(oct)) { + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); + } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) { + u32 q_no; + + /* IOQs will already be in reset. + * If RST bit is set, wait for quiet bit to be set. + * Once quiet bit is set, clear the RST bit. + */ + for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) { + u64 reg_val = octeon_read_csr64( + oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)); + + while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) && + !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) && + loop) { + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + loop--; + } + if (!loop) { + dev_err(&oct->pci_dev->dev, + "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n", + q_no); + return -1; + } + + reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST; + octeon_write_csr64(oct, + CN23XX_SLI_IQ_PKT_CONTROL64(q_no), + reg_val); + + reg_val = octeon_read_csr64( + oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)); + if (reg_val & CN23XX_PKT_INPUT_CTL_RST) { + dev_err(&oct->pci_dev->dev, + "unable to reset qno %u\n", q_no); + return -1; + } + } + } + return 0; +} + +void octeon_set_droq_pkt_op(struct octeon_device *oct, + u32 q_no, + u32 enable) +{ + u32 reg_val = 0; + + /* Disable the i/p and o/p queues for this Octeon. 
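+ * Note: on CN6XXX this helper only sets or clears the output-queue enable
+ * bit for q_no; input queues are not modified here (this note added for
+ * clarity, derived from the code below).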
*/ + if (OCTEON_CN6XXX(oct)) { + reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); + + if (enable) + reg_val = reg_val | (1 << q_no); + else + reg_val = reg_val & (~(1 << q_no)); + + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val); + } +} + +int octeon_init_dispatch_list(struct octeon_device *oct) +{ + u32 i; + + oct->dispatch.count = 0; + + for (i = 0; i < DISPATCH_LIST_SIZE; i++) { + oct->dispatch.dlist[i].opcode = 0; + INIT_LIST_HEAD(&oct->dispatch.dlist[i].list); + } + + for (i = 0; i <= REQTYPE_LAST; i++) + octeon_register_reqtype_free_fn(oct, i, NULL); + + spin_lock_init(&oct->dispatch.lock); + + return 0; +} + +void octeon_delete_dispatch_list(struct octeon_device *oct) +{ + u32 i; + struct list_head freelist, *temp, *tmp2; + + INIT_LIST_HEAD(&freelist); + + spin_lock_bh(&oct->dispatch.lock); + + for (i = 0; i < DISPATCH_LIST_SIZE; i++) { + struct list_head *dispatch; + + dispatch = &oct->dispatch.dlist[i].list; + while (dispatch->next != dispatch) { + temp = dispatch->next; + list_move_tail(temp, &freelist); + } + + oct->dispatch.dlist[i].opcode = 0; + } + + oct->dispatch.count = 0; + + spin_unlock_bh(&oct->dispatch.lock); + + list_for_each_safe(temp, tmp2, &freelist) { + list_del(temp); + kfree(temp); + } +} + +octeon_dispatch_fn_t +octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode, + u16 subcode) +{ + u32 idx; + struct list_head *dispatch; + octeon_dispatch_fn_t fn = NULL; + u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); + + idx = combined_opcode & OCTEON_OPCODE_MASK; + + spin_lock_bh(&octeon_dev->dispatch.lock); + + if (octeon_dev->dispatch.count == 0) { + spin_unlock_bh(&octeon_dev->dispatch.lock); + return NULL; + } + + if (!(octeon_dev->dispatch.dlist[idx].opcode)) { + spin_unlock_bh(&octeon_dev->dispatch.lock); + return NULL; + } + + if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { + fn = octeon_dev->dispatch.dlist[idx].dispatch_fn; + } else { + list_for_each(dispatch, + &octeon_dev->dispatch.dlist[idx].list) { + if (((struct octeon_dispatch *)dispatch)->opcode == + combined_opcode) { + fn = ((struct octeon_dispatch *) + dispatch)->dispatch_fn; + break; + } + } + } + + spin_unlock_bh(&octeon_dev->dispatch.lock); + return fn; +} + +/* octeon_register_dispatch_fn + * Parameters: + * octeon_id - id of the octeon device. + * opcode - opcode for which driver should call the registered function + * subcode - subcode for which driver should call the registered function + * fn - The function to call when a packet with "opcode" arrives in + * octeon output queues. + * fn_arg - The argument to be passed when calling function "fn". + * Description: + * Registers a function and its argument to be called when a packet + * arrives in Octeon output queues with "opcode". + * Returns: + * Success: 0 + * Failure: 1 + * Locks: + * No locks are held. 
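+ * Note: if a different function (or a different argument for the same
+ * function) is already registered for this opcode/subcode, the call
+ * logs an error and returns 1, as described under "Failure" above.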
+ */ +int +octeon_register_dispatch_fn(struct octeon_device *oct, + u16 opcode, + u16 subcode, + octeon_dispatch_fn_t fn, void *fn_arg) +{ + u32 idx; + octeon_dispatch_fn_t pfn; + u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); + + idx = combined_opcode & OCTEON_OPCODE_MASK; + + spin_lock_bh(&oct->dispatch.lock); + /* Add dispatch function to first level of lookup table */ + if (oct->dispatch.dlist[idx].opcode == 0) { + oct->dispatch.dlist[idx].opcode = combined_opcode; + oct->dispatch.dlist[idx].dispatch_fn = fn; + oct->dispatch.dlist[idx].arg = fn_arg; + oct->dispatch.count++; + spin_unlock_bh(&oct->dispatch.lock); + return 0; + } + + spin_unlock_bh(&oct->dispatch.lock); + + /* Check if there was a function already registered for this + * opcode/subcode. + */ + pfn = octeon_get_dispatch(oct, opcode, subcode); + if (!pfn) { + struct octeon_dispatch *dispatch; + + dev_dbg(&oct->pci_dev->dev, + "Adding opcode to dispatch list linked list\n"); + dispatch = kmalloc(sizeof(*dispatch), GFP_KERNEL); + if (!dispatch) + return 1; + + dispatch->opcode = combined_opcode; + dispatch->dispatch_fn = fn; + dispatch->arg = fn_arg; + + /* Add dispatch function to linked list of fn ptrs + * at the hashed index. + */ + spin_lock_bh(&oct->dispatch.lock); + list_add(&dispatch->list, &oct->dispatch.dlist[idx].list); + oct->dispatch.count++; + spin_unlock_bh(&oct->dispatch.lock); + + } else { + if (pfn == fn && + octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg) + return 0; + + dev_err(&oct->pci_dev->dev, + "Found previously registered dispatch fn for opcode/subcode: %x/%x\n", + opcode, subcode); + return 1; + } + + return 0; +} + +int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) +{ + u32 i; + char app_name[16]; + struct octeon_device *oct = (struct octeon_device *)buf; + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + struct octeon_core_setup *cs = NULL; + u32 num_nic_ports = 0; + + if (OCTEON_CN6XXX(oct)) + num_nic_ports = + CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx)); + else if (OCTEON_CN23XX_PF(oct)) + num_nic_ports = + CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf)); + + if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) { + dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n", + atomic_read(&oct->status)); + goto core_drv_init_err; + } + + strncpy(app_name, + get_oct_app_string( + (u32)recv_pkt->rh.r_core_drv_init.app_mode), + sizeof(app_name) - 1); + oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; + if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) { + oct->fw_info.max_nic_ports = + (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports; + oct->fw_info.num_gmx_ports = + (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports; + } + + if (oct->fw_info.max_nic_ports < num_nic_ports) { + dev_err(&oct->pci_dev->dev, + "Config has more ports than firmware allows (%d > %d).\n", + num_nic_ports, oct->fw_info.max_nic_ports); + goto core_drv_init_err; + } + oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags; + oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; + oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; + + oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind; + + for (i = 0; i < oct->num_iqs; i++) + oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind; + + atomic_set(&oct->status, OCT_DEV_CORE_OK); + + cs = &core_setup[oct->octeon_id]; + + if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) { + dev_dbg(&oct->pci_dev->dev, "Core setup 
bytes expected %u found %d\n",
+ (u32)sizeof(*cs),
+ recv_pkt->buffer_size[0]);
+ }
+
+ memcpy(cs, get_rbd(
+ recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));
+
+ strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
+ strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
+ OCT_SERIAL_LEN);
+
+ octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
+
+ oct->boardinfo.major = cs->board_rev_major;
+ oct->boardinfo.minor = cs->board_rev_minor;
+
+ dev_info(&oct->pci_dev->dev,
+ "Running %s (%llu Hz)\n",
+ app_name, CVM_CAST64(cs->corefreq));
+
+core_drv_init_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
+
+{
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
+ (oct->io_qmask.iq & BIT_ULL(q_no)))
+ return oct->instr_queue[q_no]->max_count;
+
+ return -1;
+}
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
+{
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
+ (oct->io_qmask.oq & BIT_ULL(q_no)))
+ return oct->droq[q_no]->max_count;
+ return -1;
+}
+
+/* Returns the host firmware handshake OCTEON-specific configuration */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct)
+{
+ struct octeon_config *default_oct_conf = NULL;
+
+ /* check the OCTEON Device model & return the corresponding octeon
+ * configuration
+ */
+
+ if (OCTEON_CN6XXX(oct)) {
+ default_oct_conf =
+ (struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_CONF(oct, cn23xx_pf));
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_CONF(oct, cn23xx_vf));
+ }
+ return default_oct_conf;
+}
+
+/* scratch register address is the same in all the OCT-II and CN70XX models */
+#define CNXX_SLI_SCRATCH1 0x3C0
+
+/* Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id)
+{
+ if (octeon_id >= MAX_OCTEON_DEVICES)
+ return NULL;
+ else
+ return octeon_device[octeon_id];
+}
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
+{
+ u64 val64;
+ unsigned long flags;
+ u32 addrhi;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ /* The windowed read happens when the LSB of the addr is written.
+ * So write MSB first
+ */
+ addrhi = (addr >> 32);
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX) ||
+ (oct->chip_id == OCTEON_CN23XX_PF_VID))
+ addrhi |= 0x00060000;
+ writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
+
+ /* Read back to preserve ordering of writes */
+ readl(oct->reg_list.pci_win_rd_addr_hi);
+
+ writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
+ readl(oct->reg_list.pci_win_rd_addr_lo);
+
+ val64 = readq(oct->reg_list.pci_win_rd_data);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+
+ return val64;
+}
+
+void lio_pci_writeq(struct octeon_device *oct,
+ u64 val,
+ u64 addr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ writeq(addr, oct->reg_list.pci_win_wr_addr);
+
+ /* The write happens when the LSB is written. So write MSB first. */
+ writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
+ /* Read the MSB to ensure ordering of writes.
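+ * (the readl that follows flushes the posted MMIO write before the LSB
+ * is written, which is what triggers the actual windowed write)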
*/
+ readl(oct->reg_list.pci_win_wr_data_hi);
+
+ writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+}
+
+int octeon_mem_access_ok(struct octeon_device *oct)
+{
+ u64 access_okay = 0;
+ u64 lmc0_reset_ctl;
+
+ /* Check to make sure a DDR interface is enabled */
+ if (OCTEON_CN23XX_PF(oct)) {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
+ } else {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ }
+
+ return access_okay ? 0 : 1;
+}
+
+int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
+{
+ int ret = 1;
+ u32 ms;
+
+ if (!timeout)
+ return ret;
+
+ for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
+ ms += HZ / 10) {
+ ret = octeon_mem_access_ok(oct);
+
+ /* wait 100 ms */
+ if (ret)
+ schedule_timeout_uninterruptible(HZ / 10);
+ }
+
+ return ret;
+}
+
+/* Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev)
+{
+ struct octeon_device *octeon_dev = (struct octeon_device *)dev;
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ if (octeon_device[i] == octeon_dev)
+ return octeon_dev->octeon_id;
+ return -1;
+}
+
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
+{
+ u64 instr_cnt;
+ u32 pkts_pend;
+ struct octeon_device *oct = NULL;
+
+ /* the whole thing needs to be atomic, ideally */
+ if (droq) {
+ pkts_pend = (u32)atomic_read(&droq->pkts_pending);
+ writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
+ droq->pkt_count = pkts_pend;
+ oct = droq->oct_dev;
+ }
+ if (iq) {
+ spin_lock_bh(&iq->lock);
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ /* this write needs to be flushed before we release the lock */
+ spin_unlock_bh(&iq->lock);
+ oct = iq->oct_dev;
+ }
+ /* write resend. Writing RESEND in SLI_PKTX_CNTS should be enough
+ * to trigger tx interrupts as well, if they are pending.
+ */
+ if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {
+ if (droq)
+ writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
+ /* we race with firmware here. Read and write the IN_DONE_CNTS */
+ else if (iq) {
+ instr_cnt = readq(iq->inst_cnt_reg);
+ writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
+ CN23XX_INTR_RESEND),
+ iq->inst_cnt_reg);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
new file mode 100644
index 000000000..fb380b4f3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -0,0 +1,911 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file octeon_device.h
+ * \brief Host Driver: This file defines the octeon device structure.
+ */
+
+#ifndef _OCTEON_DEVICE_H_
+#define _OCTEON_DEVICE_H_
+
+#include <linux/interrupt.h>
+#include <net/devlink.h>
+
+/** PCI VendorId Device Id */
+#define OCTEON_CN68XX_PCIID 0x91177d
+#define OCTEON_CN66XX_PCIID 0x92177d
+#define OCTEON_CN23XX_PCIID_PF 0x9702177d
+/** Driver identifies chips by these Ids, created by clubbing together
+ * DeviceId+RevisionId; Where Revision Id is not used to distinguish
+ * between chips, a value of 0 is used for revision id.
+ */
+#define OCTEON_CN68XX 0x0091
+#define OCTEON_CN66XX 0x0092
+#define OCTEON_CN23XX_PF_VID 0x9702
+#define OCTEON_CN23XX_VF_VID 0x9712
+
+/** RevisionId for the chips */
+#define OCTEON_CN23XX_REV_1_0 0x00
+#define OCTEON_CN23XX_REV_1_1 0x01
+#define OCTEON_CN23XX_REV_2_0 0x80
+
+/** SubsystemId for the chips */
+#define OCTEON_CN2350_10GB_SUBSYS_ID_1 0X3177d
+#define OCTEON_CN2350_10GB_SUBSYS_ID_2 0X4177d
+#define OCTEON_CN2360_10GB_SUBSYS_ID 0X5177d
+#define OCTEON_CN2350_25GB_SUBSYS_ID 0X7177d
+#define OCTEON_CN2360_25GB_SUBSYS_ID 0X6177d
+
+/** Endian-swap modes supported by Octeon. */
+enum octeon_pci_swap_mode {
+ OCTEON_PCI_PASSTHROUGH = 0,
+ OCTEON_PCI_64BIT_SWAP = 1,
+ OCTEON_PCI_32BIT_BYTE_SWAP = 2,
+ OCTEON_PCI_32BIT_LW_SWAP = 3
+};
+
+enum lio_fw_state {
+ FW_IS_PRELOADED = 0,
+ FW_NEEDS_TO_BE_LOADED = 1,
+ FW_IS_BEING_LOADED = 2,
+ FW_HAS_BEEN_LOADED = 3,
+};
+
+enum {
+ OCTEON_CONFIG_TYPE_DEFAULT = 0,
+ NUM_OCTEON_CONFS,
+};
+
+#define OCTEON_INPUT_INTR (1)
+#define OCTEON_OUTPUT_INTR (2)
+#define OCTEON_MBOX_INTR (4)
+#define OCTEON_ALL_INTR 0xff
+
+/*--------------- PCI BAR1 index registers -------------*/
+
+/* BAR1 Mask */
+#define PCI_BAR1_ENABLE_CA 1
+#define PCI_BAR1_ENDIAN_MODE OCTEON_PCI_64BIT_SWAP
+#define PCI_BAR1_ENTRY_VALID 1
+#define PCI_BAR1_MASK ((PCI_BAR1_ENABLE_CA << 3) \
+ | (PCI_BAR1_ENDIAN_MODE << 1) \
+ | PCI_BAR1_ENTRY_VALID)
+
+/** Octeon Device state.
+ * Each octeon device goes through each of these states
+ * as it is initialized.
+ */
+#define OCT_DEV_BEGIN_STATE 0x0
+#define OCT_DEV_PCI_ENABLE_DONE 0x1
+#define OCT_DEV_PCI_MAP_DONE 0x2
+#define OCT_DEV_DISPATCH_INIT_DONE 0x3
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x4
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x5
+#define OCT_DEV_RESP_LIST_INIT_DONE 0x6
+#define OCT_DEV_DROQ_INIT_DONE 0x7
+#define OCT_DEV_MBOX_SETUP_DONE 0x8
+#define OCT_DEV_MSIX_ALLOC_VECTOR_DONE 0x9
+#define OCT_DEV_INTR_SET_DONE 0xa
+#define OCT_DEV_IO_QUEUES_DONE 0xb
+#define OCT_DEV_CONSOLE_INIT_DONE 0xc
+#define OCT_DEV_HOST_OK 0xd
+#define OCT_DEV_CORE_OK 0xe
+#define OCT_DEV_RUNNING 0xf
+#define OCT_DEV_IN_RESET 0x10
+#define OCT_DEV_STATE_INVALID 0x11
+
+#define OCT_DEV_STATES OCT_DEV_STATE_INVALID
+
+/** Octeon Device interrupts
+ * These interrupt bits are set in the int_status field of
+ * the octeon_device structure
+ */
+#define OCT_DEV_INTR_DMA0_FORCE 0x01
+#define OCT_DEV_INTR_DMA1_FORCE 0x02
+#define OCT_DEV_INTR_PKT_DATA 0x04
+
+#define LIO_RESET_SECS (3)
+
+/*---------------------------DISPATCH LIST-------------------------------*/
+
+/** The dispatch list entry.
+ * The driver keeps a record of functions registered for each + * response header opcode in this structure. Since the opcode is + * hashed to index into the driver's list, more than one opcode + * can hash to the same entry, in which case the list field points + * to a linked list with the other entries. + */ +struct octeon_dispatch { + /** List head for this entry */ + struct list_head list; + + /** The opcode for which the dispatch function & arg should be used */ + u16 opcode; + + /** The function to be called for a packet received by the driver */ + octeon_dispatch_fn_t dispatch_fn; + + /* The application specified argument to be passed to the above + * function along with the received packet + */ + void *arg; +}; + +/** The dispatch list structure. */ +struct octeon_dispatch_list { + /** access to dispatch list must be atomic */ + spinlock_t lock; + + /** Count of dispatch functions currently registered */ + u32 count; + + /** The list of dispatch functions */ + struct octeon_dispatch *dlist; +}; + +/*----------------------- THE OCTEON DEVICE ---------------------------*/ + +#define OCT_MEM_REGIONS 3 +/** PCI address space mapping information. + * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of + * Octeon gets mapped to different physical address spaces in + * the kernel. + */ +struct octeon_mmio { + /** PCI address to which the BAR is mapped. */ + u64 start; + + /** Length of this PCI address space. */ + u32 len; + + /** Length that has been mapped to phys. address space. */ + u32 mapped_len; + + /** The physical address to which the PCI address space is mapped. */ + u8 __iomem *hw_addr; + + /** Flag indicating the mapping was successful. */ + u32 done; +}; + +#define MAX_OCTEON_MAPS 32 + +struct octeon_io_enable { + u64 iq; + u64 oq; + u64 iq64B; +}; + +struct octeon_reg_list { + u32 __iomem *pci_win_wr_addr_hi; + u32 __iomem *pci_win_wr_addr_lo; + u64 __iomem *pci_win_wr_addr; + + u32 __iomem *pci_win_rd_addr_hi; + u32 __iomem *pci_win_rd_addr_lo; + u64 __iomem *pci_win_rd_addr; + + u32 __iomem *pci_win_wr_data_hi; + u32 __iomem *pci_win_wr_data_lo; + u64 __iomem *pci_win_wr_data; + + u32 __iomem *pci_win_rd_data_hi; + u32 __iomem *pci_win_rd_data_lo; + u64 __iomem *pci_win_rd_data; +}; + +#define OCTEON_CONSOLE_MAX_READ_BYTES 512 +typedef int (*octeon_console_print_fn)(struct octeon_device *oct, + u32 num, char *pre, char *suf); +struct octeon_console { + u32 active; + u32 waiting; + u64 addr; + u32 buffer_size; + u64 input_base_addr; + u64 output_base_addr; + octeon_console_print_fn print; + char leftover[OCTEON_CONSOLE_MAX_READ_BYTES]; +}; + +struct octeon_board_info { + char name[OCT_BOARD_NAME]; + char serial_number[OCT_SERIAL_LEN]; + u64 major; + u64 minor; +}; + +struct octeon_fn_list { + void (*setup_iq_regs)(struct octeon_device *, u32); + void (*setup_oq_regs)(struct octeon_device *, u32); + + irqreturn_t (*process_interrupt_regs)(void *); + u64 (*msix_interrupt_handler)(void *); + + int (*setup_mbox)(struct octeon_device *); + int (*free_mbox)(struct octeon_device *); + + int (*soft_reset)(struct octeon_device *); + int (*setup_device_regs)(struct octeon_device *); + void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int); + void (*bar1_idx_write)(struct octeon_device *, u32, u32); + u32 (*bar1_idx_read)(struct octeon_device *, u32); + u32 (*update_iq_read_idx)(struct octeon_instr_queue *); + + void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32); + void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32); + + void 
(*enable_interrupt)(struct octeon_device *, u8);
+ void (*disable_interrupt)(struct octeon_device *, u8);
+
+ int (*enable_io_queues)(struct octeon_device *);
+ void (*disable_io_queues)(struct octeon_device *);
+};
+
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Structure for named memory blocks.
+ * The number of descriptors available can be changed without affecting
+ * compatibility, but name length changes require a bump in the bootmem
+ * descriptor version.
+ * Note: This structure must be naturally 64 bit aligned, as a single
+ * memory image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ /** Base address of named block */
+ u64 base_addr;
+
+ /** Size actually allocated for named block */
+ u64 size;
+
+ /** name of named block */
+ char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+struct oct_fw_info {
+ u32 max_nic_ports; /** max nic ports for the device */
+ u32 num_gmx_ports; /** num gmx ports */
+ u64 app_cap_flags; /** firmware cap flags */
+
+ /** The core application is running in this mode.
+ * See octeon-drv-opcodes.h for values.
+ */
+ u32 app_mode;
+ char liquidio_firmware_version[32];
+ /* Fields extracted from legacy string 'liquidio_firmware_version' */
+ struct {
+ u8 maj;
+ u8 min;
+ u8 rev;
+ } ver;
+};
+
+#define OCT_FW_VER(maj, min, rev) \
+ (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev)))
+
+/* wrappers around work structs */
+struct cavium_wk {
+ struct delayed_work work;
+ void *ctxptr;
+ u64 ctxul;
+};
+
+struct cavium_wq {
+ struct workqueue_struct *wq;
+ struct cavium_wk wk;
+};
+
+struct octdev_props {
+ /* Each interface in the Octeon device has a network
+ * device pointer (used for OS specific calls).
+ */
+ int rx_on;
+ int fec;
+ int fec_boot;
+ int napi_enabled;
+ int gmxport;
+ struct net_device *netdev;
+};
+
+#define LIO_FLAG_MSIX_ENABLED 0x1
+#define MSIX_PO_INT 0x1
+#define MSIX_PI_INT 0x2
+#define MSIX_MBOX_INT 0x4
+
+struct octeon_pf_vf_hs_word {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** app that is currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+#else
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+ /** app that is currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+#endif
+};
+
+struct octeon_sriov_info {
+ /* Number of rings assigned to VF */
+ u32 rings_per_vf;
+
+ /** Max Number of VF devices that can be enabled. This variable can
+ * be specified at load time or it will be derived after allocating
+ * PF queues. When max_vfs is derived, each VF will get one queue.
+ */
+ u32 max_vfs;
+
+ /** Number of VF devices enabled using sysfs.
*/ + u32 num_vfs_alloced; + + /* Actual rings left for PF device */ + u32 num_pf_rings; + + /* SRN of PF usable IO queues */ + u32 pf_srn; + + /* total pf rings */ + u32 trs; + + u32 sriov_enabled; + + struct lio_trusted_vf trusted_vf; + + /*lookup table that maps DPI ring number to VF pci_dev struct pointer*/ + struct pci_dev *dpiring_to_vfpcidev_lut[MAX_POSSIBLE_VFS]; + + u64 vf_macaddr[MAX_POSSIBLE_VFS]; + + u16 vf_vlantci[MAX_POSSIBLE_VFS]; + + int vf_linkstate[MAX_POSSIBLE_VFS]; + + bool vf_spoofchk[MAX_POSSIBLE_VFS]; + + u64 vf_drv_loaded_mask; +}; + +struct octeon_ioq_vector { + struct octeon_device *oct_dev; + int iq_index; + int droq_index; + int vector; + struct octeon_mbox *mbox; + struct cpumask affinity_mask; + u32 ioq_num; +}; + +struct lio_vf_rep_list { + int num_vfs; + struct net_device *ndev[CN23XX_MAX_VFS_PER_PF]; +}; + +struct lio_devlink_priv { + struct octeon_device *oct; +}; + +/** The Octeon device. + * Each Octeon device has this structure to represent all its + * components. + */ +struct octeon_device { + /** Lock for PCI window configuration accesses */ + spinlock_t pci_win_lock; + + /** Lock for memory accesses */ + spinlock_t mem_access_lock; + + /** PCI device pointer */ + struct pci_dev *pci_dev; + + /** Chip specific information. */ + void *chip; + + /** Number of interfaces detected in this octeon device. */ + u32 ifcount; + + struct octdev_props props[MAX_OCTEON_LINKS]; + + /** Octeon Chip type. */ + u16 chip_id; + + u16 rev_id; + + u32 subsystem_id; + + u16 pf_num; + + u16 vf_num; + + /** This device's id - set by the driver. */ + u32 octeon_id; + + /** This device's PCIe port used for traffic. */ + u16 pcie_port; + + u16 flags; +#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1) + + /** The state of this device */ + atomic_t status; + + /** memory mapped io range */ + struct octeon_mmio mmio[OCT_MEM_REGIONS]; + + struct octeon_reg_list reg_list; + + struct octeon_fn_list fn_list; + + struct octeon_board_info boardinfo; + + u32 num_iqs; + + /* The pool containing pre allocated buffers used for soft commands */ + struct octeon_sc_buffer_pool sc_buf_pool; + + /** The input instruction queues */ + struct octeon_instr_queue *instr_queue + [MAX_POSSIBLE_OCTEON_INSTR_QUEUES]; + + /** The doubly-linked list of instruction response */ + struct octeon_response_list response_list[MAX_RESPONSE_LISTS]; + + u32 num_oqs; + + /** The DROQ output queues */ + struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; + + struct octeon_io_enable io_qmask; + + /** List of dispatch functions */ + struct octeon_dispatch_list dispatch; + + u32 int_status; + + u64 droq_intr; + + /** Physical location of the cvmx_bootmem_desc_t in octeon memory */ + u64 bootmem_desc_addr; + + /** Placeholder memory for named blocks. + * Assumes single-threaded access + */ + struct cvmx_bootmem_named_block_desc bootmem_named_block_desc; + + /** Address of consoles descriptor */ + u64 console_desc_addr; + + /** Number of consoles available. 0 means they are inaccessible */ + u32 num_consoles; + + /* Console caches */ + struct octeon_console console[MAX_OCTEON_MAPS]; + + /* Console named block info */ + struct { + u64 dram_region_base; + int bar1_index; + } console_nb_info; + + /* Coprocessor clock rate. */ + u64 coproc_clock_rate; + + /** The core application is running in this mode. See liquidio_common.h + * for values. + */ + u32 app_mode; + + struct oct_fw_info fw_info; + + /** The name given to this device. 
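+ * Set at allocation time as "LiquidIO<id>", e.g. "LiquidIO0" for the
+ * first device (see octeon_allocate_device()).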
*/
+ char device_name[32];
+
+ /** Application Context */
+ void *app_ctx;
+
+ struct cavium_wq dma_comp_wq;
+
+ /** Lock for dma response list */
+ spinlock_t cmd_resp_wqlock;
+ u32 cmd_resp_state;
+
+ struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
+
+ struct cavium_wk nic_poll_work;
+
+ struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
+
+ void *priv;
+
+ int num_msix_irqs;
+
+ void *msix_entries;
+
+ /* when requesting IRQs, the names are stored here */
+ void *irq_name_storage;
+
+ struct octeon_sriov_info sriov_info;
+
+ struct octeon_pf_vf_hs_word pfvf_hsword;
+
+ int msix_on;
+
+ /** Mail Box details of each octeon queue. */
+ struct octeon_mbox *mbox[MAX_POSSIBLE_VFS];
+
+ /** IOq information of its corresponding MSI-X interrupt. */
+ struct octeon_ioq_vector *ioq_vector;
+
+ int rx_pause;
+ int tx_pause;
+
+ struct oct_link_stats link_stats; /* statistics from firmware */
+
+ /* private flags to control driver-specific features through ethtool */
+ u32 priv_flags;
+
+ void *watchdog_task;
+
+ u32 rx_coalesce_usecs;
+ u32 rx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames;
+
+ bool cores_crashed;
+
+ struct {
+ int bus;
+ int dev;
+ int func;
+ } loc;
+
+ atomic_t *adapter_refcount; /* reference count of adapter */
+
+ atomic_t *adapter_fw_state; /* per-adapter, lio_fw_state */
+
+ bool ptp_enable;
+
+ struct lio_vf_rep_list vf_rep_list;
+ struct devlink *devlink;
+ enum devlink_eswitch_mode eswitch_mode;
+
+ /* for 25G NIC speed change */
+ u8 speed_boot;
+ u8 speed_setting;
+ u8 no_speed_setting;
+
+ u32 vfstats_poll;
+#define LIO_VFSTATS_POLL 10
+};
+
+#define OCT_DRV_ONLINE 1
+#define OCT_DRV_OFFLINE 2
+#define OCTEON_CN6XXX(oct) ({ \
+ typeof(oct) _oct = (oct); \
+ ((_oct->chip_id == OCTEON_CN66XX) || \
+ (_oct->chip_id == OCTEON_CN68XX)); })
+#define OCTEON_CN23XX_PF(oct) ((oct)->chip_id == OCTEON_CN23XX_PF_VID)
+#define OCTEON_CN23XX_VF(oct) ((oct)->chip_id == OCTEON_CN23XX_VF_VID)
+#define CHIP_CONF(oct, TYPE) \
+ (((struct octeon_ ## TYPE *)((oct)->chip))->conf)
+
+#define MAX_IO_PENDING_PKT_COUNT 100
+
+/*------------------ Function Prototypes ----------------------*/
+
+/** Initialize device list memory */
+void octeon_init_device_list(int conf_type);
+
+/** Free memory for Input and Output queue structures for an octeon device */
+void octeon_free_device_mem(struct octeon_device *oct);
+
+/* Look up a free entry in the octeon_device table and allocate resources
+ * for the octeon_device structure for an octeon device. Called at init
+ * time.
+ */
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size);
+
+/** Register a device's bus location at initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf);
+
+/** Deregister a device at de-initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct);
+
+/** Initialize the driver's dispatch list which is a mix of a hash table
+ * and a linked list. This is done at driver load time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return 0 on success, else -ve error value
+ */
+int octeon_init_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Delete the driver's dispatch list and all registered entries.
+ * This is done at driver unload time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ */
+void octeon_delete_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Initialize the core device fields with the info returned by the FW.
+ * @param recv_info - Receive info structure
+ * @param buf - Receive buffer
+ */
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf);
+
+/** Gets the dispatch function registered to receive packets with a
+ * given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch function
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch function
+ * is to be checked.
+ *
+ * @return Success: octeon_dispatch_fn_t (dispatch function pointer)
+ * @return Failure: NULL
+ *
+ * Looks up the dispatch list to get the dispatch function for a
+ * given opcode.
+ */
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode);
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id);
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev);
+
+/** Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr);
+
+/** Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param val - Value to write
+ * @param addr - Address of the register to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr);
+
+/* Routines for reading and writing CSRs */
+#define octeon_write_csr(oct_dev, reg_off, value) \
+ writel(value, (oct_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octeon_write_csr64(oct_dev, reg_off, val64) \
+ writeq(val64, (oct_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octeon_read_csr(oct_dev, reg_off) \
+ readl((oct_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octeon_read_csr64(oct_dev, reg_off) \
+ readq((oct_dev)->mmio[0].hw_addr + (reg_off))
+
+/**
+ * Checks if memory access is okay
+ *
+ * @param oct which octeon to send to
+ * @return Zero on success, negative on failure.
+ */
+int octeon_mem_access_ok(struct octeon_device *oct);
+
+/**
+ * Waits for DDR initialization.
+ *
+ * @param oct which octeon to send to
+ * @param timeout_in_ms pointer to how long to wait until DDR is initialized
+ * in ms.
+ * If contents are 0, it waits until contents are non-zero
+ * before starting to check.
+ * @return Zero on success, negative on failure.
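+ * (The implementation polls octeon_mem_access_ok() in roughly 100 ms steps
+ * until it succeeds or the timeout expires.)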
+ */ +int octeon_wait_for_ddr_init(struct octeon_device *oct, + u32 *timeout_in_ms); + +/** + * Wait for u-boot to boot and be waiting for a command. + * + * @param wait_time_hundredths + * Maximum time to wait + * + * @return Zero on success, negative on failure. + */ +int octeon_wait_for_bootloader(struct octeon_device *oct, + u32 wait_time_hundredths); + +/** + * Initialize console access + * + * @param oct which octeon initialize + * @return Zero on success, negative on failure. + */ +int octeon_init_consoles(struct octeon_device *oct); + +/** + * Adds access to a console to the device. + * + * @param oct: which octeon to add to + * @param console_num: which console + * @param dbg_enb: ptr to debug enablement string, one of: + * * NULL for no debug output (i.e. disabled) + * * empty string enables debug output (via default method) + * * specific string to enable debug console output + * + * @return Zero on success, negative on failure. + */ +int octeon_add_console(struct octeon_device *oct, u32 console_num, + char *dbg_enb); + +/** write or read from a console */ +int octeon_console_write(struct octeon_device *oct, u32 console_num, + char *buffer, u32 write_request_size, u32 flags); +int octeon_console_write_avail(struct octeon_device *oct, u32 console_num); + +int octeon_console_read_avail(struct octeon_device *oct, u32 console_num); + +/** Removes all attached consoles. */ +void octeon_remove_consoles(struct octeon_device *oct); + +/** + * Send a string to u-boot on console 0 as a command. + * + * @param oct which octeon to send to + * @param cmd_str String to send + * @param wait_hundredths Time to wait for u-boot to accept the command. + * + * @return Zero on success, negative on failure. + */ +int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str, + u32 wait_hundredths); + +/** Parses, validates, and downloads firmware, then boots associated cores. + * @param oct which octeon to download firmware to + * @param data - The complete firmware file image + * @param size - The size of the data + * + * @return 0 if success. + * -EINVAL if file is incompatible or badly formatted. + * -ENODEV if no handler was found for the application type or an + * invalid octeon id was passed. + */ +int octeon_download_firmware(struct octeon_device *oct, const u8 *data, + size_t size); + +char *lio_get_state_string(atomic_t *state_ptr); + +/** Sets up instruction queues for the device + * @param oct which octeon to setup + * + * @return 0 if success. 1 if fails + */ +int octeon_setup_instr_queues(struct octeon_device *oct); + +/** Sets up output queues for the device + * @param oct which octeon to setup + * + * @return 0 if success. 
1 if fails
+ */
+int octeon_setup_output_queues(struct octeon_device *oct);
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
+
+/** Turns off the input and output queues for the device
+ * @param oct which octeon to disable
+ */
+int octeon_set_io_queues_off(struct octeon_device *oct);
+
+/** Turns on or off the given output queue for the device
+ * @param oct which octeon to change
+ * @param q_no which queue
+ * @param enable 1 to enable, 0 to disable
+ */
+void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
+
+/** Retrieve the config for the device
+ * @param oct which octeon
+ * @param card_type type of card
+ *
+ * @returns pointer to configuration
+ */
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
+
+/** Gets the octeon device configuration
+ * @return - pointer to the octeon configuration structure
+ */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+
+void octeon_free_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs);
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
+
+/* LiquidIO driver private flags */
+enum {
+ OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
+};
+
+#define OCT_PRIV_FLAG_DEFAULT 0x0
+
+static inline u32 lio_get_priv_flag(struct octeon_device *octdev, u32 flag)
+{
+ return !!(octdev->priv_flags & (0x1 << flag));
+}
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev,
+ u32 flag, u32 val)
+{
+ if (val)
+ octdev->priv_flags |= (0x1 << flag);
+ else
+ octdev->priv_flags &= ~(0x1 << flag);
+}
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
new file mode 100644
index 000000000..d4080bddc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -0,0 +1,969 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
+
+struct niclist {
+ struct list_head list;
+ void *ptr;
+};
+
+struct __dispatch {
+ struct list_head list;
+ struct octeon_recv_info *rinfo;
+ octeon_dispatch_fn_t disp_fn;
+};
+
+/** Get the argument that the user set when registering dispatch
+ * function for a given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch argument + * is to be checked. + * @param subcode - the subcode for which the dispatch argument + * is to be checked. + * @return Success: void * (argument to the dispatch function) + * @return Failure: NULL + * + */ +void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev, + u16 opcode, u16 subcode) +{ + int idx; + struct list_head *dispatch; + void *fn_arg = NULL; + u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); + + idx = combined_opcode & OCTEON_OPCODE_MASK; + + spin_lock_bh(&octeon_dev->dispatch.lock); + + if (octeon_dev->dispatch.count == 0) { + spin_unlock_bh(&octeon_dev->dispatch.lock); + return NULL; + } + + if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { + fn_arg = octeon_dev->dispatch.dlist[idx].arg; + } else { + list_for_each(dispatch, + &octeon_dev->dispatch.dlist[idx].list) { + if (((struct octeon_dispatch *)dispatch)->opcode == + combined_opcode) { + fn_arg = ((struct octeon_dispatch *) + dispatch)->arg; + break; + } + } + } + + spin_unlock_bh(&octeon_dev->dispatch.lock); + return fn_arg; +} + +/** Check for packets on Droq. This function should be called with lock held. + * @param droq - Droq on which count is checked. + * @return Returns packet count. + */ +u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq) +{ + u32 pkt_count = 0; + u32 last_count; + + pkt_count = readl(droq->pkts_sent_reg); + + last_count = pkt_count - droq->pkt_count; + droq->pkt_count = pkt_count; + + /* we shall write to cnts at napi irq enable or end of droq tasklet */ + if (last_count) + atomic_add(last_count, &droq->pkts_pending); + + return last_count; +} + +static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq) +{ + u32 count = 0; + + /* max_empty_descs is the max. no. of descs that can have no buffers. 
+ * If the empty desc count goes beyond this value, we cannot safely + * read in a 64K packet sent by Octeon + * (64K is max pkt size from Octeon) + */ + droq->max_empty_descs = 0; + + do { + droq->max_empty_descs++; + count += droq->buffer_size; + } while (count < (64 * 1024)); + + droq->max_empty_descs = droq->max_count - droq->max_empty_descs; +} + +static void octeon_droq_reset_indices(struct octeon_droq *droq) +{ + droq->read_idx = 0; + droq->write_idx = 0; + droq->refill_idx = 0; + droq->refill_count = 0; + atomic_set(&droq->pkts_pending, 0); +} + +static void +octeon_droq_destroy_ring_buffers(struct octeon_device *oct, + struct octeon_droq *droq) +{ + u32 i; + struct octeon_skb_page_info *pg_info; + + for (i = 0; i < droq->max_count; i++) { + pg_info = &droq->recv_buf_list[i].pg_info; + if (!pg_info) + continue; + + if (pg_info->dma) + lio_unmap_ring(oct->pci_dev, + (u64)pg_info->dma); + pg_info->dma = 0; + + if (pg_info->page) + recv_buffer_destroy(droq->recv_buf_list[i].buffer, + pg_info); + + droq->recv_buf_list[i].buffer = NULL; + } + + octeon_droq_reset_indices(droq); +} + +static int +octeon_droq_setup_ring_buffers(struct octeon_device *oct, + struct octeon_droq *droq) +{ + u32 i; + void *buf; + struct octeon_droq_desc *desc_ring = droq->desc_ring; + + for (i = 0; i < droq->max_count; i++) { + buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info); + + if (!buf) { + dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", + __func__); + droq->stats.rx_alloc_failure++; + return -ENOMEM; + } + + droq->recv_buf_list[i].buffer = buf; + droq->recv_buf_list[i].data = get_rbd(buf); + desc_ring[i].info_ptr = 0; + desc_ring[i].buffer_ptr = + lio_map_ring(droq->recv_buf_list[i].buffer); + } + + octeon_droq_reset_indices(droq); + + octeon_droq_compute_max_packet_bufs(droq); + + return 0; +} + +int octeon_delete_droq(struct octeon_device *oct, u32 q_no) +{ + struct octeon_droq *droq = oct->droq[q_no]; + + dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); + + octeon_droq_destroy_ring_buffers(oct, droq); + vfree(droq->recv_buf_list); + + if (droq->desc_ring) + lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), + droq->desc_ring, droq->desc_ring_dma); + + memset(droq, 0, OCT_DROQ_SIZE); + oct->io_qmask.oq &= ~(1ULL << q_no); + vfree(oct->droq[q_no]); + oct->droq[q_no] = NULL; + oct->num_oqs--; + + return 0; +} + +int octeon_init_droq(struct octeon_device *oct, + u32 q_no, + u32 num_descs, + u32 desc_size, + void *app_ctx) +{ + struct octeon_droq *droq; + u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; + u32 c_pkts_per_intr = 0, c_refill_threshold = 0; + int numa_node = dev_to_node(&oct->pci_dev->dev); + + dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); + + droq = oct->droq[q_no]; + memset(droq, 0, OCT_DROQ_SIZE); + + droq->oct_dev = oct; + droq->q_no = q_no; + if (app_ctx) + droq->app_ctx = app_ctx; + else + droq->app_ctx = (void *)(size_t)q_no; + + c_num_descs = num_descs; + c_buf_size = desc_size; + if (OCTEON_CN6XXX(oct)) { + struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx); + + c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); + c_refill_threshold = + (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + } else if (OCTEON_CN23XX_PF(oct)) { + struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf); + + c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23); + c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23); + } else if (OCTEON_CN23XX_VF(oct)) { + struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf); + + c_pkts_per_intr = 
(u32)CFG_GET_OQ_PKTS_PER_INTR(conf23); + c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23); + } else { + return 1; + } + + droq->max_count = c_num_descs; + droq->buffer_size = c_buf_size; + + desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; + droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, + (dma_addr_t *)&droq->desc_ring_dma); + + if (!droq->desc_ring) { + dev_err(&oct->pci_dev->dev, + "Output queue %d ring alloc failed\n", q_no); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n", + q_no, droq->desc_ring, droq->desc_ring_dma); + dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no, + droq->max_count); + + droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE), + numa_node); + if (!droq->recv_buf_list) + droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE)); + if (!droq->recv_buf_list) { + dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); + goto init_droq_fail; + } + + if (octeon_droq_setup_ring_buffers(oct, droq)) + goto init_droq_fail; + + droq->pkts_per_intr = c_pkts_per_intr; + droq->refill_threshold = c_refill_threshold; + + dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n", + droq->max_empty_descs); + + INIT_LIST_HEAD(&droq->dispatch_list); + + /* For 56xx Pass1, this function won't be called, so no checks. */ + oct->fn_list.setup_oq_regs(oct, q_no); + + oct->io_qmask.oq |= BIT_ULL(q_no); + + return 0; + +init_droq_fail: + octeon_delete_droq(oct, q_no); + return 1; +} + +/* octeon_create_recv_info + * Parameters: + * octeon_dev - pointer to the octeon device structure + * droq - droq in which the packet arrived. + * buf_cnt - no. of buffers used by the packet. + * idx - index in the descriptor for the first buffer in the packet. + * Description: + * Allocates a recv_info_t and copies the buffer addresses for packet data + * into the recv_pkt space which starts at an 8B offset from recv_info_t. + * Flags the descriptors for refill later. If available descriptors go + * below the threshold to receive a 64K pkt, new buffers are first allocated + * before the recv_pkt_t is created. + * This routine will be called in interrupt context. + * Returns: + * Success: Pointer to recv_info_t + * Failure: NULL. + */ +static inline struct octeon_recv_info *octeon_create_recv_info( + struct octeon_device *octeon_dev, + struct octeon_droq *droq, + u32 buf_cnt, + u32 idx) +{ + struct octeon_droq_info *info; + struct octeon_recv_pkt *recv_pkt; + struct octeon_recv_info *recv_info; + u32 i, bytes_left; + struct octeon_skb_page_info *pg_info; + + info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data; + + recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch)); + if (!recv_info) + return NULL; + + recv_pkt = recv_info->recv_pkt; + recv_pkt->rh = info->rh; + recv_pkt->length = (u32)info->length; + recv_pkt->buffer_count = (u16)buf_cnt; + recv_pkt->octeon_id = (u16)octeon_dev->octeon_id; + + i = 0; + bytes_left = (u32)info->length; + + while (buf_cnt) { + { + pg_info = &droq->recv_buf_list[idx].pg_info; + + lio_unmap_ring(octeon_dev->pci_dev, + (u64)pg_info->dma); + pg_info->page = NULL; + pg_info->dma = 0; + } + + recv_pkt->buffer_size[i] = + (bytes_left >= + droq->buffer_size) ? 
droq->buffer_size : bytes_left; + + recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer; + droq->recv_buf_list[idx].buffer = NULL; + + idx = incr_index(idx, 1, droq->max_count); + bytes_left -= droq->buffer_size; + i++; + buf_cnt--; + } + + return recv_info; +} + +/* If we were not able to refill all buffers, try to move around + * the buffers that were not dispatched. + */ +static inline u32 +octeon_droq_refill_pullup_descs(struct octeon_droq *droq, + struct octeon_droq_desc *desc_ring) +{ + u32 desc_refilled = 0; + + u32 refill_index = droq->refill_idx; + + while (refill_index != droq->read_idx) { + if (droq->recv_buf_list[refill_index].buffer) { + droq->recv_buf_list[droq->refill_idx].buffer = + droq->recv_buf_list[refill_index].buffer; + droq->recv_buf_list[droq->refill_idx].data = + droq->recv_buf_list[refill_index].data; + desc_ring[droq->refill_idx].buffer_ptr = + desc_ring[refill_index].buffer_ptr; + droq->recv_buf_list[refill_index].buffer = NULL; + desc_ring[refill_index].buffer_ptr = 0; + do { + droq->refill_idx = incr_index(droq->refill_idx, + 1, + droq->max_count); + desc_refilled++; + droq->refill_count--; + } while (droq->recv_buf_list[droq->refill_idx].buffer); + } + refill_index = incr_index(refill_index, 1, droq->max_count); + } /* while */ + return desc_refilled; +} + +/* octeon_droq_refill + * Parameters: + * droq - droq in which descriptors require new buffers. + * Description: + * Called during normal DROQ processing in interrupt mode or by the poll + * thread to refill the descriptors from which buffers were dispatched + * to upper layers. Attempts to allocate new buffers. If that fails, moves + * up buffers (that were not dispatched) to form a contiguous ring. + * Returns: + * No of descriptors refilled. + */ +static u32 +octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq) +{ + struct octeon_droq_desc *desc_ring; + void *buf = NULL; + u8 *data; + u32 desc_refilled = 0; + struct octeon_skb_page_info *pg_info; + + desc_ring = droq->desc_ring; + + while (droq->refill_count && (desc_refilled < droq->max_count)) { + /* If a valid buffer exists (happens if there is no dispatch), + * reuse the buffer, else allocate. + */ + if (!droq->recv_buf_list[droq->refill_idx].buffer) { + pg_info = + &droq->recv_buf_list[droq->refill_idx].pg_info; + /* Either recycle the existing pages or go for + * new page alloc + */ + if (pg_info->page) + buf = recv_buffer_reuse(octeon_dev, pg_info); + else + buf = recv_buffer_alloc(octeon_dev, pg_info); + /* If a buffer could not be allocated, no point in + * continuing + */ + if (!buf) { + droq->stats.rx_alloc_failure++; + break; + } + droq->recv_buf_list[droq->refill_idx].buffer = + buf; + data = get_rbd(buf); + } else { + data = get_rbd(droq->recv_buf_list + [droq->refill_idx].buffer); + } + + droq->recv_buf_list[droq->refill_idx].data = data; + + desc_ring[droq->refill_idx].buffer_ptr = + lio_map_ring(droq->recv_buf_list[ + droq->refill_idx].buffer); + + droq->refill_idx = incr_index(droq->refill_idx, 1, + droq->max_count); + desc_refilled++; + droq->refill_count--; + } + + if (droq->refill_count) + desc_refilled += + octeon_droq_refill_pullup_descs(droq, desc_ring); + + /* if droq->refill_count + * The refill count would not change in pass two. We only moved buffers + * to close the gap in the ring, but we would still have the same no. of + * buffers to refill. + */ + return desc_refilled; +} + +/** check if we can allocate packets to get out of oom. + * @param droq - Droq being checked. 
+ * @return 1 if fails to refill minimum + */ +int octeon_retry_droq_refill(struct octeon_droq *droq) +{ + struct octeon_device *oct = droq->oct_dev; + int desc_refilled, reschedule = 1; + u32 pkts_credit; + + pkts_credit = readl(droq->pkts_credit_reg); + desc_refilled = octeon_droq_refill(oct, droq); + if (desc_refilled) { + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory + * is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + + if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP) + reschedule = 0; + } + + return reschedule; +} + +static inline u32 +octeon_droq_get_bufcount(u32 buf_size, u32 total_len) +{ + return DIV_ROUND_UP(total_len, buf_size); +} + +static int +octeon_droq_dispatch_pkt(struct octeon_device *oct, + struct octeon_droq *droq, + union octeon_rh *rh, + struct octeon_droq_info *info) +{ + u32 cnt; + octeon_dispatch_fn_t disp_fn; + struct octeon_recv_info *rinfo; + + cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length); + + disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode, + (u16)rh->r.subcode); + if (disp_fn) { + rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx); + if (rinfo) { + struct __dispatch *rdisp = rinfo->rsvd; + + rdisp->rinfo = rinfo; + rdisp->disp_fn = disp_fn; + rinfo->recv_pkt->rh = *rh; + list_add_tail(&rdisp->list, + &droq->dispatch_list); + } else { + droq->stats.dropped_nomem++; + } + } else { + dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n", + (unsigned int)rh->r.opcode, + (unsigned int)rh->r.subcode); + droq->stats.dropped_nodispatch++; + } + + return cnt; +} + +static inline void octeon_droq_drop_packets(struct octeon_device *oct, + struct octeon_droq *droq, + u32 cnt) +{ + u32 i = 0, buf_cnt; + struct octeon_droq_info *info; + + for (i = 0; i < cnt; i++) { + info = (struct octeon_droq_info *) + droq->recv_buf_list[droq->read_idx].data; + octeon_swap_8B_data((u64 *)info, 2); + + if (info->length) { + info->length += OCTNET_FRM_LENGTH_SIZE; + droq->stats.bytes_received += info->length; + buf_cnt = octeon_droq_get_bufcount(droq->buffer_size, + (u32)info->length); + } else { + dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n"); + buf_cnt = 1; + } + + droq->read_idx = incr_index(droq->read_idx, buf_cnt, + droq->max_count); + droq->refill_count += buf_cnt; + } +} + +static u32 +octeon_droq_fast_process_packets(struct octeon_device *oct, + struct octeon_droq *droq, + u32 pkts_to_process) +{ + u32 pkt, total_len = 0, pkt_count, retval; + struct octeon_droq_info *info; + union octeon_rh *rh; + + pkt_count = pkts_to_process; + + for (pkt = 0; pkt < pkt_count; pkt++) { + u32 pkt_len = 0; + struct sk_buff *nicbuf = NULL; + struct octeon_skb_page_info *pg_info; + void *buf; + + info = (struct octeon_droq_info *) + droq->recv_buf_list[droq->read_idx].data; + octeon_swap_8B_data((u64 *)info, 2); + + if (!info->length) { + dev_err(&oct->pci_dev->dev, + "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n", + droq->q_no, droq->read_idx, pkt_count); + print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS, + (u8 *)info, + OCT_DROQ_INFO_SIZE); + break; + } + + /* Len of resp hdr in included in the received data len. 
*/ + rh = &info->rh; + + info->length += OCTNET_FRM_LENGTH_SIZE; + rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64)); + total_len += (u32)info->length; + if (opcode_slow_path(rh)) { + u32 buf_cnt; + + buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info); + droq->read_idx = incr_index(droq->read_idx, + buf_cnt, droq->max_count); + droq->refill_count += buf_cnt; + } else { + if (info->length <= droq->buffer_size) { + pkt_len = (u32)info->length; + nicbuf = droq->recv_buf_list[ + droq->read_idx].buffer; + pg_info = &droq->recv_buf_list[ + droq->read_idx].pg_info; + if (recv_buffer_recycle(oct, pg_info)) + pg_info->page = NULL; + droq->recv_buf_list[droq->read_idx].buffer = + NULL; + + droq->read_idx = incr_index(droq->read_idx, 1, + droq->max_count); + droq->refill_count++; + } else { + nicbuf = octeon_fast_packet_alloc((u32) + info->length); + pkt_len = 0; + /* nicbuf allocation can fail. We'll handle it + * inside the loop. + */ + while (pkt_len < info->length) { + int cpy_len, idx = droq->read_idx; + + cpy_len = ((pkt_len + droq->buffer_size) + > info->length) ? + ((u32)info->length - pkt_len) : + droq->buffer_size; + + if (nicbuf) { + octeon_fast_packet_next(droq, + nicbuf, + cpy_len, + idx); + buf = droq->recv_buf_list[ + idx].buffer; + recv_buffer_fast_free(buf); + droq->recv_buf_list[idx].buffer + = NULL; + } else { + droq->stats.rx_alloc_failure++; + } + + pkt_len += cpy_len; + droq->read_idx = + incr_index(droq->read_idx, 1, + droq->max_count); + droq->refill_count++; + } + } + + if (nicbuf) { + if (droq->ops.fptr) { + droq->ops.fptr(oct->octeon_id, + nicbuf, pkt_len, + rh, &droq->napi, + droq->ops.farg); + } else { + recv_buffer_free(nicbuf); + } + } + } + + if (droq->refill_count >= droq->refill_threshold) { + int desc_refilled = octeon_droq_refill(oct, droq); + + if (desc_refilled) { + /* Flush the droq descriptor data to memory to + * be sure that when we update the credits the + * data in memory is accurate. + */ + wmb(); + writel(desc_refilled, droq->pkts_credit_reg); + } + } + } /* for (each packet)... */ + + /* Increment refill_count by the number of buffers processed. */ + droq->stats.pkts_received += pkt; + droq->stats.bytes_received += total_len; + + retval = pkt; + if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { + octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); + + droq->stats.dropped_toomany += (pkts_to_process - pkt); + retval = pkts_to_process; + } + + atomic_sub(retval, &droq->pkts_pending); + + if (droq->refill_count >= droq->refill_threshold && + readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) { + octeon_droq_check_hw_for_pkts(droq); + + /* Make sure there are no pkts_pending */ + if (!atomic_read(&droq->pkts_pending)) + octeon_schedule_rxq_oom_work(oct, droq); + } + + return retval; +} + +int +octeon_droq_process_packets(struct octeon_device *oct, + struct octeon_droq *droq, + u32 budget) +{ + u32 pkt_count = 0; + struct list_head *tmp, *tmp2; + + octeon_droq_check_hw_for_pkts(droq); + pkt_count = atomic_read(&droq->pkts_pending); + + if (!pkt_count) + return 0; + + if (pkt_count > budget) + pkt_count = budget; + + octeon_droq_fast_process_packets(oct, droq, pkt_count); + + list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { + struct __dispatch *rdisp = (struct __dispatch *)tmp; + + list_del(tmp); + rdisp->disp_fn(rdisp->rinfo, + octeon_get_dispatch_arg + (oct, + (u16)rdisp->rinfo->recv_pkt->rh.r.opcode, + (u16)rdisp->rinfo->recv_pkt->rh.r.subcode)); + } + + /* If there are packets pending. 
schedule tasklet again */ + if (atomic_read(&droq->pkts_pending)) + return 1; + + return 0; +} + +/* + * Utility function to poll for packets. check_hw_for_packets must be + * called before calling this routine. + */ + +int +octeon_droq_process_poll_pkts(struct octeon_device *oct, + struct octeon_droq *droq, u32 budget) +{ + struct list_head *tmp, *tmp2; + u32 pkts_available = 0, pkts_processed = 0; + u32 total_pkts_processed = 0; + + if (budget > droq->max_count) + budget = droq->max_count; + + while (total_pkts_processed < budget) { + octeon_droq_check_hw_for_pkts(droq); + + pkts_available = min((budget - total_pkts_processed), + (u32)(atomic_read(&droq->pkts_pending))); + + if (pkts_available == 0) + break; + + pkts_processed = + octeon_droq_fast_process_packets(oct, droq, + pkts_available); + + total_pkts_processed += pkts_processed; + } + + list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { + struct __dispatch *rdisp = (struct __dispatch *)tmp; + + list_del(tmp); + rdisp->disp_fn(rdisp->rinfo, + octeon_get_dispatch_arg + (oct, + (u16)rdisp->rinfo->recv_pkt->rh.r.opcode, + (u16)rdisp->rinfo->recv_pkt->rh.r.subcode)); + } + + return total_pkts_processed; +} + +/* Enable Pkt Interrupt */ +int +octeon_enable_irq(struct octeon_device *oct, u32 q_no) +{ + switch (oct->chip_id) { + case OCTEON_CN66XX: + case OCTEON_CN68XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + unsigned long flags; + u32 value; + + spin_lock_irqsave + (&cn6xxx->lock_for_droq_int_enb_reg, flags); + value = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB); + value |= (1 << q_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, value); + value = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); + value |= (1 << q_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, value); + + /* don't bother flushing the enables */ + + spin_unlock_irqrestore + (&cn6xxx->lock_for_droq_int_enb_reg, flags); + } + break; + case OCTEON_CN23XX_PF_VID: + lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); + break; + + case OCTEON_CN23XX_VF_VID: + lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]); + break; + default: + dev_err(&oct->pci_dev->dev, "%s Unknown Chip\n", __func__); + return 1; + } + + return 0; +} + +int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, + struct octeon_droq_ops *ops) +{ + struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; + + oct_cfg = octeon_get_conf(oct); + + if (!oct_cfg) + return -EINVAL; + + if (!(ops)) { + dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n", + __func__); + return -EINVAL; + } + + if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { + dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", + __func__, q_no, (oct->num_oqs - 1)); + return -EINVAL; + } + + droq = oct->droq[q_no]; + memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops)); + + return 0; +} + +int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) +{ + struct octeon_config *oct_cfg = NULL; + struct octeon_droq *droq; + + oct_cfg = octeon_get_conf(oct); + + if (!oct_cfg) + return -EINVAL; + + if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { + dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", + __func__, q_no, oct->num_oqs - 1); + return -EINVAL; + } + + droq = oct->droq[q_no]; + + if (!droq) { + dev_info(&oct->pci_dev->dev, + "Droq id (%d) not available.\n", q_no); + return 0; + } + + droq->ops.fptr = NULL; + droq->ops.farg = NULL; + droq->ops.drop_on_max = 0; + + return 0; +} + +int octeon_create_droq(struct octeon_device 
*oct, + u32 q_no, u32 num_descs, + u32 desc_size, void *app_ctx) +{ + struct octeon_droq *droq; + int numa_node = dev_to_node(&oct->pci_dev->dev); + + if (oct->droq[q_no]) { + dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n", + q_no); + return 1; + } + + /* Allocate the DS for the new droq. */ + droq = vmalloc_node(sizeof(*droq), numa_node); + if (!droq) + droq = vmalloc(sizeof(*droq)); + if (!droq) + return -1; + + memset(droq, 0, sizeof(struct octeon_droq)); + + /*Disable the pkt o/p for this Q */ + octeon_set_droq_pkt_op(oct, q_no, 0); + oct->droq[q_no] = droq; + + /* Initialize the Droq */ + if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) { + vfree(oct->droq[q_no]); + oct->droq[q_no] = NULL; + return -1; + } + + oct->num_oqs++; + + dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__, + oct->num_oqs); + + /* Global Droq register settings */ + + /* As of now not required, as setting are done for all 32 Droqs at + * the same time. + */ + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h new file mode 100644 index 000000000..c9b19e624 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h @@ -0,0 +1,416 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file octeon_droq.h + * \brief Implementation of Octeon Output queues. "Output" is with + * respect to the Octeon device on the NIC. From this driver's point of + * view they are ingress queues. + */ + +#ifndef __OCTEON_DROQ_H__ +#define __OCTEON_DROQ_H__ + +/* Default number of packets that will be processed in one iteration. */ +#define MAX_PACKET_BUDGET 0xFFFFFFFF + +/** Octeon descriptor format. + * The descriptor ring is made of descriptors which have 2 64-bit values: + * -# Physical (bus) address of the data buffer. + * -# Physical (bus) address of a octeon_droq_info structure. + * The Octeon device DMA's incoming packets and its information at the address + * given by these descriptor fields. + */ +struct octeon_droq_desc { + /** The buffer pointer */ + u64 buffer_ptr; + + /** The Info pointer */ + u64 info_ptr; +}; + +#define OCT_DROQ_DESC_SIZE (sizeof(struct octeon_droq_desc)) + +/** Information about packet DMA'ed by Octeon. + * The format of the information available at Info Pointer after Octeon + * has posted a packet. Not all descriptors have valid information. Only + * the Info field of the first descriptor for a packet has information + * about the packet. + */ +struct octeon_droq_info { + /** The Length of the packet. */ + u64 length; + + /** The Output Receive Header. 
*/ + union octeon_rh rh; +}; + +#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info)) + +struct octeon_skb_page_info { + /* DMA address for the page */ + dma_addr_t dma; + + /* Page for the rx dma **/ + struct page *page; + + /** which offset into page */ + unsigned int page_offset; +}; + +/** Pointer to data buffer. + * Driver keeps a pointer to the data buffer that it made available to + * the Octeon device. Since the descriptor ring keeps physical (bus) + * addresses, this field is required for the driver to keep track of + * the virtual address pointers. + */ +struct octeon_recv_buffer { + /** Packet buffer, including metadata. */ + void *buffer; + + /** Data in the packet buffer. */ + u8 *data; + + /** pg_info **/ + struct octeon_skb_page_info pg_info; +}; + +#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer)) + +/** Output Queue statistics. Each output queue has four stats fields. */ +struct oct_droq_stats { + /** Number of packets received in this queue. */ + u64 pkts_received; + + /** Bytes received by this queue. */ + u64 bytes_received; + + /** Packets dropped due to no dispatch function. */ + u64 dropped_nodispatch; + + /** Packets dropped due to no memory available. */ + u64 dropped_nomem; + + /** Packets dropped due to large number of pkts to process. */ + u64 dropped_toomany; + + /** Number of packets sent to stack from this queue. */ + u64 rx_pkts_received; + + /** Number of Bytes sent to stack from this queue. */ + u64 rx_bytes_received; + + /** Num of Packets dropped due to receive path failures. */ + u64 rx_dropped; + + u64 rx_vxlan; + + /** Num of failures of recv_buffer_alloc() */ + u64 rx_alloc_failure; + +}; + +/* The maximum number of buffers that can be dispatched from the + * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that + * max packet size from DROQ is 64K. + */ +#define MAX_RECV_BUFS 64 + +/** Receive Packet format used when dispatching output queue packets + * with non-raw opcodes. + * The received packet will be sent to the upper layers using this + * structure which is passed as a parameter to the dispatch function + */ +struct octeon_recv_pkt { + /** Number of buffers in this received packet */ + u16 buffer_count; + + /** Id of the device that is sending the packet up */ + u16 octeon_id; + + /** Length of data in the packet buffer */ + u32 length; + + /** The receive header */ + union octeon_rh rh; + + /** Pointer to the OS-specific packet buffer */ + void *buffer_ptr[MAX_RECV_BUFS]; + + /** Size of the buffers pointed to by ptr's in buffer_ptr */ + u32 buffer_size[MAX_RECV_BUFS]; +}; + +#define OCT_RECV_PKT_SIZE (sizeof(struct octeon_recv_pkt)) + +/** The first parameter of a dispatch function. + * For a raw mode opcode, the driver dispatches with the device + * pointer in this structure. + * For non-raw mode opcode, the driver dispatches the recv_pkt + * created to contain the buffers with data received from Octeon. + * --------------------- + * | *recv_pkt ----|--- + * |-------------------| | + * | 0 or more bytes | | + * | reserved by driver| | + * |-------------------|<-/ + * | octeon_recv_pkt | + * | | + * |___________________| + */ +struct octeon_recv_info { + void *rsvd; + struct octeon_recv_pkt *recv_pkt; +}; + +#define OCT_RECV_INFO_SIZE (sizeof(struct octeon_recv_info)) + +/** Allocate a recv_info structure. The recv_pkt pointer in the recv_info + * structure is filled in before this call returns. + * @param extra_bytes - extra bytes to be allocated at the end of the recv info + * structure. 
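+ *
+ * Layout of the single allocation made below:
+ *   buf + 0                                       -> struct octeon_recv_info
+ *   buf + OCT_RECV_INFO_SIZE                      -> struct octeon_recv_pkt
+ *   buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE  -> rsvd (extra_bytes area)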
+ * @return - pointer to a newly allocated recv_info structure. + */ +static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes) +{ + struct octeon_recv_info *recv_info; + u8 *buf; + + buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE + + extra_bytes, GFP_ATOMIC); + if (!buf) + return NULL; + + recv_info = (struct octeon_recv_info *)buf; + recv_info->recv_pkt = + (struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE); + recv_info->rsvd = NULL; + if (extra_bytes) + recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE; + + return recv_info; +} + +/** Free a recv_info structure. + * @param recv_info - Pointer to receive_info to be freed + */ +static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info) +{ + kfree(recv_info); +} + +typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *); + +/** Used by NIC module to register packet handler and to get device + * information for each octeon device. + */ +struct octeon_droq_ops { + /** This registered function will be called by the driver with + * the octeon id, pointer to buffer from droq and length of + * data in the buffer. The receive header gives the port + * number to the caller. Function pointer is set by caller. + */ + void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *); + void *farg; + + /* This function will be called by the driver for all NAPI related + * events. The first param is the octeon id. The second param is the + * output queue number. The third is the NAPI event that occurred. + */ + void (*napi_fn)(void *); + + u32 poll_mode; + + /** Flag indicating if the DROQ handler should drop packets that + * it cannot handle in one iteration. Set by caller. + */ + u32 drop_on_max; +}; + +/** The Descriptor Ring Output Queue structure. + * This structure has all the information required to implement a + * Octeon DROQ. + */ +struct octeon_droq { + u32 q_no; + + u32 pkt_count; + + struct octeon_droq_ops ops; + + struct octeon_device *oct_dev; + + /** The 8B aligned descriptor ring starts at this address. */ + struct octeon_droq_desc *desc_ring; + + /** Index in the ring where the driver should read the next packet */ + u32 read_idx; + + /** Index in the ring where Octeon will write the next packet */ + u32 write_idx; + + /** Index in the ring where the driver will refill the descriptor's + * buffer + */ + u32 refill_idx; + + /** Packets pending to be processed */ + atomic_t pkts_pending; + + /** Number of descriptors in this ring. */ + u32 max_count; + + /** The number of descriptors pending refill. */ + u32 refill_count; + + u32 pkts_per_intr; + u32 refill_threshold; + + /** The max number of descriptors in DROQ without a buffer. + * This field is used to keep track of empty space threshold. If the + * refill_count reaches this value, the DROQ cannot accept a max-sized + * (64K) packet. + */ + u32 max_empty_descs; + + /** The receive buffer list. This list has the virtual addresses of the + * buffers. + */ + struct octeon_recv_buffer *recv_buf_list; + + /** The size of each buffer pointed by the buffer pointer. */ + u32 buffer_size; + + /** Pointer to the mapped packet credit register. + * Host writes number of info/buffer ptrs available to this register + */ + void __iomem *pkts_credit_reg; + + /** Pointer to the mapped packet sent register. + * Octeon writes the number of packets DMA'ed to host memory + * in this register. + */ + void __iomem *pkts_sent_reg; + + struct list_head dispatch_list; + + /** Statistics for this DROQ. 
*/ + struct oct_droq_stats stats; + + /** DMA mapped address of the DROQ descriptor ring. */ + size_t desc_ring_dma; + + /** application context */ + void *app_ctx; + + struct napi_struct napi; + + u32 cpu_id; + + call_single_data_t csd; +}; + +#define OCT_DROQ_SIZE (sizeof(struct octeon_droq)) + +/** + * Allocates space for the descriptor ring for the droq and sets the + * base addr, num desc etc in Octeon registers. + * + * @param oct_dev - pointer to the octeon device structure + * @param q_no - droq no. ranges from 0 - 3. + * @param app_ctx - pointer to application context + * @return Success: 0 Failure: 1 + */ +int octeon_init_droq(struct octeon_device *oct_dev, + u32 q_no, + u32 num_descs, + u32 desc_size, + void *app_ctx); + +/** + * Frees the space for descriptor ring for the droq. + * + * @param oct_dev - pointer to the octeon device structure + * @param q_no - droq no. ranges from 0 - 3. + * @return: Success: 0 Failure: 1 + */ +int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no); + +/** Register a change in droq operations. The ops field has a pointer to a + * function which will called by the DROQ handler for all packets arriving + * on output queues given by q_no irrespective of the type of packet. + * The ops field also has a flag which if set tells the DROQ handler to + * drop packets if it receives more than what it can process in one + * invocation of the handler. + * @param oct - octeon device + * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1 + * @param ops - the droq_ops settings for this queue + * @return - 0 on success, -ENODEV or -EINVAL on error. + */ +int +octeon_register_droq_ops(struct octeon_device *oct, + u32 q_no, + struct octeon_droq_ops *ops); + +/** Resets the function pointer and flag settings made by + * octeon_register_droq_ops(). After this routine is called, the DROQ handler + * will lookup dispatch function for each arriving packet on the output queue + * given by q_no. + * @param oct - octeon device + * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1 + * @return - 0 on success, -ENODEV or -EINVAL on error. + */ +int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no); + +/** Register a dispatch function for a opcode/subcode. The driver will call + * this dispatch function when it receives a packet with the given + * opcode/subcode in its output queues along with the user specified + * argument. + * @param oct - the octeon device to register with. + * @param opcode - the opcode for which the dispatch will be registered. + * @param subcode - the subcode for which the dispatch will be registered + * @param fn - the dispatch function. + * @param fn_arg - user specified that will be passed along with the + * dispatch function by the driver. 
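+ *
+ * A minimal registration sketch (the handler, opcode and argument names
+ * below are hypothetical, not defined by this header):
+ *
+ *   static int my_disp_fn(struct octeon_recv_info *rinfo, void *arg)
+ *   {
+ *           ... consume rinfo->recv_pkt ...
+ *           octeon_free_recv_info(rinfo);
+ *           return 0;
+ *   }
+ *
+ *   octeon_register_dispatch_fn(oct, MY_OPCODE, MY_SUBCODE,
+ *                               my_disp_fn, my_arg);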
+ * @return Success: 0; Failure: 1 + */ +int octeon_register_dispatch_fn(struct octeon_device *oct, + u16 opcode, + u16 subcode, + octeon_dispatch_fn_t fn, void *fn_arg); + +void *octeon_get_dispatch_arg(struct octeon_device *oct, + u16 opcode, u16 subcode); + +void octeon_droq_print_stats(void); + +u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq); + +int octeon_create_droq(struct octeon_device *oct, u32 q_no, + u32 num_descs, u32 desc_size, void *app_ctx); + +int octeon_droq_process_packets(struct octeon_device *oct, + struct octeon_droq *droq, + u32 budget); + +int octeon_droq_process_poll_pkts(struct octeon_device *oct, + struct octeon_droq *droq, u32 budget); + +int octeon_enable_irq(struct octeon_device *oct, u32 q_no); + +int octeon_retry_droq_refill(struct octeon_droq *droq); + +#endif /*__OCTEON_DROQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h new file mode 100644 index 000000000..bebf3bd34 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h @@ -0,0 +1,399 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file octeon_iq.h + * \brief Host Driver: Implementation of Octeon input queues. "Input" is + * with respect to the Octeon device on the NIC. From this driver's + * point of view they are egress queues. + */ + +#ifndef __OCTEON_IQ_H__ +#define __OCTEON_IQ_H__ + +#define IQ_STATUS_RUNNING 1 + +#define IQ_SEND_OK 0 +#define IQ_SEND_STOP 1 +#define IQ_SEND_FAILED -1 + +/*------------------------- INSTRUCTION QUEUE --------------------------*/ + +/* \cond */ + +#define REQTYPE_NONE 0 +#define REQTYPE_NORESP_NET 1 +#define REQTYPE_NORESP_NET_SG 2 +#define REQTYPE_RESP_NET 3 +#define REQTYPE_RESP_NET_SG 4 +#define REQTYPE_SOFT_COMMAND 5 +#define REQTYPE_LAST 5 + +struct octeon_request_list { + u32 reqtype; + void *buf; +}; + +/* \endcond */ + +/** Input Queue statistics. Each input queue has four stats fields. */ +struct oct_iq_stats { + u64 instr_posted; /**< Instructions posted to this queue. */ + u64 instr_processed; /**< Instructions processed in this queue. */ + u64 instr_dropped; /**< Instructions that could not be processed */ + u64 bytes_sent; /**< Bytes sent through this queue. */ + u64 sgentry_sent;/**< Gather entries sent through this queue. */ + u64 tx_done;/**< Num of packets sent to network. */ + u64 tx_iq_busy;/**< Numof times this iq was found to be full. */ + u64 tx_dropped;/**< Numof pkts dropped dueto xmitpath errors. */ + u64 tx_tot_bytes;/**< Total count of bytes sento to network. 
*/ + u64 tx_gso; /* count of tso */ + u64 tx_vxlan; /* tunnel */ + u64 tx_dmamap_fail; /* Number of times dma mapping failed */ + u64 tx_restart; /* Number of times this queue restarted */ +}; + +#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats)) + +/** The instruction (input) queue. + * The input queue is used to post raw (instruction) mode data or packet + * data to Octeon device from the host. Each input queue (upto 4) for + * a Octeon device has one such structure to represent it. + */ +struct octeon_instr_queue { + struct octeon_device *oct_dev; + + /** A spinlock to protect access to the input ring. */ + spinlock_t lock; + + /** A spinlock to protect while posting on the ring. */ + spinlock_t post_lock; + + /** This flag indicates if the queue can be used for soft commands. + * If this flag is set, post_lock must be acquired before posting + * a command to the queue. + * If this flag is clear, post_lock is invalid for the queue. + * All control commands (soft commands) will go through only Queue 0 + * (control and data queue). So only queue-0 needs post_lock, + * other queues are only data queues and does not need post_lock + */ + bool allow_soft_cmds; + + u32 pkt_in_done; + + u32 pkts_processed; + + /** A spinlock to protect access to the input ring.*/ + spinlock_t iq_flush_running_lock; + + /** Flag that indicates if the queue uses 64 byte commands. */ + u32 iqcmd_64B:1; + + /** Queue info. */ + union oct_txpciq txpciq; + + u32 rsvd:17; + + /* Controls whether extra flushing of IQ is done on Tx */ + u32 do_auto_flush:1; + + u32 status:8; + + /** Maximum no. of instructions in this queue. */ + u32 max_count; + + /** Index in input ring where the driver should write the next packet */ + u32 host_write_index; + + /** Index in input ring where Octeon is expected to read the next + * packet. + */ + u32 octeon_read_index; + + /** This index aids in finding the window in the queue where Octeon + * has read the commands. + */ + u32 flush_index; + + /** This field keeps track of the instructions pending in this queue. */ + atomic_t instr_pending; + + u32 reset_instr_cnt; + + /** Pointer to the Virtual Base addr of the input ring. */ + u8 *base_addr; + + struct octeon_request_list *request_list; + + /** Octeon doorbell register for the ring. */ + void __iomem *doorbell_reg; + + /** Octeon instruction count register for this ring. */ + void __iomem *inst_cnt_reg; + + /** Number of instructions pending to be posted to Octeon. */ + u32 fill_cnt; + + /** The max. number of instructions that can be held pending by the + * driver. + */ + u32 fill_threshold; + + /** The last time that the doorbell was rung. */ + u64 last_db_time; + + /** The doorbell timeout. If the doorbell was not rung for this time and + * fill_cnt is non-zero, ring the doorbell again. + */ + u32 db_timeout; + + /** Statistics for this input queue. */ + struct oct_iq_stats stats; + + /** DMA mapped base address of the input descriptor ring. */ + dma_addr_t base_addr_dma; + + /** Application context */ + void *app_ctx; + + /* network stack queue index */ + int q_index; + + /*os ifidx associated with this queue */ + int ifidx; + +}; + +/*---------------------- INSTRUCTION FORMAT ----------------------------*/ + +/** 32-byte instruction format. + * Format of instruction for a 32-byte mode input queue. + */ +struct octeon_instr_32B { + /** Pointer where the input data is available. */ + u64 dptr; + + /** Instruction Header. */ + u64 ih; + + /** Pointer where the response for a RAW mode packet will be written + * by Octeon. 
+ */ + u64 rptr; + + /** Input Request Header. Additional info about the input. */ + u64 irh; + +}; + +#define OCT_32B_INSTR_SIZE (sizeof(struct octeon_instr_32B)) + +/** 64-byte instruction format. + * Format of instruction for a 64-byte mode input queue. + */ +struct octeon_instr2_64B { + /** Pointer where the input data is available. */ + u64 dptr; + + /** Instruction Header. */ + u64 ih2; + + /** Input Request Header. */ + u64 irh; + + /** opcode/subcode specific parameters */ + u64 ossp[2]; + + /** Return Data Parameters */ + u64 rdp; + + /** Pointer where the response for a RAW mode packet will be written + * by Octeon. + */ + u64 rptr; + + u64 reserved; +}; + +struct octeon_instr3_64B { + /** Pointer where the input data is available. */ + u64 dptr; + + /** Instruction Header. */ + u64 ih3; + + /** Instruction Header. */ + u64 pki_ih3; + + /** Input Request Header. */ + u64 irh; + + /** opcode/subcode specific parameters */ + u64 ossp[2]; + + /** Return Data Parameters */ + u64 rdp; + + /** Pointer where the response for a RAW mode packet will be written + * by Octeon. + */ + u64 rptr; + +}; + +union octeon_instr_64B { + struct octeon_instr2_64B cmd2; + struct octeon_instr3_64B cmd3; +}; + +#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B)) + +/** The size of each buffer in soft command buffer pool + */ +#define SOFT_COMMAND_BUFFER_SIZE 2048 + +struct octeon_soft_command { + /** Soft command buffer info. */ + struct list_head node; + u64 dma_addr; + u32 size; + + /** Command and return status */ + union octeon_instr_64B cmd; + +#define COMPLETION_WORD_INIT 0xffffffffffffffffULL + u64 *status_word; + + /** Data buffer info */ + void *virtdptr; + u64 dmadptr; + u32 datasize; + + /** Return buffer info */ + void *virtrptr; + u64 dmarptr; + u32 rdatasize; + + /** Context buffer info */ + void *ctxptr; + u32 ctxsize; + + /** Time out and callback */ + size_t expiry_time; + u32 iq_no; + void (*callback)(struct octeon_device *, u32, void *); + void *callback_arg; + + int caller_is_done; + u32 sc_status; + struct completion complete; +}; + +/* max timeout (in milli sec) for soft request */ +#define LIO_SC_MAX_TMO_MS 60000 + +/** Maximum number of buffers to allocate into soft command buffer pool + */ +#define MAX_SOFT_COMMAND_BUFFERS 256 + +/** Head of a soft command buffer pool. + */ +struct octeon_sc_buffer_pool { + /** List structure to add delete pending entries to */ + struct list_head head; + + /** A lock for this response list */ + spinlock_t lock; + + atomic_t alloc_buf_count; +}; + +#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \ + (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count) + +int octeon_setup_sc_buffer_pool(struct octeon_device *oct); +int octeon_free_sc_done_list(struct octeon_device *oct); +int octeon_free_sc_zombie_list(struct octeon_device *oct); +int octeon_free_sc_buffer_pool(struct octeon_device *oct); +struct octeon_soft_command * + octeon_alloc_soft_command(struct octeon_device *oct, + u32 datasize, u32 rdatasize, + u32 ctxsize); +void octeon_free_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc); + +/** + * octeon_init_instr_queue() + * @param octeon_dev - pointer to the octeon device structure. + * @param txpciq - queue to be initialized (0 <= q_no <= 3). + * + * Called at driver init time for each input queue. iq_conf has the + * configuration parameters for the queue. 
+ * + * @return Success: 0 Failure: 1 + */ +int octeon_init_instr_queue(struct octeon_device *octeon_dev, + union oct_txpciq txpciq, + u32 num_descs); + +/** + * octeon_delete_instr_queue() + * @param octeon_dev - pointer to the octeon device structure. + * @param iq_no - queue to be deleted (0 <= q_no <= 3). + * + * Called at driver unload time for each input queue. Deletes all + * allocated resources for the input queue. + * + * @return Success: 0 Failure: 1 + */ +int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no); + +int lio_wait_for_instr_fetch(struct octeon_device *oct); + +void +octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no); + +int +octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, + void (*fn)(void *)); + +int +lio_process_iq_request_list(struct octeon_device *oct, + struct octeon_instr_queue *iq, u32 napi_budget); + +int octeon_send_command(struct octeon_device *oct, u32 iq_no, + u32 force_db, void *cmd, void *buf, + u32 datasize, u32 reqtype); + +void octeon_dump_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc); + +void octeon_prepare_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc, + u8 opcode, u8 subcode, + u32 irh_ossp, u64 ossp0, + u64 ossp1); + +int octeon_send_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc); + +int octeon_setup_iq(struct octeon_device *oct, int ifidx, + int q_index, union oct_txpciq iq_no, u32 num_descs, + void *app_ctx); +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 napi_budget); +#endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c new file mode 100644 index 000000000..ad685f5d0 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c @@ -0,0 +1,375 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. 
+ ***********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_main.h" +#include "octeon_mailbox.h" +#include "cn23xx_pf_device.h" + +/** + * octeon_mbox_read: + * @mbox: Pointer mailbox + * + * Reads the 8-bytes of data from the mbox register + * Writes back the acknowldgement inidcating completion of read + */ +int octeon_mbox_read(struct octeon_mbox *mbox) +{ + union octeon_mbox_message msg; + int ret = 0; + + spin_lock(&mbox->lock); + + msg.u64 = readq(mbox->mbox_read_reg); + + if ((msg.u64 == OCTEON_PFVFACK) || (msg.u64 == OCTEON_PFVFSIG)) { + spin_unlock(&mbox->lock); + return 0; + } + + if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) { + mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = msg.u64; + mbox->mbox_req.recv_len++; + } else { + if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) { + mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] = + msg.u64; + mbox->mbox_resp.recv_len++; + } else { + if ((mbox->state & OCTEON_MBOX_STATE_IDLE) && + (msg.s.type == OCTEON_MBOX_REQUEST)) { + mbox->state &= ~OCTEON_MBOX_STATE_IDLE; + mbox->state |= + OCTEON_MBOX_STATE_REQUEST_RECEIVING; + mbox->mbox_req.msg.u64 = msg.u64; + mbox->mbox_req.q_no = mbox->q_no; + mbox->mbox_req.recv_len = 1; + } else { + if ((mbox->state & + OCTEON_MBOX_STATE_RESPONSE_PENDING) && + (msg.s.type == OCTEON_MBOX_RESPONSE)) { + mbox->state &= + ~OCTEON_MBOX_STATE_RESPONSE_PENDING; + mbox->state |= + OCTEON_MBOX_STATE_RESPONSE_RECEIVING + ; + mbox->mbox_resp.msg.u64 = msg.u64; + mbox->mbox_resp.q_no = mbox->q_no; + mbox->mbox_resp.recv_len = 1; + } else { + writeq(OCTEON_PFVFERR, + mbox->mbox_read_reg); + mbox->state |= OCTEON_MBOX_STATE_ERROR; + spin_unlock(&mbox->lock); + return 1; + } + } + } + } + + if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) { + if (mbox->mbox_req.recv_len < mbox->mbox_req.msg.s.len) { + ret = 0; + } else { + mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVING; + mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVED; + ret = 1; + } + } else { + if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) { + if (mbox->mbox_resp.recv_len < + mbox->mbox_resp.msg.s.len) { + ret = 0; + } else { + mbox->state &= + ~OCTEON_MBOX_STATE_RESPONSE_RECEIVING; + mbox->state |= + OCTEON_MBOX_STATE_RESPONSE_RECEIVED; + ret = 1; + } + } else { + WARN_ON(1); + } + } + + writeq(OCTEON_PFVFACK, mbox->mbox_read_reg); + + spin_unlock(&mbox->lock); + + return ret; +} + +/** + * octeon_mbox_write: + * @oct: Pointer Octeon Device + * @mbox_cmd: Cmd to send to mailbox. + * + * Populates the queue specific mbox structure + * with cmd information. 
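+ * Handshake implemented below: wait for OCTEON_PFVFSIG in the write
+ * register, write msg.u64, then for each remaining data word wait for
+ * OCTEON_PFVFACK before writing it; each wait gives up after
+ * LIO_MBOX_WRITE_WAIT_CNT polls and the call returns
+ * OCTEON_MBOX_STATUS_FAILED.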
+ * Write the cmd to mbox register + */ +int octeon_mbox_write(struct octeon_device *oct, + struct octeon_mbox_cmd *mbox_cmd) +{ + struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; + u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS; + long timeout = LIO_MBOX_WRITE_WAIT_TIME; + unsigned long flags; + + spin_lock_irqsave(&mbox->lock, flags); + + if ((mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) && + !(mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED)) { + spin_unlock_irqrestore(&mbox->lock, flags); + return OCTEON_MBOX_STATUS_FAILED; + } + + if ((mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) && + !(mbox->state & OCTEON_MBOX_STATE_IDLE)) { + spin_unlock_irqrestore(&mbox->lock, flags); + return OCTEON_MBOX_STATUS_BUSY; + } + + if (mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) { + memcpy(&mbox->mbox_resp, mbox_cmd, + sizeof(struct octeon_mbox_cmd)); + mbox->state = OCTEON_MBOX_STATE_RESPONSE_PENDING; + } + + spin_unlock_irqrestore(&mbox->lock, flags); + + count = 0; + + while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) { + schedule_timeout_uninterruptible(timeout); + if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { + ret = OCTEON_MBOX_STATUS_FAILED; + break; + } + } + + if (ret == OCTEON_MBOX_STATUS_SUCCESS) { + writeq(mbox_cmd->msg.u64, mbox->mbox_write_reg); + for (i = 0; i < (u32)(mbox_cmd->msg.s.len - 1); i++) { + count = 0; + while (readq(mbox->mbox_write_reg) != + OCTEON_PFVFACK) { + schedule_timeout_uninterruptible(timeout); + if (count++ == LIO_MBOX_WRITE_WAIT_CNT) { + ret = OCTEON_MBOX_STATUS_FAILED; + break; + } + } + if (ret == OCTEON_MBOX_STATUS_SUCCESS) + writeq(mbox_cmd->data[i], mbox->mbox_write_reg); + else + break; + } + } + + spin_lock_irqsave(&mbox->lock, flags); + if (mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) { + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + } else { + if ((!mbox_cmd->msg.s.resp_needed) || + (ret == OCTEON_MBOX_STATUS_FAILED)) { + mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING; + if (!(mbox->state & + (OCTEON_MBOX_STATE_REQUEST_RECEIVING | + OCTEON_MBOX_STATE_REQUEST_RECEIVED))) + mbox->state = OCTEON_MBOX_STATE_IDLE; + } + } + spin_unlock_irqrestore(&mbox->lock, flags); + + return ret; +} + +static void get_vf_stats(struct octeon_device *oct, + struct oct_vf_stats *stats) +{ + int i; + + for (i = 0; i < oct->num_iqs; i++) { + if (!oct->instr_queue[i]) + continue; + stats->tx_packets += oct->instr_queue[i]->stats.tx_done; + stats->tx_bytes += oct->instr_queue[i]->stats.tx_tot_bytes; + } + + for (i = 0; i < oct->num_oqs; i++) { + if (!oct->droq[i]) + continue; + stats->rx_packets += oct->droq[i]->stats.rx_pkts_received; + stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received; + } +} + +/** + * octeon_mbox_process_cmd: + * @mbox: Pointer mailbox + * @mbox_cmd: Pointer to command received + * + * Process the cmd received in mbox + */ +static int octeon_mbox_process_cmd(struct octeon_mbox *mbox, + struct octeon_mbox_cmd *mbox_cmd) +{ + struct octeon_device *oct = mbox->oct_dev; + + switch (mbox_cmd->msg.s.cmd) { + case OCTEON_VF_ACTIVE: + dev_dbg(&oct->pci_dev->dev, "got vfactive sending data back\n"); + mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE; + mbox_cmd->msg.s.resp_needed = 1; + mbox_cmd->msg.s.len = 2; + mbox_cmd->data[0] = 0; /* VF version is in mbox_cmd->data[0] */ + ((struct lio_version *)&mbox_cmd->data[0])->major = + LIQUIDIO_BASE_MAJOR_VERSION; + ((struct lio_version *)&mbox_cmd->data[0])->minor = + LIQUIDIO_BASE_MINOR_VERSION; + ((struct lio_version *)&mbox_cmd->data[0])->micro = + 
LIQUIDIO_BASE_MICRO_VERSION; + memcpy(mbox_cmd->msg.s.params, (uint8_t *)&oct->pfvf_hsword, 6); + /* Sending core cofig info to the corresponding active VF.*/ + octeon_mbox_write(oct, mbox_cmd); + break; + + case OCTEON_VF_FLR_REQUEST: + dev_info(&oct->pci_dev->dev, + "got a request for FLR from VF that owns DPI ring %u\n", + mbox->q_no); + pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]); + break; + + case OCTEON_PF_CHANGED_VF_MACADDR: + if (OCTEON_CN23XX_VF(oct)) + octeon_pf_changed_vf_macaddr(oct, + mbox_cmd->msg.s.params); + break; + + case OCTEON_GET_VF_STATS: + dev_dbg(&oct->pci_dev->dev, "Got VF stats request. Sending data back\n"); + mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE; + mbox_cmd->msg.s.resp_needed = 1; + mbox_cmd->msg.s.len = 1 + + sizeof(struct oct_vf_stats) / sizeof(u64); + get_vf_stats(oct, (struct oct_vf_stats *)mbox_cmd->data); + octeon_mbox_write(oct, mbox_cmd); + break; + default: + break; + } + return 0; +} + +/** + * octeon_mbox_process_message + * @mbox: mailbox + * + * Process the received mbox message. + */ +int octeon_mbox_process_message(struct octeon_mbox *mbox) +{ + struct octeon_mbox_cmd mbox_cmd; + unsigned long flags; + + spin_lock_irqsave(&mbox->lock, flags); + + if (mbox->state & OCTEON_MBOX_STATE_ERROR) { + if (mbox->state & (OCTEON_MBOX_STATE_RESPONSE_PENDING | + OCTEON_MBOX_STATE_RESPONSE_RECEIVING)) { + memcpy(&mbox_cmd, &mbox->mbox_resp, + sizeof(struct octeon_mbox_cmd)); + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + mbox_cmd.recv_status = 1; + if (mbox_cmd.fn) + mbox_cmd.fn(mbox->oct_dev, &mbox_cmd, + mbox_cmd.fn_arg); + return 0; + } + + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + return 0; + } + + if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVED) { + memcpy(&mbox_cmd, &mbox->mbox_resp, + sizeof(struct octeon_mbox_cmd)); + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + mbox_cmd.recv_status = 0; + if (mbox_cmd.fn) + mbox_cmd.fn(mbox->oct_dev, &mbox_cmd, mbox_cmd.fn_arg); + return 0; + } + + if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED) { + memcpy(&mbox_cmd, &mbox->mbox_req, + sizeof(struct octeon_mbox_cmd)); + if (!mbox_cmd.msg.s.resp_needed) { + mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVED; + if (!(mbox->state & + OCTEON_MBOX_STATE_RESPONSE_PENDING)) + mbox->state = OCTEON_MBOX_STATE_IDLE; + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + } + + spin_unlock_irqrestore(&mbox->lock, flags); + octeon_mbox_process_cmd(mbox, &mbox_cmd); + return 0; + } + + spin_unlock_irqrestore(&mbox->lock, flags); + WARN_ON(1); + + return 0; +} + +int octeon_mbox_cancel(struct octeon_device *oct, int q_no) +{ + struct octeon_mbox *mbox = oct->mbox[q_no]; + struct octeon_mbox_cmd *mbox_cmd; + unsigned long flags = 0; + + spin_lock_irqsave(&mbox->lock, flags); + mbox_cmd = &mbox->mbox_resp; + + if (!(mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING)) { + spin_unlock_irqrestore(&mbox->lock, flags); + return 1; + } + + mbox->state = OCTEON_MBOX_STATE_IDLE; + memset(mbox_cmd, 0, sizeof(*mbox_cmd)); + writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg); + spin_unlock_irqrestore(&mbox->lock, flags); + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h new file mode 100644 index 000000000..d92bd7e16 --- 
/dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h @@ -0,0 +1,122 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +#ifndef __MAILBOX_H__ +#define __MAILBOX_H__ + +/* Macros for Mail Box Communication */ + +#define OCTEON_MBOX_DATA_MAX 32 + +#define OCTEON_VF_ACTIVE 0x1 +#define OCTEON_VF_FLR_REQUEST 0x2 +#define OCTEON_PF_CHANGED_VF_MACADDR 0x4 +#define OCTEON_GET_VF_STATS 0x8 + +/*Macro for Read acknowldgement*/ +#define OCTEON_PFVFACK 0xffffffffffffffffULL +#define OCTEON_PFVFSIG 0x1122334455667788ULL +#define OCTEON_PFVFERR 0xDEADDEADDEADDEADULL + +#define LIO_MBOX_WRITE_WAIT_CNT 1000 +#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1) + +enum octeon_mbox_cmd_status { + OCTEON_MBOX_STATUS_SUCCESS = 0, + OCTEON_MBOX_STATUS_FAILED = 1, + OCTEON_MBOX_STATUS_BUSY = 2 +}; + +enum octeon_mbox_message_type { + OCTEON_MBOX_REQUEST = 0, + OCTEON_MBOX_RESPONSE = 1 +}; + +union octeon_mbox_message { + u64 u64; + struct { + u16 type : 1; + u16 resp_needed : 1; + u16 cmd : 6; + u16 len : 8; + u8 params[6]; + } s; +}; + +typedef void (*octeon_mbox_callback_t)(void *, void *, void *); + +struct octeon_mbox_cmd { + union octeon_mbox_message msg; + u64 data[OCTEON_MBOX_DATA_MAX]; + u32 q_no; + u32 recv_len; + u32 recv_status; + octeon_mbox_callback_t fn; + void *fn_arg; +}; + +enum octeon_mbox_state { + OCTEON_MBOX_STATE_IDLE = 1, + OCTEON_MBOX_STATE_REQUEST_RECEIVING = 2, + OCTEON_MBOX_STATE_REQUEST_RECEIVED = 4, + OCTEON_MBOX_STATE_RESPONSE_PENDING = 8, + OCTEON_MBOX_STATE_RESPONSE_RECEIVING = 16, + OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 32, + OCTEON_MBOX_STATE_ERROR = 64 +}; + +struct octeon_mbox { + /** A spinlock to protect access to this q_mbox. */ + spinlock_t lock; + + struct octeon_device *oct_dev; + + u32 q_no; + + enum octeon_mbox_state state; + + struct cavium_wk mbox_poll_wk; + + /** SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */ + void *mbox_int_reg; + + /** SLI_PKT_PF_VF_MBOX_SIG(0) for PF, SLI_PKT_PF_VF_MBOX_SIG(1) for VF. + */ + void *mbox_write_reg; + + /** SLI_PKT_PF_VF_MBOX_SIG(1) for PF, SLI_PKT_PF_VF_MBOX_SIG(0) for VF. 
+ */ + void *mbox_read_reg; + + struct octeon_mbox_cmd mbox_req; + + struct octeon_mbox_cmd mbox_resp; + +}; + +struct oct_vf_stats_ctx { + atomic_t status; + struct oct_vf_stats *stats; +}; + +int octeon_mbox_read(struct octeon_mbox *mbox); +int octeon_mbox_write(struct octeon_device *oct, + struct octeon_mbox_cmd *mbox_cmd); +int octeon_mbox_process_message(struct octeon_mbox *mbox); +int octeon_mbox_cancel(struct octeon_device *oct, int q_no); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h new file mode 100644 index 000000000..5b4cb725f --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -0,0 +1,236 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more details. + ***********************************************************************/ +/*! \file octeon_main.h + * \brief Host Driver: This file is included by all host driver source files + * to include common definitions. + */ + +#ifndef _OCTEON_MAIN_H_ +#define _OCTEON_MAIN_H_ + +#include <linux/sched/signal.h> + +#if BITS_PER_LONG == 32 +#define CVM_CAST64(v) ((long long)(v)) +#elif BITS_PER_LONG == 64 +#define CVM_CAST64(v) ((long long)(long)(v)) +#else +#error "Unknown system architecture" +#endif + +#define DRV_NAME "LiquidIO" + +struct octeon_device_priv { + /** Tasklet structures for this device. */ + struct tasklet_struct droq_tasklet; + unsigned long napi_mask; + struct octeon_device *dev; +}; + +/** This structure is used by NIC driver to store information required + * to free the sk_buff when the packet has been fetched by Octeon. + * Bytes offset below assume worst-case of a 64-bit system. + */ +struct octnet_buf_free_info { + /** Bytes 1-8. Pointer to network device private structure. */ + struct lio *lio; + + /** Bytes 9-16. Pointer to sk_buff. */ + struct sk_buff *skb; + + /** Bytes 17-24. Pointer to gather list. */ + struct octnic_gather *g; + + /** Bytes 25-32. Physical address of skb->data or gather list. */ + u64 dptr; + + /** Bytes 33-47. 
Piggybacked soft command, if any */ + struct octeon_soft_command *sc; +}; + +/* BQL-related functions */ +int octeon_report_sent_bytes_to_bql(void *buf, int reqtype); +void octeon_update_tx_completion_counters(void *buf, int reqtype, + unsigned int *pkts_compl, + unsigned int *bytes_compl); +void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, + unsigned int bytes_compl); +void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac); + +void octeon_schedule_rxq_oom_work(struct octeon_device *oct, + struct octeon_droq *droq); + +/** Swap 8B blocks */ +static inline void octeon_swap_8B_data(u64 *data, u32 blocks) +{ + while (blocks) { + cpu_to_be64s(data); + blocks--; + data++; + } +} + +/** + * \brief unmaps a PCI BAR + * @param oct Pointer to Octeon device + * @param baridx bar index + */ +static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx) +{ + dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n", + baridx); + + if (oct->mmio[baridx].done) + iounmap(oct->mmio[baridx].hw_addr); + + if (oct->mmio[baridx].start) + pci_release_region(oct->pci_dev, baridx * 2); +} + +/** + * \brief maps a PCI BAR + * @param oct Pointer to Octeon device + * @param baridx bar index + * @param max_map_len maximum length of mapped memory + */ +static inline int octeon_map_pci_barx(struct octeon_device *oct, + int baridx, int max_map_len) +{ + u32 mapped_len = 0; + + if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) { + dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n", + baridx); + return 1; + } + + oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2); + oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2); + + mapped_len = oct->mmio[baridx].len; + if (!mapped_len) + goto err_release_region; + + if (max_map_len && (mapped_len > max_map_len)) + mapped_len = max_map_len; + + oct->mmio[baridx].hw_addr = + ioremap(oct->mmio[baridx].start, mapped_len); + oct->mmio[baridx].mapped_len = mapped_len; + + dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n", + baridx, oct->mmio[baridx].start, mapped_len, + oct->mmio[baridx].len); + + if (!oct->mmio[baridx].hw_addr) { + dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n", + baridx); + goto err_release_region; + } + oct->mmio[baridx].done = 1; + + return 0; + +err_release_region: + pci_release_region(oct->pci_dev, baridx * 2); + return 1; +} + +/* input parameter: + * sc: pointer to a soft request + * timeout: milli sec which an application wants to wait for the + response of the request. + * 0: the request will wait until its response gets back + * from the firmware within LIO_SC_MAX_TMO_MS milli sec. + * It the response does not return within + * LIO_SC_MAX_TMO_MS milli sec, lio_process_ordered_list() + * will move the request to zombie response list. + * + * return value: + * 0: got the response from firmware for the sc request. + * errno -EINTR: user abort the command. + * errno -ETIME: user spefified timeout value has been expired. + * errno -EBUSY: the response of the request does not return in + * resonable time (LIO_SC_MAX_TMO_MS). + * the sc wll be move to zombie response list by + * lio_process_ordered_list() + * + * A request with non-zero return value, the sc->caller_is_done + * will be marked 1. + * When getting a request with zero return value, the requestor + * should mark sc->caller_is_done with 1 after examing the + * response of sc. 
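+ *
+ * A minimal caller sketch following these rules (error handling and the
+ * setup of sc are elided; assumed usage, not a prescribed API):
+ *
+ *   if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED) {
+ *           octeon_free_soft_command(oct, sc);
+ *           return -EIO;
+ *   }
+ *   err = wait_for_sc_completion_timeout(oct, sc, 0);
+ *   if (err)
+ *           return err;     (caller_is_done was already set)
+ *   ... examine sc->sc_status and the response buffer ...
+ *   WRITE_ONCE(sc->caller_is_done, true);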
+ * lio_process_ordered_list() will free the soft command on behalf + * of the soft command requestor. + * This is to fix the possible race condition of both timeout process + * and lio_process_ordered_list()/callback function to free a + * sc strucutre. + */ +static inline int +wait_for_sc_completion_timeout(struct octeon_device *oct_dev, + struct octeon_soft_command *sc, + unsigned long timeout) +{ + int errno = 0; + long timeout_jiff; + + if (timeout) + timeout_jiff = msecs_to_jiffies(timeout); + else + timeout_jiff = MAX_SCHEDULE_TIMEOUT; + + timeout_jiff = + wait_for_completion_interruptible_timeout(&sc->complete, + timeout_jiff); + if (timeout_jiff == 0) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -ETIME; + } else if (timeout_jiff == -ERESTARTSYS) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EINTR; + } else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) { + dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n", + __func__); + WRITE_ONCE(sc->caller_is_done, true); + errno = -EBUSY; + } + + return errno; +} + +#ifndef ROUNDUP4 +#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc) +#endif + +#ifndef ROUNDUP8 +#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8) +#endif + +#ifndef ROUNDUP16 +#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0) +#endif + +#ifndef ROUNDUP128 +#define ROUNDUP128(val) (((val) + 127) & 0xffffff80) +#endif + +#endif /* _OCTEON_MAIN_H_ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c new file mode 100644 index 000000000..7ccab3614 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -0,0 +1,201 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + **********************************************************************/ +#include <linux/netdevice.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_mem_ops.h" + +#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP + +#ifdef __BIG_ENDIAN_BITFIELD +static inline void +octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx) +{ + u32 mask; + + mask = oct->fn_list.bar1_idx_read(oct, idx); + mask = (mask & 0x2) ? 
(mask & ~2) : (mask | 2); + oct->fn_list.bar1_idx_write(oct, idx, mask); +} +#else +#define octeon_toggle_bar1_swapmode(oct, idx) +#endif + +static void +octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr, + u8 *hostbuf, u32 len) +{ + while ((len) && ((unsigned long)mapped_addr) & 7) { + writeb(*(hostbuf++), mapped_addr++); + len--; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len >= 8) { + writeq(*((u64 *)hostbuf), mapped_addr); + mapped_addr += 8; + hostbuf += 8; + len -= 8; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len--) + writeb(*(hostbuf++), mapped_addr++); +} + +static void +octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr, + u8 *hostbuf, u32 len) +{ + while ((len) && ((unsigned long)mapped_addr) & 7) { + *(hostbuf++) = readb(mapped_addr++); + len--; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len >= 8) { + *((u64 *)hostbuf) = readq(mapped_addr); + mapped_addr += 8; + hostbuf += 8; + len -= 8; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len--) + *(hostbuf++) = readb(mapped_addr++); +} + +/* Core mem read/write with temporary bar1 settings. */ +/* op = 1 to read, op = 0 to write. */ +static void +__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr, + u8 *hostbuf, u32 len, u32 op) +{ + u32 copy_len = 0, index_reg_val = 0; + unsigned long flags; + u8 __iomem *mapped_addr; + u64 static_mapping_base; + + static_mapping_base = oct->console_nb_info.dram_region_base; + + if (static_mapping_base && + static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) { + int bar1_index = oct->console_nb_info.bar1_index; + + mapped_addr = oct->mmio[1].hw_addr + + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE)) + + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL)); + + if (op) + octeon_pci_fastread(oct, mapped_addr, hostbuf, len); + else + octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len); + + return; + } + + spin_lock_irqsave(&oct->mem_access_lock, flags); + + /* Save the original index reg value. */ + index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX); + do { + oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1); + mapped_addr = oct->mmio[1].hw_addr + + (MEMOPS_IDX << 22) + (addr & 0x3fffff); + + /* If operation crosses a 4MB boundary, split the transfer + * at the 4MB + * boundary. 
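+ * The remainder is copied on the next pass of the surrounding loop,
+ * after the BAR1 index register has been re-pointed at the next 4MB
+ * window by bar1_idx_setup().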
+ */ + if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) { + copy_len = (u32)(((addr & ~(0x3fffff)) + + (MEMOPS_IDX << 22)) - addr); + } else { + copy_len = len; + } + + if (op) { /* read from core */ + octeon_pci_fastread(oct, mapped_addr, hostbuf, + copy_len); + } else { + octeon_pci_fastwrite(oct, mapped_addr, hostbuf, + copy_len); + } + + len -= copy_len; + addr += copy_len; + hostbuf += copy_len; + + } while (len); + + oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val); + + spin_unlock_irqrestore(&oct->mem_access_lock, flags); +} + +void +octeon_pci_read_core_mem(struct octeon_device *oct, + u64 coreaddr, + u8 *buf, + u32 len) +{ + __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1); +} + +void +octeon_pci_write_core_mem(struct octeon_device *oct, + u64 coreaddr, + const u8 *buf, + u32 len) +{ + __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0); +} + +u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr) +{ + __be64 ret; + + __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1); + + return be64_to_cpu(ret); +} + +u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr) +{ + __be32 ret; + + __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1); + + return be32_to_cpu(ret); +} + +void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr, + u32 val) +{ + __be32 t = cpu_to_be32(val); + + __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0); +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h new file mode 100644 index 000000000..47a3ff5f9 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h @@ -0,0 +1,72 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + **********************************************************************/ + +/*! \file octeon_mem_ops.h + * \brief Host Driver: Routines used to read/write Octeon memory. + */ + +#ifndef __OCTEON_MEM_OPS_H__ +#define __OCTEON_MEM_OPS_H__ + +/** Read a 64-bit value from a BAR1 mapped core memory address. + * @param oct - pointer to the octeon device. + * @param core_addr - the address to read from. + * + * The range_idx gives the BAR1 index register for the range of address + * in which core_addr is mapped. + * + * @return 64-bit value read from Core memory + */ +u64 octeon_read_device_mem64(struct octeon_device *oct, u64 core_addr); + +/** Read a 32-bit value from a BAR1 mapped core memory address. + * @param oct - pointer to the octeon device. + * @param core_addr - the address to read from. + * + * @return 32-bit value read from Core memory + */ +u32 octeon_read_device_mem32(struct octeon_device *oct, u64 core_addr); + +/** Write a 32-bit value to a BAR1 mapped core memory address. + * @param oct - pointer to the octeon device. + * @param core_addr - the address to write to. 
+ * @param val - 32-bit value to write. + */ +void +octeon_write_device_mem32(struct octeon_device *oct, + u64 core_addr, + u32 val); + +/** Read multiple bytes from Octeon memory. + */ +void +octeon_pci_read_core_mem(struct octeon_device *oct, + u64 coreaddr, + u8 *buf, + u32 len); + +/** Write multiple bytes into Octeon memory. + */ +void +octeon_pci_write_core_mem(struct octeon_device *oct, + u64 coreaddr, + const u8 *buf, + u32 len); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h new file mode 100644 index 000000000..ebe56bd88 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h @@ -0,0 +1,626 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + **********************************************************************/ + +/*! \file octeon_network.h + * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module. + */ + +#ifndef __OCTEON_NETWORK_H__ +#define __OCTEON_NETWORK_H__ +#include <linux/ptp_clock_kernel.h> + +#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) +#define LIO_MIN_MTU_SIZE ETH_MIN_MTU + +/* Bit mask values for lio->ifstate */ +#define LIO_IFSTATE_DROQ_OPS 0x01 +#define LIO_IFSTATE_REGISTERED 0x02 +#define LIO_IFSTATE_RUNNING 0x04 +#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 +#define LIO_IFSTATE_RESETTING 0x10 + +struct liquidio_if_cfg_resp { + u64 rh; + struct liquidio_if_cfg_info cfg_info; + u64 status; +}; + +#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */ +#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200 + +/* Structure of a node in list of gather components maintained by + * NIC driver for each network device. + */ +struct octnic_gather { + /* List manipulation. Next and prev pointers. */ + struct list_head list; + + /* Size of the gather component at sg in bytes. */ + int sg_size; + + /* Number of bytes that sg was adjusted to make it 8B-aligned. */ + int adjust; + + /* Gather component that can accommodate max sized fragment list + * received from the IP layer. + */ + struct octeon_sg_entry *sg; + + dma_addr_t sg_dma_ptr; +}; + +struct oct_nic_stats_resp { + u64 rh; + struct oct_link_stats stats; + u64 status; +}; + +struct oct_nic_vf_stats_resp { + u64 rh; + u64 spoofmac_cnt; + u64 status; +}; + +struct oct_nic_stats_ctrl { + struct completion complete; + struct net_device *netdev; +}; + +struct oct_nic_seapi_resp { + u64 rh; + union { + u32 fec_setting; + u32 speed; + }; + u64 status; +}; + +/** LiquidIO per-interface network private data */ +struct lio { + /** State of the interface. Rx/Tx happens only in the RUNNING state. */ + atomic_t ifstate; + + /** Octeon Interface index number. This device will be represented as + * oct<ifidx> in the system. + */ + int ifidx; + + /** Octeon Input queue to use to transmit for this network interface. 
*/ + int txq; + + /** Octeon Output queue from which pkts arrive + * for this network interface. + */ + int rxq; + + /** Guards each glist */ + spinlock_t *glist_lock; + + /** Array of gather component linked lists */ + struct list_head *glist; + void **glists_virt_base; + dma_addr_t *glists_dma_base; + u32 glist_entry_size; + + /** Pointer to the NIC properties for the Octeon device this network + * interface is associated with. + */ + struct octdev_props *octprops; + + /** Pointer to the octeon device structure. */ + struct octeon_device *oct_dev; + + struct net_device *netdev; + + /** Link information sent by the core application for this interface. */ + struct oct_link_info linfo; + + /** counter of link changes */ + u64 link_changes; + + /** Size of Tx queue for this octeon device. */ + u32 tx_qsize; + + /** Size of Rx queue for this octeon device. */ + u32 rx_qsize; + + /** Size of MTU this octeon device. */ + u32 mtu; + + /** msg level flag per interface. */ + u32 msg_enable; + + /** Copy of Interface capabilities: TSO, TSO6, LRO, Chescksums . */ + u64 dev_capability; + + /* Copy of transmit encapsulation capabilities: + * TSO, TSO6, Checksums for this device for Kernel + * 3.10.0 onwards + */ + u64 enc_dev_capability; + + /** Copy of beacaon reg in phy */ + u32 phy_beacon_val; + + /** Copy of ctrl reg in phy */ + u32 led_ctrl_val; + + /* PTP clock information */ + struct ptp_clock_info ptp_info; + struct ptp_clock *ptp_clock; + s64 ptp_adjust; + + /* for atomic access to Octeon PTP reg and data struct */ + spinlock_t ptp_lock; + + /* Interface info */ + u32 intf_open; + + /* work queue for txq status */ + struct cavium_wq txq_status_wq; + + /* work queue for rxq oom status */ + struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES]; + + /* work queue for link status */ + struct cavium_wq link_status_wq; + + /* work queue to regularly send local time to octeon firmware */ + struct cavium_wq sync_octeon_time_wq; + + int netdev_uc_count; + struct cavium_wk stats_wk; +}; + +#define LIO_SIZE (sizeof(struct lio)) +#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev)) + +#define LIO_MAX_CORES 16 + +/** + * \brief Enable or disable feature + * @param netdev pointer to network device + * @param cmd Command that just requires acknowledgment + * @param param1 Parameter to command + */ +int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1); + +int setup_rx_oom_poll_fn(struct net_device *netdev); + +void cleanup_rx_oom_poll_fn(struct net_device *netdev); + +/** + * \brief Link control command completion callback + * @param nctrl_ptr pointer to control packet structure + * + * This routine is called by the callback function when a ctrl pkt sent to + * core app completes. The nctrl_ptr contains a copy of the command type + * and data sent to the core app. This routine is only called if the ctrl + * pkt was sent successfully to the core app. 
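+ *
+ * The pointer passed in is the struct octnic_ctrl_pkt that was handed to
+ * octnet_send_nic_ctrl_pkt(); its sc_status field holds the firmware
+ * status for the command.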
+ */ +void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr); + +int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx, + u32 num_iqs, u32 num_oqs); + +irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)), + void *dev); + +int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs); + +void lio_fetch_stats(struct work_struct *work); + +int lio_wait_for_clean_oq(struct octeon_device *oct); +/** + * \brief Register ethtool operations + * @param netdev pointer to network device + */ +void liquidio_set_ethtool_ops(struct net_device *netdev); + +void lio_delete_glists(struct lio *lio); + +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs); + +int liquidio_get_speed(struct lio *lio); +int liquidio_set_speed(struct lio *lio, int speed); +int liquidio_get_fec(struct lio *lio); +int liquidio_set_fec(struct lio *lio, int on_off); + +/** + * \brief Net device change_mtu + * @param netdev network device + */ +int liquidio_change_mtu(struct net_device *netdev, int new_mtu); +#define LIO_CHANGE_MTU_SUCCESS 1 +#define LIO_CHANGE_MTU_FAIL 2 + +#define SKB_ADJ_MASK 0x3F +#define SKB_ADJ (SKB_ADJ_MASK + 1) + +#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */ +#define LIO_RXBUFFER_SZ 2048 + +static inline void +*recv_buffer_alloc(struct octeon_device *oct, + struct octeon_skb_page_info *pg_info) +{ + struct page *page; + struct sk_buff *skb; + struct octeon_skb_page_info *skb_pg_info; + + page = alloc_page(GFP_ATOMIC); + if (unlikely(!page)) + return NULL; + + skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); + if (unlikely(!skb)) { + __free_page(page); + pg_info->page = NULL; + return NULL; + } + + if ((unsigned long)skb->data & SKB_ADJ_MASK) { + u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); + + skb_reserve(skb, r); + } + + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + /* Get DMA info */ + pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + + /* Mapping failed!! 
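+ * Undo the page and skb allocations and return NULL to the caller.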
*/ + if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) { + __free_page(page); + dev_kfree_skb_any((struct sk_buff *)skb); + pg_info->page = NULL; + return NULL; + } + + pg_info->page = page; + pg_info->page_offset = 0; + skb_pg_info->page = page; + skb_pg_info->page_offset = 0; + skb_pg_info->dma = pg_info->dma; + + return (void *)skb; +} + +static inline void +*recv_buffer_fast_alloc(u32 size) +{ + struct sk_buff *skb; + struct octeon_skb_page_info *skb_pg_info; + + skb = dev_alloc_skb(size + SKB_ADJ); + if (unlikely(!skb)) + return NULL; + + if ((unsigned long)skb->data & SKB_ADJ_MASK) { + u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); + + skb_reserve(skb, r); + } + + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + skb_pg_info->page = NULL; + skb_pg_info->page_offset = 0; + skb_pg_info->dma = 0; + + return skb; +} + +static inline int +recv_buffer_recycle(struct octeon_device *oct, void *buf) +{ + struct octeon_skb_page_info *pg_info = buf; + + if (!pg_info->page) { + dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n", + __func__); + return -ENOMEM; + } + + if (unlikely(page_count(pg_info->page) != 1) || + unlikely(page_to_nid(pg_info->page) != numa_node_id())) { + dma_unmap_page(&oct->pci_dev->dev, + pg_info->dma, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + return -ENOMEM; + } + + /* Flip to other half of the buffer */ + if (pg_info->page_offset == 0) + pg_info->page_offset = LIO_RXBUFFER_SZ; + else + pg_info->page_offset = 0; + page_ref_inc(pg_info->page); + + return 0; +} + +static inline void +*recv_buffer_reuse(struct octeon_device *oct, void *buf) +{ + struct octeon_skb_page_info *pg_info = buf, *skb_pg_info; + struct sk_buff *skb; + + skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); + if (unlikely(!skb)) { + dma_unmap_page(&oct->pci_dev->dev, + pg_info->dma, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); + return NULL; + } + + if ((unsigned long)skb->data & SKB_ADJ_MASK) { + u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); + + skb_reserve(skb, r); + } + + skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + skb_pg_info->page = pg_info->page; + skb_pg_info->page_offset = pg_info->page_offset; + skb_pg_info->dma = pg_info->dma; + + return skb; +} + +static inline void +recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info) +{ + struct sk_buff *skb = (struct sk_buff *)buffer; + + put_page(pg_info->page); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + + if (skb) + dev_kfree_skb_any(skb); +} + +static inline void recv_buffer_free(void *buffer) +{ + struct sk_buff *skb = (struct sk_buff *)buffer; + struct octeon_skb_page_info *pg_info; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + + if (pg_info->page) { + put_page(pg_info->page); + pg_info->dma = 0; + pg_info->page = NULL; + pg_info->page_offset = 0; + } + + dev_kfree_skb_any((struct sk_buff *)buffer); +} + +static inline void +recv_buffer_fast_free(void *buffer) +{ + dev_kfree_skb_any((struct sk_buff *)buffer); +} + +static inline void tx_buffer_free(void *buffer) +{ + dev_kfree_skb_any((struct sk_buff *)buffer); +} + +#define lio_dma_alloc(oct, size, dma_addr) \ + dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL) +#define lio_dma_free(oct, size, virt_addr, dma_addr) \ + dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr) + +static inline +void *get_rbd(struct sk_buff *skb) +{ + struct octeon_skb_page_info *pg_info; + unsigned char *va; 
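+
+ /* The packet data lives in the page tracked via skb->cb, not in the
+  * skb's own linear data area; return its virtual address at the
+  * current page offset.
+  */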
+ + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + va = page_address(pg_info->page) + pg_info->page_offset; + + return va; +} + +static inline u64 +lio_map_ring(void *buf) +{ + dma_addr_t dma_addr; + + struct sk_buff *skb = (struct sk_buff *)buf; + struct octeon_skb_page_info *pg_info; + + pg_info = ((struct octeon_skb_page_info *)(skb->cb)); + if (!pg_info->page) { + pr_err("%s: pg_info->page NULL\n", __func__); + WARN_ON(1); + } + + /* Get DMA info */ + dma_addr = pg_info->dma; + if (!pg_info->dma) { + pr_err("%s: ERROR it should be already available\n", + __func__); + WARN_ON(1); + } + dma_addr += pg_info->page_offset; + + return (u64)dma_addr; +} + +static inline void +lio_unmap_ring(struct pci_dev *pci_dev, + u64 buf_ptr) + +{ + dma_unmap_page(&pci_dev->dev, + buf_ptr, (PAGE_SIZE << 0), + DMA_FROM_DEVICE); +} + +static inline void *octeon_fast_packet_alloc(u32 size) +{ + return recv_buffer_fast_alloc(size); +} + +static inline void octeon_fast_packet_next(struct octeon_droq *droq, + struct sk_buff *nicbuf, + int copy_len, + int idx) +{ + skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer), + copy_len); +} + +/** + * \brief check interface state + * @param lio per-network private data + * @param state_flag flag state to check + */ +static inline int ifstate_check(struct lio *lio, int state_flag) +{ + return atomic_read(&lio->ifstate) & state_flag; +} + +/** + * \brief set interface state + * @param lio per-network private data + * @param state_flag flag state to set + */ +static inline void ifstate_set(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); +} + +/** + * \brief clear interface state + * @param lio per-network private data + * @param state_flag flag state to clear + */ +static inline void ifstate_reset(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); +} + +/** + * \brief wait for all pending requests to complete + * @param oct Pointer to Octeon device + * + * Called during shutdown sequence + */ +static inline int wait_for_pending_requests(struct octeon_device *oct) +{ + int i, pcount = 0; + + for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) { + pcount = atomic_read( + &oct->response_list[OCTEON_ORDERED_SC_LIST] + .pending_req_count); + if (pcount) + schedule_timeout_uninterruptible(HZ / 10); + else + break; + } + + if (pcount) + return 1; + + return 0; +} + +/** + * \brief Stop Tx queues + * @param netdev network device + */ +static inline void stop_txqs(struct net_device *netdev) +{ + int i; + + for (i = 0; i < netdev->real_num_tx_queues; i++) + netif_stop_subqueue(netdev, i); +} + +/** + * \brief Wake Tx queues + * @param netdev network device + */ +static inline void wake_txqs(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + int i, qno; + + for (i = 0; i < netdev->real_num_tx_queues; i++) { + qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no; + + if (__netif_subqueue_stopped(netdev, i)) { + INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno, + tx_restart, 1); + netif_wake_subqueue(netdev, i); + } + } +} + +/** + * \brief Start Tx queues + * @param netdev network device + */ +static inline void start_txqs(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + int i; + + if (lio->linfo.link.s.link_up) { + for (i = 0; i < netdev->real_num_tx_queues; i++) + netif_start_subqueue(netdev, i); + } +} + +static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb) +{ + return skb->queue_mapping % 
oct->num_iqs; +} + +/** + * Remove the node at the head of the list. The list would be empty at + * the end of this call if there are no more nodes in the list. + */ +static inline struct list_head *lio_list_delete_head(struct list_head *root) +{ + struct list_head *node; + + if (list_empty_careful(root)) + node = NULL; + else + node = root->next; + + if (node) + list_del(node); + + return node; +} + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c new file mode 100644 index 000000000..1a706f81b --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -0,0 +1,198 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + **********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" + +void * +octeon_alloc_soft_command_resp(struct octeon_device *oct, + union octeon_instr_64B *cmd, + u32 rdatasize) +{ + struct octeon_soft_command *sc; + struct octeon_instr_ih3 *ih3; + struct octeon_instr_ih2 *ih2; + struct octeon_instr_irh *irh; + struct octeon_instr_rdp *rdp; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, 0, rdatasize, 0); + + if (!sc) + return NULL; + + /* Copy existing command structure into the soft command */ + memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B)); + + /* Add in the response related fields. Opcode and Param are already + * there. 
+ */ + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { + ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + /*pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ + ih3->fsz = LIO_SOFTCMDRESP_IH3; + } else { + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; + /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ + ih2->fsz = LIO_SOFTCMDRESP_IH2; + } + + irh->rflag = 1; /* a response is required */ + + rdp->pcie_port = oct->pcie_port; + rdp->rlen = rdatasize; + + *sc->status_word = COMPLETION_WORD_INIT; + + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) + sc->cmd.cmd3.rptr = sc->dmarptr; + else + sc->cmd.cmd2.rptr = sc->dmarptr; + + sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS); + + return sc; +} + +int octnet_send_nic_data_pkt(struct octeon_device *oct, + struct octnic_data_pkt *ndata, + int xmit_more) +{ + int ring_doorbell = !xmit_more; + + return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd, + ndata->buf, ndata->datasize, + ndata->reqtype); +} + +static inline struct octeon_soft_command +*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, + struct octnic_ctrl_pkt *nctrl) +{ + struct octeon_soft_command *sc = NULL; + u8 *data; + u32 rdatasize; + u32 uddsize = 0, datasize = 0; + + uddsize = (u32)(nctrl->ncmd.s.more * 8); + + datasize = OCTNET_CMD_SIZE + uddsize; + rdatasize = 16; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, datasize, rdatasize, 0); + + if (!sc) + return NULL; + + data = (u8 *)sc->virtdptr; + + memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE); + + octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3)); + + if (uddsize) { + /* Endian-Swap for UDD should have been done by caller. 
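+ * Only the command words were byte-swapped above; the UDD bytes are
+ * appended as-is.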
*/ + memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize); + } + + sc->iq_no = (u32)nctrl->iq_no; + + octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, + 0, 0, 0); + + init_completion(&sc->complete); + sc->sc_status = OCTEON_REQUEST_PENDING; + + return sc; +} + +int +octnet_send_nic_ctrl_pkt(struct octeon_device *oct, + struct octnic_ctrl_pkt *nctrl) +{ + int retval; + struct octeon_soft_command *sc = NULL; + + spin_lock_bh(&oct->cmd_resp_wqlock); + /* Allow only rx ctrl command to stop traffic on the chip + * during offline operations + */ + if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) && + (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) { + spin_unlock_bh(&oct->cmd_resp_wqlock); + dev_err(&oct->pci_dev->dev, + "%s cmd:%d not processed since driver offline\n", + __func__, nctrl->ncmd.s.cmd); + return -1; + } + + sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl); + if (!sc) { + dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n", + __func__); + spin_unlock_bh(&oct->cmd_resp_wqlock); + return -1; + } + + retval = octeon_send_soft_command(oct, sc); + if (retval == IQ_SEND_FAILED) { + octeon_free_soft_command(oct, sc); + dev_err(&oct->pci_dev->dev, "%s pf_num:%d soft command:%d send failed status: %x\n", + __func__, oct->pf_num, nctrl->ncmd.s.cmd, retval); + spin_unlock_bh(&oct->cmd_resp_wqlock); + return -1; + } + + spin_unlock_bh(&oct->cmd_resp_wqlock); + + if (nctrl->ncmd.s.cmdgroup == 0) { + switch (nctrl->ncmd.s.cmd) { + /* caller holds lock, can not sleep */ + case OCTNET_CMD_CHANGE_DEVFLAGS: + case OCTNET_CMD_SET_MULTI_LIST: + case OCTNET_CMD_SET_UC_LIST: + WRITE_ONCE(sc->caller_is_done, true); + return retval; + } + } + + retval = wait_for_sc_completion_timeout(oct, sc, 0); + if (retval) + return (retval); + + nctrl->sc_status = sc->sc_status; + retval = nctrl->sc_status; + if (nctrl->cb_fn) + nctrl->cb_fn(nctrl); + + WRITE_ONCE(sc->caller_is_done, true); + + return retval; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h new file mode 100644 index 000000000..87dd6f89c --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h @@ -0,0 +1,288 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + **********************************************************************/ + +/*! \file octeon_nic.h + * \brief Host NIC Driver: Routine to send network data & + * control packet to Octeon. + */ + +#ifndef __OCTEON_NIC_H__ +#define __OCTEON_NIC_H__ + +/* Maximum number of 8-byte words can be sent in a NIC control message. + */ +#define MAX_NCTRL_UDD 32 + +typedef void (*octnic_ctrl_pkt_cb_fn_t) (void *); + +/* Structure of control information passed by the NIC module to the OSI + * layer when sending control commands to Octeon device software. + */ +struct octnic_ctrl_pkt { + /** Command to be passed to the Octeon device software. 
*/ + union octnet_cmd ncmd; + + /** Send buffer */ + void *data; + u64 dmadata; + + /** Response buffer */ + void *rdata; + u64 dmardata; + + /** Additional data that may be needed by some commands. */ + u64 udd[MAX_NCTRL_UDD]; + + /** Input queue to use to send this command. */ + u64 iq_no; + + /** The network device that issued the control command. */ + u64 netpndev; + + /** Callback function called when the command has been fetched */ + octnic_ctrl_pkt_cb_fn_t cb_fn; + + u32 sc_status; +}; + +#define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd)) + +/** Structure of data information passed by the NIC module to the OSI + * layer when forwarding data to Octeon device software. + */ +struct octnic_data_pkt { + /** Pointer to information maintained by NIC module for this packet. The + * OSI layer passes this as-is to the driver. + */ + void *buf; + + /** Type of buffer passed in "buf" above. */ + u32 reqtype; + + /** Total data bytes to be transferred in this command. */ + u32 datasize; + + /** Command to be passed to the Octeon device software. */ + union octeon_instr_64B cmd; + + /** Input queue to use to send this command. */ + u32 q_no; + +}; + +/** Structure passed by NIC module to OSI layer to prepare a command to send + * network data to Octeon. + */ +union octnic_cmd_setup { + struct { + u32 iq_no:8; + u32 gather:1; + u32 timestamp:1; + u32 ip_csum:1; + u32 transport_csum:1; + u32 tnl_csum:1; + u32 rsvd:19; + + union { + u32 datasize; + u32 gatherptrs; + } u; + } s; + + u64 u64; + +}; + +static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) +{ + return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) + >= (oct->instr_queue[q_no]->max_count - 2)); +} + +static inline void +octnet_prepare_pci_cmd_o2(struct octeon_device *oct, + union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) +{ + struct octeon_instr_ih2 *ih2; + struct octeon_instr_irh *irh; + union octnic_packet_params packet_params; + int port; + + memset(cmd, 0, sizeof(union octeon_instr_64B)); + + ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2; + + /* assume that rflag is cleared so therefore front data will only have + * irh and ossp[0], ossp[1] for a total of 32 bytes + */ + ih2->fsz = LIO_PCICMD_O2; + + ih2->tagtype = ORDERED_TAG; + ih2->grp = DEFAULT_POW_GRP; + + port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; + + if (tag) + ih2->tag = tag; + else + ih2->tag = LIO_DATA(port); + + ih2->raw = 1; + ih2->qos = (port & 3) + 4; /* map qos based on interface */ + + if (!setup->s.gather) { + ih2->dlengsz = setup->s.u.datasize; + } else { + ih2->gather = 1; + ih2->dlengsz = setup->s.u.gatherptrs; + } + + irh = (struct octeon_instr_irh *)&cmd->cmd2.irh; + + irh->opcode = OPCODE_NIC; + irh->subcode = OPCODE_NIC_NW_DATA; + + packet_params.u32 = 0; + + packet_params.s.ip_csum = setup->s.ip_csum; + packet_params.s.transport_csum = setup->s.transport_csum; + packet_params.s.tnl_csum = setup->s.tnl_csum; + packet_params.s.tsflag = setup->s.timestamp; + + irh->ossp = packet_params.u32; +} + +static inline void +octnet_prepare_pci_cmd_o3(struct octeon_device *oct, + union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) +{ + struct octeon_instr_irh *irh; + struct octeon_instr_ih3 *ih3; + struct octeon_instr_pki_ih3 *pki_ih3; + union octnic_packet_params packet_params; + int port; + + memset(cmd, 0, sizeof(union octeon_instr_64B)); + + ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3; + pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3; + + /* assume that rflag 
is cleared so therefore front data will only have + * irh and ossp[1] and ossp[2] for a total of 24 bytes + */ + ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind; + /*PKI IH*/ + ih3->fsz = LIO_PCICMD_O3; + + if (!setup->s.gather) { + ih3->dlengsz = setup->s.u.datasize; + } else { + ih3->gather = 1; + ih3->dlengsz = setup->s.u.gatherptrs; + } + + pki_ih3->w = 1; + pki_ih3->raw = 1; + pki_ih3->utag = 1; + pki_ih3->utt = 1; + pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg; + + port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; + + if (tag) + pki_ih3->tag = tag; + else + pki_ih3->tag = LIO_DATA(port); + + pki_ih3->tagtype = ORDERED_TAG; + pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg; + pki_ih3->pm = 0x7; /*0x7 - meant for Parse nothing, uninterpreted*/ + pki_ih3->sl = 8; /* sl will be sizeof(pki_ih3)*/ + + irh = (struct octeon_instr_irh *)&cmd->cmd3.irh; + + irh->opcode = OPCODE_NIC; + irh->subcode = OPCODE_NIC_NW_DATA; + + packet_params.u32 = 0; + + packet_params.s.ip_csum = setup->s.ip_csum; + packet_params.s.transport_csum = setup->s.transport_csum; + packet_params.s.tnl_csum = setup->s.tnl_csum; + packet_params.s.tsflag = setup->s.timestamp; + + irh->ossp = packet_params.u32; +} + +/** Utility function to prepare a 64B NIC instruction based on a setup command + * @param cmd - pointer to instruction to be filled in. + * @param setup - pointer to the setup structure + * @param q_no - which queue for back pressure + * + * Assumes the cmd instruction is pre-allocated, but no fields are filled in. + */ +static inline void +octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd, + union octnic_cmd_setup *setup, u32 tag) +{ + if (OCTEON_CN6XXX(oct)) + octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag); + else + octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag); +} + +/** Allocate and a soft command with space for a response immediately following + * the commnad. + * @param oct - octeon device pointer + * @param cmd - pointer to the command structure, pre-filled for everything + * except the response. + * @param rdatasize - size in bytes of the response. + * + * @returns pointer to allocated buffer with command copied into it, and + * response space immediately following. + */ +void * +octeon_alloc_soft_command_resp(struct octeon_device *oct, + union octeon_instr_64B *cmd, + u32 rdatasize); + +/** Send a NIC data packet to the device + * @param oct - octeon device pointer + * @param ndata - control structure with queueing, and buffer information + * + * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the + * queue should be stopped, and IQ_SEND_OK if it sent okay. + */ +int octnet_send_nic_data_pkt(struct octeon_device *oct, + struct octnic_data_pkt *ndata, + int xmit_more); + +/** Send a NIC control packet to the device + * @param oct - octeon device pointer + * @param nctrl - control structure with command, timout, and callback info + * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if it the + * queue should be stopped, and IQ_SEND_OK if it sent okay. 
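+ *
+ * A minimal caller looks roughly like this (illustrative sketch only;
+ * the command and field values are examples, not taken from this file):
+ *
+ *   struct octnic_ctrl_pkt nctrl;
+ *
+ *   memset(&nctrl, 0, sizeof(nctrl));
+ *   nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
+ *   nctrl.ncmd.s.param1 = 1;
+ *   nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ *   nctrl.netpndev = (u64)netdev;
+ *   nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ *   ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ *
+ * On return, nctrl.sc_status holds the firmware status for commands that
+ * wait for a response; a negative return value means the command could
+ * not be queued.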
+ */ +int +octnet_send_nic_ctrl_pkt(struct octeon_device *oct, + struct octnic_ctrl_pkt *nctrl); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c new file mode 100644 index 000000000..8e59c2825 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -0,0 +1,936 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2016 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + **********************************************************************/ +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/vmalloc.h> +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_device.h" +#include "cn23xx_pf_device.h" +#include "cn23xx_vf_device.h" + +struct iq_post_status { + int status; + int index; +}; + +static void check_db_timeout(struct work_struct *work); +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no); + +static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *); + +static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no) +{ + struct octeon_instr_queue *iq = + (struct octeon_instr_queue *)oct->instr_queue[iq_no]; + return iq->iqcmd_64B; +} + +#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no)) + +/* Define this to return the request status comaptible to old code */ +/*#define OCTEON_USE_OLD_REQ_STATUS*/ + +/* Return 0 on success, 1 on failure */ +int octeon_init_instr_queue(struct octeon_device *oct, + union oct_txpciq txpciq, + u32 num_descs) +{ + struct octeon_instr_queue *iq; + struct octeon_iq_config *conf = NULL; + u32 iq_no = (u32)txpciq.s.q_no; + u32 q_size; + struct cavium_wq *db_wq; + int numa_node = dev_to_node(&oct->pci_dev->dev); + + if (OCTEON_CN6XXX(oct)) + conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx))); + else if (OCTEON_CN23XX_PF(oct)) + conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf))); + else if (OCTEON_CN23XX_VF(oct)) + conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf))); + + if (!conf) { + dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n", + oct->chip_id); + return 1; + } + + q_size = (u32)conf->instr_type * num_descs; + + iq = oct->instr_queue[iq_no]; + + iq->oct_dev = oct; + + iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma); + if (!iq->base_addr) { + dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n", + iq_no); + return 1; + } + + iq->max_count = num_descs; + + /* Initialize a list to holds requests that have been posted to Octeon + * but has yet to be fetched by octeon + */ + iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)), + numa_node); + if (!iq->request_list) + iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list))); + if 
(!iq->request_list) { + lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); + dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n", + iq_no); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n", + iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count); + + iq->txpciq.u64 = txpciq.u64; + iq->fill_threshold = (u32)conf->db_min; + iq->fill_cnt = 0; + iq->host_write_index = 0; + iq->octeon_read_index = 0; + iq->flush_index = 0; + iq->last_db_time = 0; + iq->do_auto_flush = 1; + iq->db_timeout = (u32)conf->db_timeout; + atomic_set(&iq->instr_pending, 0); + iq->pkts_processed = 0; + + /* Initialize the spinlock for this instruction queue */ + spin_lock_init(&iq->lock); + if (iq_no == 0) { + iq->allow_soft_cmds = true; + spin_lock_init(&iq->post_lock); + } else { + iq->allow_soft_cmds = false; + } + + spin_lock_init(&iq->iq_flush_running_lock); + + oct->io_qmask.iq |= BIT_ULL(iq_no); + + /* Set the 32B/64B mode for each input queue */ + oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); + iq->iqcmd_64B = (conf->instr_type == 64); + + oct->fn_list.setup_iq_regs(oct, iq_no); + + oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db", + WQ_MEM_RECLAIM, + 0); + if (!oct->check_db_wq[iq_no].wq) { + vfree(iq->request_list); + iq->request_list = NULL; + lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); + dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n", + iq_no); + return 1; + } + + db_wq = &oct->check_db_wq[iq_no]; + + INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout); + db_wq->wk.ctxptr = oct; + db_wq->wk.ctxul = iq_no; + queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); + + return 0; +} + +int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) +{ + u64 desc_size = 0, q_size; + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + + cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work); + destroy_workqueue(oct->check_db_wq[iq_no].wq); + + if (OCTEON_CN6XXX(oct)) + desc_size = + CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx)); + else if (OCTEON_CN23XX_PF(oct)) + desc_size = + CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf)); + else if (OCTEON_CN23XX_VF(oct)) + desc_size = + CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf)); + + vfree(iq->request_list); + + if (iq->base_addr) { + q_size = iq->max_count * desc_size; + lio_dma_free(oct, (u32)q_size, iq->base_addr, + iq->base_addr_dma); + oct->io_qmask.iq &= ~(1ULL << iq_no); + vfree(oct->instr_queue[iq_no]); + oct->instr_queue[iq_no] = NULL; + oct->num_iqs--; + return 0; + } + return 1; +} + +/* Return 0 on success, 1 on failure */ +int octeon_setup_iq(struct octeon_device *oct, + int ifidx, + int q_index, + union oct_txpciq txpciq, + u32 num_descs, + void *app_ctx) +{ + u32 iq_no = (u32)txpciq.s.q_no; + int numa_node = dev_to_node(&oct->pci_dev->dev); + + if (oct->instr_queue[iq_no]) { + dev_dbg(&oct->pci_dev->dev, "IQ is in use. 
Cannot create the IQ: %d again\n", + iq_no); + oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64; + oct->instr_queue[iq_no]->app_ctx = app_ctx; + return 0; + } + oct->instr_queue[iq_no] = + vzalloc_node(sizeof(struct octeon_instr_queue), numa_node); + if (!oct->instr_queue[iq_no]) + oct->instr_queue[iq_no] = + vzalloc(sizeof(struct octeon_instr_queue)); + if (!oct->instr_queue[iq_no]) + return 1; + + + oct->instr_queue[iq_no]->q_index = q_index; + oct->instr_queue[iq_no]->app_ctx = app_ctx; + oct->instr_queue[iq_no]->ifidx = ifidx; + + if (octeon_init_instr_queue(oct, txpciq, num_descs)) { + vfree(oct->instr_queue[iq_no]); + oct->instr_queue[iq_no] = NULL; + return 1; + } + + oct->num_iqs++; + if (oct->fn_list.enable_io_queues(oct)) { + octeon_delete_instr_queue(oct, iq_no); + return 1; + } + + return 0; +} + +int lio_wait_for_instr_fetch(struct octeon_device *oct) +{ + int i, retry = 1000, pending, instr_cnt = 0; + + do { + instr_cnt = 0; + + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { + if (!(oct->io_qmask.iq & BIT_ULL(i))) + continue; + pending = + atomic_read(&oct->instr_queue[i]->instr_pending); + if (pending) + __check_db_timeout(oct, i); + instr_cnt += pending; + } + + if (instr_cnt == 0) + break; + + schedule_timeout_uninterruptible(1); + + } while (retry-- && instr_cnt); + + return instr_cnt; +} + +static inline void +ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq) +{ + if (atomic_read(&oct->status) == OCT_DEV_RUNNING) { + writel(iq->fill_cnt, iq->doorbell_reg); + /* make sure doorbell write goes through */ + iq->fill_cnt = 0; + iq->last_db_time = jiffies; + return; + } +} + +void +octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no) +{ + struct octeon_instr_queue *iq; + + iq = oct->instr_queue[iq_no]; + spin_lock(&iq->post_lock); + if (iq->fill_cnt) + ring_doorbell(oct, iq); + spin_unlock(&iq->post_lock); +} + +static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq, + u8 *cmd) +{ + u8 *iqptr, cmdsize; + + cmdsize = ((iq->iqcmd_64B) ? 64 : 32); + iqptr = iq->base_addr + (cmdsize * iq->host_write_index); + + memcpy(iqptr, cmd, cmdsize); +} + +static inline struct iq_post_status +__post_command2(struct octeon_instr_queue *iq, u8 *cmd) +{ + struct iq_post_status st; + + st.status = IQ_SEND_OK; + + /* This ensures that the read index does not wrap around to the same + * position if queue gets full before Octeon could fetch any instr. + */ + if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) { + st.status = IQ_SEND_FAILED; + st.index = -1; + return st; + } + + if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2)) + st.status = IQ_SEND_STOP; + + __copy_cmd_into_iq(iq, cmd); + + /* "index" is returned, host_write_index is modified. */ + st.index = iq->host_write_index; + iq->host_write_index = incr_index(iq->host_write_index, 1, + iq->max_count); + iq->fill_cnt++; + + /* Flush the command into memory. We need to be sure the data is in + * memory before indicating that the instruction is pending. 
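+ * The wmb() below orders the descriptor write against the instr_pending
+ * increment that other contexts read to decide whether work is
+ * outstanding.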
+ */ + wmb(); + + atomic_inc(&iq->instr_pending); + + return st; +} + +int +octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, + void (*fn)(void *)) +{ + if (reqtype > REQTYPE_LAST) { + dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n", + __func__, reqtype); + return -EINVAL; + } + + reqtype_free_fn[oct->octeon_id][reqtype] = fn; + + return 0; +} + +static inline void +__add_to_request_list(struct octeon_instr_queue *iq, + int idx, void *buf, int reqtype) +{ + iq->request_list[idx].buf = buf; + iq->request_list[idx].reqtype = reqtype; +} + +/* Can only run in process context */ +int +lio_process_iq_request_list(struct octeon_device *oct, + struct octeon_instr_queue *iq, u32 napi_budget) +{ + struct cavium_wq *cwq = &oct->dma_comp_wq; + int reqtype; + void *buf; + u32 old = iq->flush_index; + u32 inst_count = 0; + unsigned int pkts_compl = 0, bytes_compl = 0; + struct octeon_soft_command *sc; + unsigned long flags; + + while (old != iq->octeon_read_index) { + reqtype = iq->request_list[old].reqtype; + buf = iq->request_list[old].buf; + + if (reqtype == REQTYPE_NONE) + goto skip_this; + + octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl, + &bytes_compl); + + switch (reqtype) { + case REQTYPE_NORESP_NET: + case REQTYPE_NORESP_NET_SG: + case REQTYPE_RESP_NET_SG: + reqtype_free_fn[oct->octeon_id][reqtype](buf); + break; + case REQTYPE_RESP_NET: + case REQTYPE_SOFT_COMMAND: + sc = buf; + /* We're expecting a response from Octeon. + * It's up to lio_process_ordered_list() to + * process sc. Add sc to the ordered soft + * command response list because we expect + * a response from Octeon. + */ + spin_lock_irqsave(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, flags); + atomic_inc(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count); + list_add_tail(&sc->node, &oct->response_list + [OCTEON_ORDERED_SC_LIST].head); + spin_unlock_irqrestore(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock, + flags); + break; + default: + dev_err(&oct->pci_dev->dev, + "%s Unknown reqtype: %d buf: %p at idx %d\n", + __func__, reqtype, buf, old); + } + + iq->request_list[old].buf = NULL; + iq->request_list[old].reqtype = 0; + + skip_this: + inst_count++; + old = incr_index(old, 1, iq->max_count); + + if ((napi_budget) && (inst_count >= napi_budget)) + break; + } + if (bytes_compl) + octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, + bytes_compl); + iq->flush_index = old; + + if (atomic_read(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count)) + queue_work(cwq->wq, &cwq->wk.work.work); + + return inst_count; +} + +/* Can only be called from process context */ +int +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 napi_budget) +{ + u32 inst_processed = 0; + u32 tot_inst_processed = 0; + int tx_done = 1; + + if (!spin_trylock(&iq->iq_flush_running_lock)) + return tx_done; + + spin_lock_bh(&iq->lock); + + iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); + + do { + /* Process any outstanding IQ packets. 
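+ * Stop once flush_index has caught up with the read index reported by
+ * the device.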
*/ + if (iq->flush_index == iq->octeon_read_index) + break; + + if (napi_budget) + inst_processed = + lio_process_iq_request_list(oct, iq, + napi_budget - + tot_inst_processed); + else + inst_processed = + lio_process_iq_request_list(oct, iq, 0); + + if (inst_processed) { + iq->pkts_processed += inst_processed; + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; + } + + tot_inst_processed += inst_processed; + } while (tot_inst_processed < napi_budget); + + if (napi_budget && (tot_inst_processed >= napi_budget)) + tx_done = 0; + + iq->last_db_time = jiffies; + + spin_unlock_bh(&iq->lock); + + spin_unlock(&iq->iq_flush_running_lock); + + return tx_done; +} + +/* Process instruction queue after timeout. + * This routine gets called from a workqueue or when removing the module. + */ +static void __check_db_timeout(struct octeon_device *oct, u64 iq_no) +{ + struct octeon_instr_queue *iq; + u64 next_time; + + if (!oct) + return; + + iq = oct->instr_queue[iq_no]; + if (!iq) + return; + + /* return immediately, if no work pending */ + if (!atomic_read(&iq->instr_pending)) + return; + /* If jiffies - last_db_time < db_timeout do nothing */ + next_time = iq->last_db_time + iq->db_timeout; + if (!time_after(jiffies, (unsigned long)next_time)) + return; + iq->last_db_time = jiffies; + + /* Flush the instruction queue */ + octeon_flush_iq(oct, iq, 0); + + lio_enable_irq(NULL, iq); +} + +/* Called by the Poll thread at regular intervals to check the instruction + * queue for commands to be posted and for commands that were fetched by Octeon. + */ +static void check_db_timeout(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; + u64 iq_no = wk->ctxul; + struct cavium_wq *db_wq = &oct->check_db_wq[iq_no]; + u32 delay = 10; + + __check_db_timeout(oct, iq_no); + queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay)); +} + +int +octeon_send_command(struct octeon_device *oct, u32 iq_no, + u32 force_db, void *cmd, void *buf, + u32 datasize, u32 reqtype) +{ + int xmit_stopped; + struct iq_post_status st; + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + + /* Get the lock and prevent other tasks and tx interrupt handler from + * running. + */ + if (iq->allow_soft_cmds) + spin_lock_bh(&iq->post_lock); + + st = __post_command2(iq, cmd); + + if (st.status != IQ_SEND_FAILED) { + xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype); + __add_to_request_list(iq, st.index, buf, reqtype); + INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); + INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); + + if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db || + xmit_stopped || st.status == IQ_SEND_STOP) + ring_doorbell(oct, iq); + } else { + INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); + } + + if (iq->allow_soft_cmds) + spin_unlock_bh(&iq->post_lock); + + /* This is only done here to expedite packets being flushed + * for cases where there are no IQ completion interrupts. 
+ */ + + return st.status; +} + +void +octeon_prepare_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc, + u8 opcode, + u8 subcode, + u32 irh_ossp, + u64 ossp0, + u64 ossp1) +{ + struct octeon_config *oct_cfg; + struct octeon_instr_ih2 *ih2; + struct octeon_instr_ih3 *ih3; + struct octeon_instr_pki_ih3 *pki_ih3; + struct octeon_instr_irh *irh; + struct octeon_instr_rdp *rdp; + + WARN_ON(opcode > 15); + WARN_ON(subcode > 127); + + oct_cfg = octeon_get_conf(oct); + + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { + ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; + + ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind; + + pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3; + + pki_ih3->w = 1; + pki_ih3->raw = 1; + pki_ih3->utag = 1; + pki_ih3->uqpg = + oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg; + pki_ih3->utt = 1; + pki_ih3->tag = LIO_CONTROL; + pki_ih3->tagtype = ATOMIC_TAG; + pki_ih3->qpg = + oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg; + + pki_ih3->pm = 0x7; + pki_ih3->sl = 8; + + if (sc->datasize) + ih3->dlengsz = sc->datasize; + + irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + irh->opcode = opcode; + irh->subcode = subcode; + + /* opcode/subcode specific parameters (ossp) */ + irh->ossp = irh_ossp; + sc->cmd.cmd3.ossp[0] = ossp0; + sc->cmd.cmd3.ossp[1] = ossp1; + + if (sc->rdatasize) { + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; + rdp->pcie_port = oct->pcie_port; + rdp->rlen = sc->rdatasize; + + irh->rflag = 1; + /*PKI IH3*/ + /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */ + ih3->fsz = LIO_SOFTCMDRESP_IH3; + } else { + irh->rflag = 0; + /*PKI IH3*/ + /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */ + ih3->fsz = LIO_PCICMD_O3; + } + + } else { + ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2; + ih2->tagtype = ATOMIC_TAG; + ih2->tag = LIO_CONTROL; + ih2->raw = 1; + ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg); + + if (sc->datasize) { + ih2->dlengsz = sc->datasize; + ih2->rs = 1; + } + + irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh; + irh->opcode = opcode; + irh->subcode = subcode; + + /* opcode/subcode specific parameters (ossp) */ + irh->ossp = irh_ossp; + sc->cmd.cmd2.ossp[0] = ossp0; + sc->cmd.cmd2.ossp[1] = ossp1; + + if (sc->rdatasize) { + rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp; + rdp->pcie_port = oct->pcie_port; + rdp->rlen = sc->rdatasize; + + irh->rflag = 1; + /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */ + ih2->fsz = LIO_SOFTCMDRESP_IH2; + } else { + irh->rflag = 0; + /* irh + ossp[0] + ossp[1] = 24 bytes */ + ih2->fsz = LIO_PCICMD_O2; + } + } +} + +int octeon_send_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc) +{ + struct octeon_instr_queue *iq; + struct octeon_instr_ih2 *ih2; + struct octeon_instr_ih3 *ih3; + struct octeon_instr_irh *irh; + u32 len; + + iq = oct->instr_queue[sc->iq_no]; + if (!iq->allow_soft_cmds) { + dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n", + sc->iq_no); + INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1); + return IQ_SEND_FAILED; + } + + if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { + ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; + if (ih3->dlengsz) { + WARN_ON(!sc->dmadptr); + sc->cmd.cmd3.dptr = sc->dmadptr; + } + irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; + if (irh->rflag) { + WARN_ON(!sc->dmarptr); + WARN_ON(!sc->status_word); + *sc->status_word = COMPLETION_WORD_INIT; + sc->cmd.cmd3.rptr = sc->dmarptr; + } + len = (u32)ih3->dlengsz; + } else { + ih2 = (struct 
octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+		if (ih2->dlengsz) {
+			WARN_ON(!sc->dmadptr);
+			sc->cmd.cmd2.dptr = sc->dmadptr;
+		}
+		irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+		if (irh->rflag) {
+			WARN_ON(!sc->dmarptr);
+			WARN_ON(!sc->status_word);
+			*sc->status_word = COMPLETION_WORD_INIT;
+			sc->cmd.cmd2.rptr = sc->dmarptr;
+		}
+		len = (u32)ih2->dlengsz;
+	}
+
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
+
+	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+				    len, REQTYPE_SOFT_COMMAND));
+}
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
+{
+	int i;
+	u64 dma_addr;
+	struct octeon_soft_command *sc;
+
+	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+	spin_lock_init(&oct->sc_buf_pool.lock);
+	atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);
+
+	for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
+		sc = (struct octeon_soft_command *)
+			lio_dma_alloc(oct,
+				      SOFT_COMMAND_BUFFER_SIZE,
+				      (dma_addr_t *)&dma_addr);
+		if (!sc) {
+			octeon_free_sc_buffer_pool(oct);
+			return 1;
+		}
+
+		sc->dma_addr = dma_addr;
+		sc->size = SOFT_COMMAND_BUFFER_SIZE;
+
+		list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+	}
+
+	return 0;
+}
+
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *done_sc_list, *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+	if (!atomic_read(&done_sc_list->pending_req_count))
+		return 0;
+
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+
+		if (READ_ONCE(sc->caller_is_done)) {
+			list_del(&sc->node);
+			atomic_dec(&done_sc_list->pending_req_count);
+
+			if (*sc->status_word == COMPLETION_WORD_INIT) {
+				/* timeout; move sc to zombie list */
+				list_add_tail(&sc->node,
+					      &zombie_sc_list->head);
+				atomic_inc(&zombie_sc_list->pending_req_count);
+			} else {
+				octeon_free_soft_command(oct, sc);
+			}
+		}
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+		list_del(tmp);
+		atomic_dec(&zombie_sc_list->pending_req_count);
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+		octeon_free_soft_command(oct, sc);
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
+int octeon_free_sc_buffer_pool(struct octeon_device *oct)
+{
+	struct list_head *tmp, *tmp2;
+	struct octeon_soft_command *sc;
+
+	octeon_free_sc_zombie_list(oct);
+
+	spin_lock_bh(&oct->sc_buf_pool.lock);
+
+	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
+		list_del(tmp);
+
+		sc = (struct octeon_soft_command *)tmp;
+
+		lio_dma_free(oct, sc->size, sc, sc->dma_addr);
+	}
+
+	INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+
+	spin_unlock_bh(&oct->sc_buf_pool.lock);
+
+	return 0;
+}
+
+struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
+						      u32 datasize,
+						      u32 rdatasize,
+						      u32 ctxsize)
+{
+	u64 dma_addr;
+	u32 size;
+	u32 offset = sizeof(struct octeon_soft_command);
+	struct octeon_soft_command *sc = NULL;
+	struct list_head *tmp;
+
+	if (!rdatasize)
+		rdatasize = 16;
+
+	WARN_ON((offset + datasize + rdatasize + ctxsize) >
+		SOFT_COMMAND_BUFFER_SIZE);
+
+	spin_lock_bh(&oct->sc_buf_pool.lock);
+
+	if (list_empty(&oct->sc_buf_pool.head)) {
+		spin_unlock_bh(&oct->sc_buf_pool.lock);
+		return NULL;
+	}
+
+	list_for_each(tmp, &oct->sc_buf_pool.head)
+		break;
+
+	list_del(tmp);
+
+	atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
+
+	spin_unlock_bh(&oct->sc_buf_pool.lock);
+
+	sc = (struct octeon_soft_command *)tmp;
+
+	dma_addr = sc->dma_addr;
+	size = sc->size;
+
+	memset(sc, 0, sc->size);
+
+	sc->dma_addr = dma_addr;
+	sc->size = size;
+
+	if (ctxsize) {
+		sc->ctxptr = (u8 *)sc + offset;
+		sc->ctxsize = ctxsize;
+	}
+
+	/* Start data at 128 byte boundary */
+	offset = (offset + ctxsize + 127) & 0xffffff80;
+
+	if (datasize) {
+		sc->virtdptr = (u8 *)sc + offset;
+		sc->dmadptr = dma_addr + offset;
+		sc->datasize = datasize;
+	}
+
+	/* Start rdata at 128 byte boundary */
+	offset = (offset + datasize + 127) & 0xffffff80;
+
+	if (rdatasize) {
+		WARN_ON(rdatasize < 16);
+		sc->virtrptr = (u8 *)sc + offset;
+		sc->dmarptr = dma_addr + offset;
+		sc->rdatasize = rdatasize;
+		sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
+	}
+
+	return sc;
+}
+
+void octeon_free_soft_command(struct octeon_device *oct,
+			      struct octeon_soft_command *sc)
+{
+	spin_lock_bh(&oct->sc_buf_pool.lock);
+
+	list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+
+	atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
+
+	spin_unlock_bh(&oct->sc_buf_pool.lock);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
new file mode 100644
index 000000000..ac7747ccf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -0,0 +1,234 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+
+static void oct_poll_req_completion(struct work_struct *work);
+
+int octeon_setup_response_list(struct octeon_device *oct)
+{
+	int i, ret = 0;
+	struct cavium_wq *cwq;
+
+	for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
+		INIT_LIST_HEAD(&oct->response_list[i].head);
+		spin_lock_init(&oct->response_list[i].lock);
+		atomic_set(&oct->response_list[i].pending_req_count, 0);
+	}
+	spin_lock_init(&oct->cmd_resp_wqlock);
+
+	oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+	if (!oct->dma_comp_wq.wq) {
+		dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
+		return -ENOMEM;
+	}
+
+	cwq = &oct->dma_comp_wq;
+	INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
+	cwq->wk.ctxptr = oct;
+	oct->cmd_resp_state = OCT_DRV_ONLINE;
+
+	return ret;
+}
+
+void octeon_delete_response_list(struct octeon_device *oct)
+{
+	cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
+	destroy_workqueue(oct->dma_comp_wq.wq);
+}
+
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+			     u32 force_quit)
+{
+	struct octeon_response_list *ordered_sc_list;
+	struct octeon_soft_command *sc;
+	int request_complete = 0;
+	int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
+	u32 status;
+	u64 status64;
+
+	octeon_free_sc_done_list(octeon_dev);
+
+	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
+
+	do {
+		spin_lock_bh(&ordered_sc_list->lock);
+
+		if (list_empty(&ordered_sc_list->head)) {
+			spin_unlock_bh(&ordered_sc_list->lock);
+			return 1;
+		}
+
+		sc = list_first_entry(&ordered_sc_list->head,
+				      struct octeon_soft_command, node);
+
+		status = OCTEON_REQUEST_PENDING;
+
+		/* check if octeon has finished DMA'ing a response
+		 * to where rptr is pointing to
+		 */
+		status64 = *sc->status_word;
+
+		if (status64 != COMPLETION_WORD_INIT) {
+			/* This logic ensures that all 64b have been written.
+			 * 1. check byte 0 for non-FF
+			 * 2. if non-FF, then swap result from BE to host order
+			 * 3. check byte 7 (swapped to 0) for non-FF
+			 * 4. if non-FF, use the low 32-bit status code
+			 * 5. if either byte 0 or byte 7 is FF, don't use status
+			 */
+			if ((status64 & 0xff) != 0xff) {
+				octeon_swap_8B_data(&status64, 1);
+				if (((status64 & 0xff) != 0xff)) {
+					/* retrieve 16-bit firmware status */
+					status = (u32)(status64 & 0xffffULL);
+					if (status) {
+						status =
+						  FIRMWARE_STATUS_CODE(status);
+					} else {
+						/* i.e. no error */
+						status = OCTEON_REQUEST_DONE;
+					}
+				}
+			}
+		} else if (unlikely(force_quit) || (sc->expiry_time &&
+			time_after(jiffies, (unsigned long)sc->expiry_time))) {
+			struct octeon_instr_irh *irh =
+				(struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+
+			dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__);
+			dev_err(&octeon_dev->pci_dev->dev,
+				"cmd %x/%x/%llx/%llx failed, ",
+				irh->opcode, irh->subcode,
+				sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]);
+			dev_err(&octeon_dev->pci_dev->dev,
+				"timeout (%ld, %ld)\n",
+				(long)jiffies, (long)sc->expiry_time);
+			status = OCTEON_REQUEST_TIMEOUT;
+		}
+
+		if (status != OCTEON_REQUEST_PENDING) {
+			sc->sc_status = status;
+
+			/* we have received a response or we have timed out */
+			/* remove node from linked list */
+			list_del(&sc->node);
+			atomic_dec(&octeon_dev->response_list
+				   [OCTEON_ORDERED_SC_LIST].
+				   pending_req_count);
+
+			if (!sc->callback) {
+				atomic_inc(&octeon_dev->response_list
+					   [OCTEON_DONE_SC_LIST].
+					   pending_req_count);
+				list_add_tail(&sc->node,
+					      &octeon_dev->response_list
+					      [OCTEON_DONE_SC_LIST].head);
+
+				if (unlikely(READ_ONCE(sc->caller_is_done))) {
+					/* caller does not wait for response
+					 * from firmware
+					 */
+					if (status != OCTEON_REQUEST_DONE) {
+						struct octeon_instr_irh *irh;
+
+						irh =
+						    (struct octeon_instr_irh *)
+						    &sc->cmd.cmd3.irh;
+						dev_dbg
+						    (&octeon_dev->pci_dev->dev,
+						     "%s: sc failed: opcode=%x, ",
+						     __func__, irh->opcode);
+						dev_dbg
+						    (&octeon_dev->pci_dev->dev,
+						     "subcode=%x, ossp[0]=%llx, ",
+						     irh->subcode,
+						     sc->cmd.cmd3.ossp[0]);
+						dev_dbg
+						    (&octeon_dev->pci_dev->dev,
+						     "ossp[1]=%llx, status=%d\n",
+						     sc->cmd.cmd3.ossp[1],
+						     status);
+					}
+				} else {
+					complete(&sc->complete);
+				}
+
+				spin_unlock_bh(&ordered_sc_list->lock);
+			} else {
+				/* sc with callback function */
+				if (status == OCTEON_REQUEST_TIMEOUT) {
+					atomic_inc(&octeon_dev->response_list
+						   [OCTEON_ZOMBIE_SC_LIST].
+						   pending_req_count);
+					list_add_tail(&sc->node,
+						      &octeon_dev->response_list
+						      [OCTEON_ZOMBIE_SC_LIST].
+						      head);
+				}
+
+				spin_unlock_bh(&ordered_sc_list->lock);
+
+				sc->callback(octeon_dev, status,
+					     sc->callback_arg);
+				/* sc is freed by caller */
+			}
+
+			request_complete++;
+
+		} else {
+			/* no response yet */
+			request_complete = 0;
+			spin_unlock_bh
+			    (&ordered_sc_list->lock);
+		}
+
+		/* If we hit the max ordered requests to process every loop,
+		 * we quit and let this function be invoked again the next
+		 * time the poll thread runs, to process the remaining
+		 * requests. This function can take up the entire CPU if
+		 * there is no upper limit to the requests processed.
+		 */
+		if (request_complete >= resp_to_process)
+			break;
+	} while (request_complete);
+
+	return 0;
+}
+
+static void oct_poll_req_completion(struct work_struct *work)
+{
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+	struct cavium_wq *cwq = &oct->dma_comp_wq;
+
+	lio_process_ordered_list(oct, 0);
+
+	if (atomic_read(&oct->response_list
+			[OCTEON_ORDERED_SC_LIST].pending_req_count))
+		queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
new file mode 100644
index 000000000..ed4020d26
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -0,0 +1,143 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+
+/*! \file response_manager.h
+ * \brief Host Driver: Response queues for host instructions.
+ */
+
+#ifndef __RESPONSE_MANAGER_H__
+#define __RESPONSE_MANAGER_H__
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function could run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define MAX_ORD_REQS_TO_PROCESS   4096
+
+/** Head of a response list. There are several response lists in the
+ * system: one for each response order (unordered, ordered), and one for
+ * no-response entries on each instruction queue.
+ */
+struct octeon_response_list {
+	/** List structure to add/delete pending entries to */
+	struct list_head head;
+
+	/** A lock for this response list */
+	spinlock_t lock;
+
+	atomic_t pending_req_count;
+};
+
+/** The type of response list.
+ */
+enum {
+	OCTEON_ORDERED_LIST = 0,
+	OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
+	OCTEON_UNORDERED_BLOCKING_LIST = 2,
+	OCTEON_ORDERED_SC_LIST = 3,
+	OCTEON_DONE_SC_LIST = 4,
+	OCTEON_ZOMBIE_SC_LIST = 5
+};
+
+/** Response Order values for an Octeon Request. */
+enum {
+	OCTEON_RESP_ORDERED = 0,
+	OCTEON_RESP_UNORDERED = 1,
+	OCTEON_RESP_NORESPONSE = 2
+};
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ *   31            16 15             0
+ *   ---------------------------------
+ *   |               |               |
+ *   ---------------------------------
+ * Error codes are 32-bit wide. The upper 16 bits, called the Major Error
+ * Number, are reserved to identify the group to which the error code
+ * belongs. The lower 16 bits, called the Minor Error Number, carry the
+ * actual code.
+ *
+ * So error codes are (MAJOR NUMBER << 16) | MINOR NUMBER.
+ */
+
+/*------------   Error codes used by host driver   -----------------*/
+#define DRIVER_MAJOR_ERROR_CODE           0x0000
+/*------ Error codes used by firmware (bits 15..0 set by firmware) ------*/
+#define FIRMWARE_MAJOR_ERROR_CODE         0x0001
+
+/** A value of 0x00000000 indicates no error, i.e. success */
+#define DRIVER_ERROR_NONE                 0x00000000
+
+#define DRIVER_ERROR_REQ_PENDING          0x00000001
+#define DRIVER_ERROR_REQ_TIMEOUT          0x00000003
+#define DRIVER_ERROR_REQ_EINTR            0x00000004
+#define DRIVER_ERROR_REQ_ENXIO            0x00000006
+#define DRIVER_ERROR_REQ_ENOMEM           0x0000000C
+#define DRIVER_ERROR_REQ_EINVAL           0x00000016
+#define DRIVER_ERROR_REQ_FAILED           0x000000ff
+
+/** Status for a request.
+ * If a request is not queued to Octeon by the driver, the driver returns
+ * an error condition that is described by one of the OCTEON_REQ_ERR_* values
+ * below. If the request is successfully queued, the driver will return
+ * an OCTEON_REQUEST_PENDING status. OCTEON_REQUEST_TIMEOUT and
+ * OCTEON_REQUEST_INTERRUPTED are only returned by the driver if the
+ * response for the request failed to arrive before a time-out period, or if
+ * the request processing got interrupted due to a signal, respectively.
+ */
+enum {
+	OCTEON_REQUEST_DONE = (DRIVER_ERROR_NONE),
+	OCTEON_REQUEST_PENDING = (DRIVER_ERROR_REQ_PENDING),
+	OCTEON_REQUEST_TIMEOUT = (DRIVER_ERROR_REQ_TIMEOUT),
+	OCTEON_REQUEST_INTERRUPTED = (DRIVER_ERROR_REQ_EINTR),
+	OCTEON_REQUEST_NO_DEVICE = (0x00000021),
+	OCTEON_REQUEST_NOT_RUNNING,
+	OCTEON_REQUEST_INVALID_IQ,
+	OCTEON_REQUEST_INVALID_BUFCNT,
+	OCTEON_REQUEST_INVALID_RESP_ORDER,
+	OCTEON_REQUEST_NO_MEMORY,
+	OCTEON_REQUEST_INVALID_BUFSIZE,
+	OCTEON_REQUEST_NO_PENDING_ENTRY,
+	OCTEON_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
+
+};
+
+#define FIRMWARE_STATUS_CODE(status) \
+	((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
+
+/** Initialize the response lists. One list is created for each response-list
+ * type (MAX_RESPONSE_LISTS in total).
+ * @param octeon_dev      - the octeon device structure.
+ */
+int octeon_setup_response_list(struct octeon_device *octeon_dev);
+
+void octeon_delete_response_list(struct octeon_device *octeon_dev);
+
+/** Check the status of the first entry in the ordered list. If the
+ * instruction at that entry has finished processing or has timed out,
+ * the entry is cleaned.
+ * @param octeon_dev  - the octeon device structure.
+ * @param force_quit  - the request is forced to time out if this is 1
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+			     u32 force_quit);
+
+#endif
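
Editorial note on the completion-word handling above: the byte-order checks in lio_process_ordered_list() and the (major << 16) | minor error-code layout from response_manager.h can be condensed into a small stand-alone sketch. The snippet below is illustrative only and is not driver code: decode_completion_word(), the sample words, and the use of __builtin_bswap64() in place of octeon_swap_8B_data() are assumptions made for the example (a little-endian GCC/Clang host is assumed).

/* Stand-alone illustration (not driver code): decode a LiquidIO completion
 * word the way lio_process_ordered_list() does.  Constants mirror
 * response_manager.h; decode_completion_word() is a hypothetical helper.
 */
#include <stdint.h>
#include <stdio.h>

#define COMPLETION_WORD_INIT       0xFFFFFFFFFFFFFFFFULL
#define FIRMWARE_MAJOR_ERROR_CODE  0x0001
#define FIRMWARE_STATUS_CODE(s)    ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (s))
#define OCTEON_REQUEST_DONE        0x00000000
#define OCTEON_REQUEST_PENDING     0x00000001

static uint32_t decode_completion_word(uint64_t status64)
{
	/* Word still holds the init pattern: firmware has not responded. */
	if (status64 == COMPLETION_WORD_INIT)
		return OCTEON_REQUEST_PENDING;

	/* Byte 0 must be non-0xFF before the word can be trusted. */
	if ((status64 & 0xff) == 0xff)
		return OCTEON_REQUEST_PENDING;

	/* Swap the big-endian firmware word to host order (stand-in for
	 * octeon_swap_8B_data(), assuming a little-endian host).
	 */
	status64 = __builtin_bswap64(status64);

	/* The original byte 7 (now byte 0) must also be non-0xFF. */
	if ((status64 & 0xff) == 0xff)
		return OCTEON_REQUEST_PENDING;

	/* The low 16 bits carry the firmware minor status; zero is success. */
	uint16_t fw_status = (uint16_t)(status64 & 0xffff);

	return fw_status ? FIRMWARE_STATUS_CODE(fw_status) : OCTEON_REQUEST_DONE;
}

int main(void)
{
	/* Hypothetical words as the host would read them from the response
	 * buffer: untouched, success, and firmware minor error 0x2a.
	 */
	uint64_t samples[] = {
		COMPLETION_WORD_INIT,
		__builtin_bswap64(0x0000000000000000ULL),
		__builtin_bswap64(0x000000000000002aULL),
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("word %u -> status 0x%08x\n", i,
		       decode_completion_word(samples[i]));

	return 0;
}

With these assumptions, the second sample decodes to OCTEON_REQUEST_DONE and the third to FIRMWARE_STATUS_CODE(0x2a) = 0x0001002a, matching the major/minor encoding described in the header.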