path: root/drivers/net/ethernet/cavium
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/cavium
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz / linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (tag: upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/cavium')
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig  103
-rw-r--r--  drivers/net/ethernet/cavium/Makefile  8
-rw-r--r--  drivers/net/ethernet/cavium/common/Makefile  2
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c  348
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.h  70
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/Makefile  24
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c  1512
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h  73
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h  599
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c  682
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h  48
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h  274
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.c  737
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h  98
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h  530
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c  183
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.h  27
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h  45
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c  1814
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c  3182
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c  4366
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c  2442
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c  672
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h  49
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h  1039
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_image.h  54
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_config.h  472
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c  920
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c  1464
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h  911
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c  969
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h  416
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h  399
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c  375
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h  122
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h  236
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c  201
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h  72
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_network.h  626
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.c  198
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.h  288
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c  936
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.c  234
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.h  143
-rw-r--r--  drivers/net/ethernet/cavium/octeon/Makefile  6
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c  1557
-rw-r--r--  drivers/net/ethernet/cavium/thunder/Makefile  13
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h  638
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c  1416
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h  230
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c  887
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c  2323
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c  1972
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h  374
-rw-r--r--  drivers/net/ethernet/cavium/thunder/q_struct.h  696
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c  1718
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h  263
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c  231
58 files changed, 40287 insertions, 0 deletions
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644
index 000000000..1c76c95b0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+ bool "Cavium ethernet drivers"
+ default y
+ help
+ Select this option if you want to enable Cavium network support.
+
+ If you have a Cavium SoC or network adapter, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+ tristate "Thunder Physical function driver"
+ depends on 64BIT && PCI
+ select THUNDER_NIC_BGX
+ help
+ This driver supports Thunder's NIC physical function.
+ The NIC provides the controller and DMA engines to
+ move network traffic to/from memory. The NIC
+ works closely with TNS, BGX and SerDes to implement the
+ functions replacing and virtualizing those of a typical
+ standalone PCIe NIC chip.
+
+config THUNDER_NIC_VF
+ tristate "Thunder Virtual function driver"
+ imply CAVIUM_PTP
+ depends on 64BIT && PCI
+ help
+ This driver supports Thunder's NIC virtual function.
+
+config THUNDER_NIC_BGX
+ tristate "Thunder MAC interface driver (BGX)"
+ depends on 64BIT && PCI
+ select PHYLIB
+ select MDIO_THUNDER if PCI
+ select THUNDER_NIC_RGX
+ help
+ This driver supports programming and controlling of the MAC
+ interface from the NIC physical function driver.
+
+config THUNDER_NIC_RGX
+ tristate "Thunder MAC interface driver (RGX)"
+ depends on 64BIT && PCI
+ select PHYLIB
+ select MDIO_THUNDER if PCI
+ help
+ This driver supports configuring the XCV block of the RGX interface
+ present on the CN81XX chip.
+
+config CAVIUM_PTP
+ tristate "Cavium PTP coprocessor as PTP clock"
+ depends on 64BIT && PCI
+ depends on PTP_1588_CLOCK
+ help
+ This driver adds support for the Precision Time Protocol Clocks and
+ Timestamping coprocessor (PTP) found on Cavium processors.
+ PTP provides a timestamping mechanism that is suitable for use in IEEE 1588
+ Precision Time Protocol or for other purposes. Timestamps can be used in
+ BGX, TNS, GTI, and NIC blocks.
+
+config LIQUIDIO
+ tristate "Cavium LiquidIO support"
+ depends on 64BIT && PCI
+ depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select FW_LOADER
+ select LIBCRC32C
+ select NET_DEVLINK
+ help
+ This driver supports Cavium LiquidIO Intelligent Server Adapters
+ based on CN66XX, CN68XX and CN23XX chips.
+
+ To compile this driver as a module, choose M here: the module
+ will be called liquidio. This is recommended.
+
+config OCTEON_MGMT_ETHERNET
+ tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
+ depends on CAVIUM_OCTEON_SOC
+ select PHYLIB
+ select MDIO_OCTEON
+ default y
+ help
+ Enable the ethernet driver for the management
+ port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
+ CN54XX, CN52XX, and CN6XXX chips.
+
+config LIQUIDIO_VF
+ tristate "Cavium LiquidIO VF support"
+ depends on 64BIT && PCI_MSI
+ depends on PTP_1588_CLOCK_OPTIONAL
+ help
+ This driver supports Cavium LiquidIO Intelligent Server Adapter
+ based on CN23XX chips.
+
+ To compile this driver as a module, choose M here: The module
+ will be called liquidio_vf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
+endif # NET_VENDOR_CAVIUM
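
For reference, a minimal .config fragment that builds the drivers declared above as modules (a sketch assuming a 64-bit PCI platform; OCTEON_MGMT_ETHERNET additionally requires CAVIUM_OCTEON_SOC and is omitted here) could look like:

    CONFIG_NET_VENDOR_CAVIUM=y
    CONFIG_THUNDER_NIC_BGX=m
    CONFIG_THUNDER_NIC_RGX=m
    CONFIG_THUNDER_NIC_PF=m
    CONFIG_THUNDER_NIC_VF=m
    CONFIG_CAVIUM_PTP=m
    CONFIG_LIQUIDIO=m
    CONFIG_LIQUIDIO_VF=m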
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644
index 000000000..5d3280821
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += common/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/
diff --git a/drivers/net/ethernet/cavium/common/Makefile b/drivers/net/ethernet/cavium/common/Makefile
new file mode 100644
index 000000000..e3f87bd65
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CAVIUM_PTP) += cavium_ptp.o
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
new file mode 100644
index 000000000..9fd717b9c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+/* cavium_ptp.c - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/timecounter.h>
+#include <linux/pci.h>
+
+#include "cavium_ptp.h"
+
+#define DRV_NAME "cavium_ptp"
+
+#define PCI_DEVICE_ID_CAVIUM_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_88XX_PTP 0xA10C
+#define PCI_SUBSYS_DEVID_81XX_PTP 0xA20C
+#define PCI_SUBSYS_DEVID_83XX_PTP 0xA30C
+#define PCI_DEVICE_ID_CAVIUM_RST 0xA00E
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 ptp_cavium_clock_get(void)
+{
+ struct pci_dev *pdev;
+ void __iomem *base;
+ u64 ret = CLOCK_BASE_RATE * 16;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ ret = CLOCK_BASE_RATE * ((readq(base + RST_BOOT) >> 33) & 0x3f);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
+
+struct cavium_ptp *cavium_ptp_get(void)
+{
+ struct cavium_ptp *ptp;
+ struct pci_dev *pdev;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_CAVIUM_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+EXPORT_SYMBOL(cavium_ptp_get);
+
+void cavium_ptp_put(struct cavium_ptp *ptp)
+{
+ if (!ptp)
+ return;
+ pci_dev_put(ptp->pdev);
+}
+EXPORT_SYMBOL(cavium_ptp_put);
+
+/**
+ * cavium_ptp_adjfine() - Adjust ptp frequency
+ * @ptp_info: PTP clock info
+ * @scaled_ppm: how much to adjust by, in parts per million, but with a
+ * 16 bit binary fractional field
+ */
+static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 comp;
+ u64 adj;
+ bool neg_adj = false;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The typical convention is that
+ * it represents the number of nanoseconds between each cycle. In
+ * this convention the compensation value is in a 64-bit fixed-point
+ * representation where the upper 32 bits are the number of
+ * nanoseconds and the lower 32 bits are fractions of a nanosecond.
+ * scaled_ppm gives the requested correction in parts per million,
+ * scaled by a 16-bit binary fractional field.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated initially
+ * in cavium_ptp_init() -> tbase = 1/Hz. Then we use an
+ * endian-independent structure definition to write the data to the
+ * PTP register.
+ */
+ comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ adj = comp * scaled_ppm;
+ adj >>= 16;
+ adj = div_u64(adj, 1000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ writeq(comp, clock->reg_base + PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
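
As a rough sketch of the fixed-point arithmetic above, with hypothetical numbers (an assumed 800 MHz coprocessor clock and a +1 ppm request, i.e. scaled_ppm = 1 << 16); this is illustrative only, not driver code:

    #include <linux/types.h>
    #include <linux/math64.h>

    /* Mirrors the steps of cavium_ptp_adjfine() with example values. */
    static u64 example_ptp_comp(void)
    {
    	u64 clock_rate = 800000000ull;	/* assumed coprocessor clock, in Hz */
    	long scaled_ppm = 1 << 16;	/* +1 ppm request */
    	u64 comp = ((u64)1000000000ull << 32) / clock_rate; /* 1.25 ns in 32.32 fixed point */
    	u64 adj = (comp * scaled_ppm) >> 16;	/* comp scaled by the ppm value */

    	adj = div_u64(adj, 1000000ull);	/* ppm -> fraction: comp * 1e-6 */
    	return comp + adj;		/* ~1.25 ns * (1 + 1e-6), written to PTP_CLOCK_COMP */
    }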
+
+/**
+ * cavium_ptp_adjtime() - Adjust ptp time
+ * @ptp_info: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
+ */
+static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_adjtime(&clock->time_counter, delta);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ /* Sync, for network driver to get latest value */
+ smp_mb();
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_gettime() - Get hardware clock time with adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ nsec = timecounter_read(&clock->time_counter);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_settime() - Set hardware clock time. Reset adjustment
+ * @ptp_info: PTP clock info
+ * @ts: timespec
+ */
+static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct cavium_ptp *clock =
+ container_of(ptp_info, struct cavium_ptp, ptp_info);
+ unsigned long flags;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&clock->spin_lock, flags);
+ timecounter_init(&clock->time_counter, &clock->cycle_counter, nsec);
+ spin_unlock_irqrestore(&clock->spin_lock, flags);
+
+ return 0;
+}
+
+/**
+ * cavium_ptp_enable() - Request to enable or disable an ancillary feature.
+ * @ptp_info: PTP clock info
+ * @rq: request
+ * @on: is it on
+ */
+static int cavium_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static u64 cavium_ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct cavium_ptp *clock =
+ container_of(cc, struct cavium_ptp, cycle_counter);
+
+ return readq(clock->reg_base + PTP_CLOCK_HI);
+}
+
+static int cavium_ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct cavium_ptp *clock;
+ struct cyclecounter *cc;
+ u64 clock_cfg;
+ u64 clock_comp;
+ int err;
+
+ clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
+ if (!clock) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ clock->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ clock->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ spin_lock_init(&clock->spin_lock);
+
+ cc = &clock->cycle_counter;
+ cc->read = cavium_ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
+
+ timecounter_init(&clock->time_counter, &clock->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ clock->clock_rate = ptp_cavium_clock_get();
+
+ clock->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "ThunderX PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = cavium_ptp_adjfine,
+ .adjtime = cavium_ptp_adjtime,
+ .gettime64 = cavium_ptp_gettime,
+ .settime64 = cavium_ptp_settime,
+ .enable = cavium_ptp_enable,
+ };
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / clock->clock_rate;
+ writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP);
+
+ clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev);
+ if (IS_ERR(clock->ptp_clock)) {
+ err = PTR_ERR(clock->ptp_clock);
+ goto error_stop;
+ }
+
+ pci_set_drvdata(pdev, clock);
+ return 0;
+
+error_stop:
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+ pcim_iounmap_regions(pdev, 1 << PCI_PTP_BAR_NO);
+
+error_free:
+ devm_kfree(dev, clock);
+
+error:
+ /* For `cavium_ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void cavium_ptp_remove(struct pci_dev *pdev)
+{
+ struct cavium_ptp *clock = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(clock))
+ return;
+
+ ptp_clock_unregister(clock->ptp_clock);
+
+ clock_cfg = readq(clock->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, clock->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id cavium_ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_88XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_81XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_CAVIUM_PTP,
+ PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_83XX_PTP) },
+ { 0, }
+};
+
+static struct pci_driver cavium_ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = cavium_ptp_id_table,
+ .probe = cavium_ptp_probe,
+ .remove = cavium_ptp_remove,
+};
+
+module_pci_driver(cavium_ptp_driver);
+
+MODULE_DESCRIPTION(DRV_NAME);
+MODULE_AUTHOR("Cavium Networks <support@cavium.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(pci, cavium_ptp_id_table);
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
new file mode 100644
index 000000000..1e0ffe8f4
--- /dev/null
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* cavium_ptp.h - PTP 1588 clock on Cavium hardware
+ * Copyright (c) 2003-2015, 2017 Cavium, Inc.
+ */
+
+#ifndef CAVIUM_PTP_H
+#define CAVIUM_PTP_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
+struct cavium_ptp {
+ struct pci_dev *pdev;
+
+ /* Serialize access to cycle_counter, time_counter and hw_registers */
+ spinlock_t spin_lock;
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+ void __iomem *reg_base;
+
+ u32 clock_rate;
+
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+};
+
+#if IS_REACHABLE(CONFIG_CAVIUM_PTP)
+
+struct cavium_ptp *cavium_ptp_get(void);
+void cavium_ptp_put(struct cavium_ptp *ptp);
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ unsigned long flags;
+ u64 ret;
+
+ spin_lock_irqsave(&ptp->spin_lock, flags);
+ ret = timecounter_cyc2time(&ptp->time_counter, tstamp);
+ spin_unlock_irqrestore(&ptp->spin_lock, flags);
+
+ return ret;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return ptp_clock_index(clock->ptp_clock);
+}
+
+#else
+
+static inline struct cavium_ptp *cavium_ptp_get(void)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void cavium_ptp_put(struct cavium_ptp *ptp) {}
+
+static inline u64 cavium_ptp_tstamp2time(struct cavium_ptp *ptp, u64 tstamp)
+{
+ return 0;
+}
+
+static inline int cavium_ptp_clock_index(struct cavium_ptp *clock)
+{
+ return -1;
+}
+
+#endif
+
+#endif
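
For context, a sketch of how a consumer driver (for example the Thunder VF driver, which implies CAVIUM_PTP in the Kconfig above) might use this API; the function names below are illustrative and not taken from an existing driver:

    #include <linux/err.h>
    #include <linux/ktime.h>
    #include <linux/skbuff.h>
    #include "cavium_ptp.h"

    /* Acquire the shared PTP clock; -EPROBE_DEFER means the cavium_ptp
     * device exists but has not finished probing yet.
     */
    static int example_attach_ptp(struct cavium_ptp **pptp)
    {
    	struct cavium_ptp *ptp = cavium_ptp_get();

    	if (IS_ERR(ptp))
    		return PTR_ERR(ptp);

    	*pptp = ptp;
    	return 0;
    }

    /* Convert a raw hardware counter value into a ktime for the stack. */
    static void example_rx_tstamp(struct cavium_ptp *ptp, struct sk_buff *skb,
    			      u64 hw_tstamp)
    {
    	u64 ns = cavium_ptp_tstamp2time(ptp, hw_tstamp);

    	skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
    }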
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile
new file mode 100644
index 000000000..bc9937502
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/Makefile
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Cavium Liquidio ethernet device driver
+#
+
+common-objs := lio_ethtool.o \
+ lio_core.o \
+ request_manager.o \
+ response_manager.o \
+ octeon_device.o \
+ cn66xx_device.o \
+ cn68xx_device.o \
+ cn23xx_pf_device.o \
+ cn23xx_vf_device.o \
+ octeon_mailbox.o \
+ octeon_mem_ops.o \
+ octeon_droq.o \
+ octeon_nic.o
+
+obj-$(CONFIG_LIQUIDIO) += liquidio.o
+liquidio-y := lio_main.o octeon_console.o lio_vf_rep.o $(common-objs)
+
+obj-$(CONFIG_LIQUIDIO_VF) += liquidio_vf.o
+liquidio_vf-y := lio_vf_main.o $(common-objs)
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
new file mode 100644
index 000000000..9ed3d1ab2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c
@@ -0,0 +1,1512 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/etherdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_pf_device.h"
+#include "octeon_main.h"
+#include "octeon_mailbox.h"
+
+#define RESET_NOTDONE 0
+#define RESET_DONE 1
+
+/* Change the value of SLI Packet Input Jabber Register to allow
+ * VXLAN TSO packets which can be 64424 bytes, exceeding the
+ * MAX_GSO_SIZE we supplied to the kernel
+ */
+#define CN23XX_INPUT_JABBER 64600
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
+{
+ int i = 0;
+ u32 regval = 0;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ /*In cn23xx_soft_reset*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
+ "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST));
+
+ /*In cn23xx_set_dpi_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
+ lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_ENB", i,
+ CN23XX_DPI_DMA_ENG_ENB(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_DMA_ENG_BUF", i,
+ CN23XX_DPI_DMA_ENG_BUF(i),
+ lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
+ CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));
+
+ /*In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_CONFIG_PCIE_DEVCTL",
+ CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));
+
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
+ CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
+ lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));
+
+ /*In cn23xx_specific_regs_setup */
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
+ CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
+ (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+
+ /*In cn23xx_setup_global_mac_regs*/
+ for (i = 0; i < CN23XX_MAX_MACS; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_MAC_RINFO64", i,
+ CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64
+ (i, oct->pf_num))));
+ }
+
+ /*In cn23xx_setup_global_input_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_PKT_CONTROL64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
+ CVM_CAST64(octeon_read_csr64
+ (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
+ }
+
+ /*In cn23xx_setup_global_output_regs*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_CONTROL", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
+ }
+
+ /*In cn23xx_enable_interrupt and cn23xx_disable_interrupt*/
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_enb_reg64",
+ CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_enb_reg64)));
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "cn23xx->intr_sum_reg64",
+ CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
+ CVM_CAST64(readq(cn23xx->intr_sum_reg64)));
+
+ /*In cn23xx_setup_iq_regs*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_IQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_DOORBELL", i,
+ CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_DOORBELL(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_IQ_INSTR_COUNT64", i,
+ CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
+ }
+
+ /*In cn23xx_setup_oq_regs*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BASE_ADDR64", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
+ CVM_CAST64(octeon_read_csr
+ (oct, CN23XX_SLI_OQ_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
+ CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
+ CVM_CAST64(octeon_read_csr(
+ oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_SENT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
+ dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_OQ_PKTS_CREDIT", i,
+ CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
+ CVM_CAST64(octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_TIME_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
+ dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
+ "CN23XX_SLI_PKT_CNT_INT",
+ CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
+ CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
+}
+
+static int cn23xx_pf_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
+ oct->octeon_id);
+
+ octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);
+
+ /* Initiate chip-wide soft reset */
+ lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);
+
+ /* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
+ oct->octeon_id);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
+ oct->octeon_id);
+
+ /* restore the reset value*/
+ octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+static void cn23xx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 regval;
+ u32 uncorrectable_err_mask, corrtable_err_status;
+
+ pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
+ if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
+ uncorrectable_err_mask = 0;
+ corrtable_err_status = 0;
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
+ &uncorrectable_err_mask);
+ pci_read_config_dword(oct->pci_dev,
+ CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
+ &corrtable_err_status);
+ dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
+ "\tdev_ctl_status_reg = 0x%08x\n"
+ "\tuncorrectable_error_mask_reg = 0x%08x\n"
+ "\tcorrectable_error_status_reg = 0x%08x\n",
+ regval, uncorrectable_err_mask,
+ corrtable_err_status);
+ }
+
+ regval |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
+ oct->octeon_id);
+ pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
+}
+
+static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
+{
+ /* Bits 29:24 of RST_BOOT[PNR_MUL] hold the reference clock multiplier
+ * for SLI.
+ */
+
+ /* TBD: get the info in Hand-shake */
+ return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
+}
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);
+
+ oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next 2 steps gives the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
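
Worked example with hypothetical numbers: for an 800 MHz coprocessor clock and time_intr_in_us = 100, the conversion above proceeds as

    800 ticks/us * 1000 = 800000 clock cycles per millisecond
    800000 / 1024       = 781 OQ ticks per millisecond
    781 * 100 / 1000    = 78 OQ ticks programmed for a 100 us interrupt period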
+
+static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
+{
+ u16 mac_no = oct->pcie_port;
+ u16 pf_num = oct->pf_num;
+ u64 reg_val;
+ u64 temp;
+
+ /* programming SRN and TRS for each MAC(0..3) */
+
+ dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
+ __func__, mac_no);
+ /* By default, map all 64 IOQs to a single MAC */
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));
+
+ if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ } else {
+ /* setting SRN <6:0> */
+ reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
+ }
+
+ /* setting TRS <23:16> */
+ reg_val = reg_val |
+ (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
+ /* setting RPVF <39:32> */
+ temp = oct->sriov_info.rings_per_vf & 0xff;
+ reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS);
+
+ /* setting NVFS <55:48> */
+ temp = oct->sriov_info.max_vfs & 0xff;
+ reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);
+
+ /* write these settings to MAC register */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
+ reg_val);
+
+ dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
+ mac_no, pf_num, (u64)octeon_read_csr64
+ (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
+}
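
A small sketch of how the RINFO fields above pack into the 64-bit register, using hypothetical values (SRN = 0 for pf_num 0, 64 total rings, 1 ring per VF, 56 VFs); the shift values follow the bit ranges noted in the comments:

    #include <linux/types.h>

    /* Illustration only; the driver uses the CN23XX_PKT_MAC_CTL_RINFO_* macros. */
    static u64 example_rinfo(void)
    {
    	u64 rinfo = 0;			/* SRN  <6:0>   = 0  */

    	rinfo |= 64ull << 16;		/* TRS  <23:16> = 64 */
    	rinfo |= 1ull << 32;		/* RPVF <39:32> = 1  */
    	rinfo |= 56ull << 48;		/* NVFS <55:48> = 56 */

    	return rinfo;			/* 0x0038000100400000 */
    }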
+
+static int cn23xx_reset_io_queues(struct octeon_device *oct)
+{
+ int ret_val = 0;
+ u64 d64;
+ u32 q_no, srn, ern;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ /* As per the HRM reg description, s/w can't write 0 to ENB; */
+ /* to turn a queue off, the RST bit needs to be set. */
+
+ /* Reset the Enable bit for all the 64 IQs. */
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
+ }
+
+ /* wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = srn; q_no < ern; q_no++) {
+ u64 reg_val = octeon_read_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop--) {
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+ ~CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(reg_val));
+
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ struct octeon_instr_queue *iq;
+ u64 intr_threshold, reg_val;
+ u32 q_no, ern, srn;
+ u64 pf_num;
+ u64 vf_num;
+
+ pf_num = oct->pf_num;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (cn23xx_reset_io_queues(oct))
+ return -1;
+
+ /** Set the MAC_NUM and PVF_NUM in IQ_PKT_CONTROL reg
+ * for all queues. Only the PF can set these bits.
+ * bits 29:30 indicate the MAC num.
+ * bits 32:47 indicate the PVF num.
+ */
+ for (q_no = 0; q_no < ern; q_no++) {
+ reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;
+
+ /* for VF assigned queues. */
+ if (q_no < oct->sriov_info.pf_srn) {
+ vf_num = q_no / oct->sriov_info.rings_per_vf;
+ vf_num += 1; /* VF1, VF2,........ */
+ } else {
+ vf_num = 0;
+ }
+
+ reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS;
+ reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+ }
+
+ /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
+ * PF queues
+ */
+ for (q_no = srn; q_no < ern; q_no++) {
+ void __iomem *inst_cnt_reg;
+
+ iq = oct->instr_queue[q_no];
+ if (iq)
+ inst_cnt_reg = iq->inst_cnt_reg;
+ else
+ inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_IQ_INSTR_COUNT64(q_no);
+
+ reg_val =
+ octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ reg_val |= CN23XX_PKT_INPUT_CTL_MASK;
+
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ /* Set WMARK level for triggering PI_INT */
+ /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
+ intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+
+ writeq((readq(inst_cnt_reg) &
+ ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+ CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+ (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+ inst_cnt_reg);
+ }
+ return 0;
+}
+
+static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 reg_val;
+ u32 q_no, ern, srn;
+ u64 time_threshold;
+
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->sriov_info.num_pf_rings;
+
+ if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
+ } else {
+ /** Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
+ }
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue ScatterList
+ * reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
+ * for Output Queue Data
+ * reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+
+ /* These interrupts are enabled in the oct->fn_list.enable_interrupt()
+ * routine, which is called after IOQ init.
+ * Set up interrupt packet and time thresholds
+ * for all the OQs
+ */
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+ (time_threshold << 32)));
+ }
+
+ /** Setting the water mark level for pko back pressure **/
+ writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);
+
+ /** Disable setting OQs in reset when a ring has no doorbells;
+ * enabling this will cause head-of-line blocking.
+ */
+ /* Do it only for pass1.1. and pass1.2 */
+ if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
+ (oct->rev_id == OCTEON_CN23XX_REV_1_1))
+ writeq(readq((u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_GBL_CONTROL) | 0x2,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);
+
+ /** Enable channel-level backpressure */
+ if (oct->pf_num)
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
+ else
+ writeq(0xffffffffffffffffULL,
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
+}
+
+static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
+{
+ cn23xx_enable_error_reporting(oct);
+
+ /* program the MAC(0..3)_RINFO before setting up input/output regs */
+ cn23xx_setup_global_mac_regs(oct);
+
+ if (cn23xx_pf_setup_global_input_regs(oct))
+ return -1;
+
+ cn23xx_pf_setup_global_output_regs(oct);
+
+ /* Default error timeout value should be 0x200000 to avoid host hang
+ * when it reads an invalid register
+ */
+ octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
+ CN23XX_SLI_WINDOW_CTL_DEFAULT);
+
+ /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
+ octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
+ return 0;
+}
+
+static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+ u64 pkt_in_done;
+
+ iq_no += oct->sriov_info.pf_srn;
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = readq(iq->inst_cnt_reg);
+
+ if (oct->msix_on) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+ iq->inst_cnt_reg);
+ } else {
+ /* Clear the count by writing back what we read, but don't
+ * enable interrupts
+ */
+ writeq(pkt_in_done, iq->inst_cnt_reg);
+ }
+
+ iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 reg_val;
+ struct octeon_droq *droq = oct->droq[oq_no];
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 time_threshold;
+ u64 cnt_threshold;
+
+ oq_no += oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ droq->buffer_size);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ if (!oct->msix_on) {
+ /* Enable this output queue to generate Packet Timer Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+
+ /* Enable this output queue to generate Packet Count Interrupt
+ */
+ reg_val =
+ octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
+ reg_val);
+ } else {
+ time_threshold = cn23xx_pf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+ cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
+ ((time_threshold << 32 | cnt_threshold)));
+ }
+}
+
+static void cn23xx_pf_mbox_thread(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
+ struct octeon_device *oct = mbox->oct_dev;
+ u64 mbox_int_val, val64;
+ u32 q_no, i;
+
+ if (oct->rev_id < OCTEON_CN23XX_REV_1_1) {
+ /*read and clear by writing 1*/
+ mbox_int_val = readq(mbox->mbox_int_reg);
+ writeq(mbox_int_val, mbox->mbox_int_reg);
+
+ for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+
+ val64 = readq(oct->mbox[q_no]->mbox_write_reg);
+
+ if (val64 && (val64 != OCTEON_PFVFACK)) {
+ if (octeon_mbox_read(oct->mbox[q_no]))
+ octeon_mbox_process_message(
+ oct->mbox[q_no]);
+ }
+ }
+
+ schedule_delayed_work(&wk->work, msecs_to_jiffies(10));
+ } else {
+ octeon_mbox_process_message(mbox);
+ }
+}
+
+static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
+{
+ struct octeon_mbox *mbox = NULL;
+ u16 mac_no = oct->pcie_port;
+ u16 pf_num = oct->pf_num;
+ u32 q_no, i;
+
+ if (!oct->sriov_info.max_vfs)
+ return 0;
+
+ for (i = 0; i < oct->sriov_info.max_vfs; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+
+ mbox = vmalloc(sizeof(*mbox));
+ if (!mbox)
+ goto free_mbox;
+
+ memset(mbox, 0, sizeof(struct octeon_mbox));
+
+ spin_lock_init(&mbox->lock);
+
+ mbox->oct_dev = oct;
+
+ mbox->q_no = q_no;
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+
+ /* PF mbox interrupt reg */
+ mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num);
+
+ /* PF writes into SIG0 reg */
+ mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0);
+
+ /* PF reads from SIG1 reg */
+ mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);
+
+ /*Mail Box Thread creation*/
+ INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
+ cn23xx_pf_mbox_thread);
+ mbox->mbox_poll_wk.ctxptr = (void *)mbox;
+
+ oct->mbox[q_no] = mbox;
+
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ }
+
+ if (oct->rev_id < OCTEON_CN23XX_REV_1_1)
+ schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
+ msecs_to_jiffies(0));
+
+ return 0;
+
+free_mbox:
+ while (i) {
+ i--;
+ vfree(oct->mbox[i]);
+ }
+
+ return 1;
+}
+
+static int cn23xx_free_pf_mbox(struct octeon_device *oct)
+{
+ u32 q_no, i;
+
+ if (!oct->sriov_info.max_vfs)
+ return 0;
+
+ for (i = 0; i < oct->sriov_info.max_vfs; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+ cancel_delayed_work_sync(
+ &oct->mbox[q_no]->mbox_poll_wk.work);
+ vfree(oct->mbox[q_no]);
+ }
+
+ return 0;
+}
+
+static int cn23xx_enable_io_queues(struct octeon_device *oct)
+{
+ u64 reg_val;
+ u32 srn, ern, q_no;
+ u32 loop = 1000;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ for (q_no = srn; q_no < ern; q_no++) {
+ /* set the corresponding IQ IS_64B bit */
+ if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
+ /* IOQs are in reset by default in PEM2 mode,
+ * clearing reset bit
+ */
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val &
+ CN23XX_PKT_INPUT_CTL_QUIET) &&
+ --loop) {
+ reg_val = octeon_read_csr64(
+ oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ }
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+ }
+ for (q_no = srn; q_no < ern; q_no++) {
+ u32 reg_val;
+ /* set the corresponding OQ ENB bit */
+ if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
+ reg_val = octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+ }
+ return 0;
+}
+
+static void cn23xx_disable_io_queues(struct octeon_device *oct)
+{
+ int q_no, loop;
+ u64 d64;
+ u32 d32;
+ u32 srn, ern;
+
+ srn = oct->sriov_info.pf_srn;
+ ern = srn + oct->num_iqs;
+
+ /*** Disable Input Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* start the Reset for a particular ring */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
+ WRITE_ONCE(d64, READ_ONCE(d64) &
+ (~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
+ WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(d64));
+
+ /* Wait until hardware indicates that the particular IQ
+ * is out of reset.
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Input Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
+ while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+ }
+
+ /*** Disable Output Queues. ***/
+ for (q_no = srn; q_no < ern; q_no++) {
+ loop = HZ;
+
+ /* Wait until hardware indicates that the particular ring
+ * is out of reset; note that SLI_PKT_RING_RST is
+ * common for both IQs and OQs
+ */
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
+ WRITE_ONCE(d64, octeon_read_csr64(
+ oct, CN23XX_SLI_PKT_IOQ_RING_RST));
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for this Output Queue. */
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+ while (octeon_read_csr64(oct,
+ CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
+ loop--) {
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
+ WRITE_ONCE(d32, octeon_read_csr(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
+ octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
+ READ_ONCE(d32));
+ }
+}
+
+static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ u64 pkts_sent;
+ u64 ret = 0;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+
+ if (!droq) {
+ dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
+ oct->pf_num, ioq_vector->ioq_num);
+ return 0;
+ }
+
+ pkts_sent = readq(droq->pkts_sent_reg);
+
+ /* If our device has interrupted, then proceed. Also check
+ * for all f's, which is returned if the interrupt was triggered
+ * on an error and the PCI read failed.
+ */
+ if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+ return ret;
+
+ /* Write count reg in sli_pkt_cnts to clear these int.*/
+ if ((pkts_sent & CN23XX_INTR_PO_INT) ||
+ (pkts_sent & CN23XX_INTR_PI_INT)) {
+ if (pkts_sent & CN23XX_INTR_PO_INT)
+ ret |= MSIX_PO_INT;
+ }
+
+ if (pkts_sent & CN23XX_INTR_PI_INT)
+ /* We will clear the count when we update the read_index. */
+ ret |= MSIX_PI_INT;
+
+ /* The PF never needs to handle the MSI-X mbox interrupt here; it
+ * arrives on the last MSI-X vector.
+ */
+ return ret;
+}
+
+static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct)
+{
+ struct delayed_work *work;
+ u64 mbox_int_val;
+ u32 i, q_no;
+
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+
+ for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
+ q_no = i * oct->sriov_info.rings_per_vf;
+
+ if (mbox_int_val & BIT_ULL(q_no)) {
+ writeq(BIT_ULL(q_no),
+ oct->mbox[0]->mbox_int_reg);
+ if (octeon_mbox_read(oct->mbox[q_no])) {
+ work = &oct->mbox[q_no]->mbox_poll_wk.work;
+ schedule_delayed_work(work,
+ msecs_to_jiffies(0));
+ }
+ }
+ }
+}
+
+static irqreturn_t cn23xx_interrupt_handler(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr64;
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+ intr64 = readq(cn23xx->intr_sum_reg64);
+
+ oct->int_status = 0;
+
+ if (intr64 & CN23XX_INTR_ERR)
+ dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
+ oct->octeon_id, CVM_CAST64(intr64));
+
+ /* When VFs write into the MBOX_SIG2 reg, this interrupt is set in the PF */
+ if (intr64 & CN23XX_INTR_VF_MBOX)
+ cn23xx_handle_pf_mbox_intr(oct);
+
+ if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
+ if (intr64 & CN23XX_INTR_PKT_DATA)
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & (CN23XX_INTR_DMA0_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+ if (intr64 & (CN23XX_INTR_DMA1_FORCE))
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn23xx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
+static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid)
+{
+ u64 bar1;
+ u64 reg_adr;
+
+ if (!valid) {
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ reg_adr = lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+ WRITE_ONCE(bar1, reg_adr);
+ return;
+ }
+
+ /* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
+ * bits <41:22> of the Core Addr
+ */
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+
+ WRITE_ONCE(bar1, lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
+}
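
For example (hypothetical address): a core address of 0x1234400000 has bits <41:22> equal to 0x48D1; shifting that left by 4 places it in ADDR_IDX <23:4>, so 0x48D10 (OR'ed with the PCI_BAR1_MASK bits) is what gets written to the BAR1 index register.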
+
+static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
+{
+ lio_pci_writeq(oct, mask,
+ CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(
+ oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
+}
+
+/* always call with lock held */
+static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
+{
+ u32 new_idx;
+ u32 last_done;
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Adding the newly completed count to the old read index, modulo
+ * the IQ size, gives the new index. The iq->reset_instr_cnt is
+ * always zero for cn23xx, so no extra adjustments are needed.
+ */
+ new_idx = (iq->octeon_read_index +
+ (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+
+ return new_idx;
+}
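
A short worked example of the wrap handling above (hypothetical values): if the stored iq->pkt_in_done is 0xFFFFFFFB and the register now reads 5, the unsigned 32-bit subtraction yields last_done = 10; with a 512-entry ring and an old read index of 508, the new index is (508 + 10) % 512 = 6.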
+
+static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Split the single write into multiple writes based on the flag. */
+ /* Enable Interrupt */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val |= CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ } else if ((intr_flag & OCTEON_MBOX_INTR) &&
+ (oct->sriov_info.max_vfs > 0)) {
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val |= CN23XX_INTR_VF_MBOX;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+ }
+}
+
+static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u64 intr_val = 0;
+
+ /* Disable Interrupts */
+ if (intr_flag == OCTEON_ALL_INTR) {
+ writeq(0, cn23xx->intr_enb_reg64);
+ } else if (intr_flag & OCTEON_OUTPUT_INTR) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val &= ~CN23XX_INTR_PKT_DATA;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ } else if ((intr_flag & OCTEON_MBOX_INTR) &&
+ (oct->sriov_info.max_vfs > 0)) {
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
+ intr_val = readq(cn23xx->intr_enb_reg64);
+ intr_val &= ~CN23XX_INTR_VF_MBOX;
+ writeq(intr_val, cn23xx->intr_enb_reg64);
+ }
+ }
+}
+
+static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
+{
+ oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
+ oct->pcie_port);
+}
+
+static int cn23xx_get_pf_num(struct octeon_device *oct)
+{
+ u32 fdl_bit = 0;
+ u64 pkt0_in_ctl, d64;
+ int pfnum, mac, trs, ret;
+
+ ret = 0;
+
+ /** Read Function Dependency Link reg to get the function number */
+ if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
+ &fdl_bit) == 0) {
+ oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
+ CN23XX_PCIE_SRIOV_FDL_MASK);
+ } else {
+ ret = -EINVAL;
+
+ /* Under some virtual environments, extended PCI regs are
+ * inaccessible, in which case the above read will have failed.
+ * In this case, read the PF number from the
+ * SLI_PKT0_INPUT_CONTROL reg (written by f/w)
+ */
+ pkt0_in_ctl = octeon_read_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(0));
+ pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+ mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;
+
+ /* validate PF num by reading RINFO; f/w writes RINFO.trs == 1*/
+ d64 = octeon_read_csr64(oct,
+ CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
+ trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
+ if (trs == 1) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: error reading PCI cfg space pfnum, re-read %u\n",
+ pfnum);
+ oct->pf_num = pfnum;
+ ret = 0;
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n");
+ }
+ }
+
+ return ret;
+}
+
+static void cn23xx_setup_reg_address(struct octeon_device *oct)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+
+ oct->reg_list.pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
+ oct->reg_list.pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
+ oct->reg_list.pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);
+
+ oct->reg_list.pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
+ oct->reg_list.pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
+ oct->reg_list.pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);
+
+ oct->reg_list.pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
+ oct->reg_list.pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
+ oct->reg_list.pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);
+
+ oct->reg_list.pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
+ oct->reg_list.pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
+ oct->reg_list.pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);
+
+ cn23xx_get_pcie_qlmport(oct);
+
+ cn23xx->intr_mask64 = CN23XX_INTR_MASK;
+ if (!oct->msix_on)
+ cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
+ if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
+ cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;
+
+ cn23xx->intr_sum_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ cn23xx->intr_enb_reg64 =
+ bar0_pciaddr +
+ CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+}
+
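+/* Ring partitioning sketch (actual limits depend on silicon pass and config):
+ * with CONFIG_PCI_IOV, max_vfs = min(max_rings - num_pf_rings,
+ * max_possible_vfs) and each VF gets one ring; the PF rings occupy the top
+ * of the ring space, so pf_srn = total_rings - num_pf_rings.  If, say,
+ * max_rings were 64, num_pf_rings 8 and max_possible_vfs allowed it, up to
+ * 56 VFs could be supported and the PF would own rings 56..63.
+ */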
+int cn23xx_sriov_config(struct octeon_device *oct)
+{
+ struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
+ u32 max_rings, total_rings, max_vfs, rings_per_vf;
+ u32 pf_srn, num_pf_rings;
+ u32 max_possible_vfs;
+
+ cn23xx->conf =
+ (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
+ switch (oct->rev_id) {
+ case OCTEON_CN23XX_REV_1_0:
+ max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
+ max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0;
+ break;
+ case OCTEON_CN23XX_REV_1_1:
+ max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
+ max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1;
+ break;
+ default:
+ max_rings = CN23XX_MAX_RINGS_PER_PF;
+ max_possible_vfs = CN23XX_MAX_VFS_PER_PF;
+ break;
+ }
+
+ if (oct->sriov_info.num_pf_rings)
+ num_pf_rings = oct->sriov_info.num_pf_rings;
+ else
+ num_pf_rings = num_present_cpus();
+
+#ifdef CONFIG_PCI_IOV
+ max_vfs = min_t(u32,
+ (max_rings - num_pf_rings), max_possible_vfs);
+ rings_per_vf = 1;
+#else
+ max_vfs = 0;
+ rings_per_vf = 0;
+#endif
+
+ total_rings = num_pf_rings + max_vfs;
+
+ /* the first ring of the pf */
+ pf_srn = total_rings - num_pf_rings;
+
+ oct->sriov_info.trs = total_rings;
+ oct->sriov_info.max_vfs = max_vfs;
+ oct->sriov_info.rings_per_vf = rings_per_vf;
+ oct->sriov_info.pf_srn = pf_srn;
+ oct->sriov_info.num_pf_rings = num_pf_rings;
+ dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
+ oct->sriov_info.trs, oct->sriov_info.max_vfs,
+ oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn,
+ oct->sriov_info.num_pf_rings);
+
+ oct->sriov_info.sriov_enabled = 0;
+
+ return 0;
+}
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
+{
+ u32 data32;
+ u64 BAR0, BAR1;
+
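+ /* Each 64-bit BAR spans two consecutive 32-bit config dwords; the low
+ * dword also carries the BAR type/prefetch flag bits, which is why the
+ * reads below mask them off with ~0xf before assembling the address.
+ */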
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32);
+ BAR0 = (u64)(data32 & ~0xf);
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32);
+ BAR0 |= ((u64)data32 << 32);
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32);
+ BAR1 = (u64)(data32 & ~0xf);
+ pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32);
+ BAR1 |= ((u64)data32 << 32);
+
+ if (!BAR0 || !BAR1) {
+ if (!BAR0)
+ dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n");
+ if (!BAR1)
+ dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n");
+ return 1;
+ }
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ if (cn23xx_get_pf_num(oct) != 0)
+ return 1;
+
+ if (cn23xx_sriov_config(oct)) {
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);
+
+ oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
+ oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox;
+ oct->fn_list.free_mbox = cn23xx_free_pf_mbox;
+
+ oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
+ oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;
+
+ oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
+ oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
+ oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
+ oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;
+
+ oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
+ oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;
+
+ cn23xx_setup_reg_address(oct);
+
+ oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);
+
+ return 0;
+}
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx)
+{
+ if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf23xx),
+ CN23XX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf23xx),
+ CN23XX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf23xx))) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+int cn23xx_fw_loaded(struct octeon_device *oct)
+{
+ u64 val;
+
+ /* If there's more than one active PF on this NIC, then that
+ * implies that the NIC firmware is loaded and running. This check
+ * prevents a rare false negative that might occur if we only relied
+ * on checking the SCR2_BIT_FW_LOADED flag. The false negative would
+ * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
+ * though the firmware was already loaded but still booting and has yet
+ * to set SCR2_BIT_FW_LOADED.
+ */
+ if (atomic_read(oct->adapter_refcount) > 1)
+ return 1;
+
+ val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
+ return (val >> SCR2_BIT_FW_LOADED) & 1ULL;
+}
+
+void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
+ u8 *mac)
+{
+ if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) {
+ struct octeon_mbox_cmd mbox_cmd;
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = NULL;
+ ether_addr_copy(mbox_cmd.msg.s.params, mac);
+ mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
+ octeon_mbox_write(oct, &mbox_cmd);
+ }
+}
+
+static void
+cn23xx_get_vf_stats_callback(struct octeon_device *oct,
+ struct octeon_mbox_cmd *cmd, void *arg)
+{
+ struct oct_vf_stats_ctx *ctx = arg;
+
+ memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats));
+ atomic_set(&ctx->status, 1);
+}
+
+int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
+ struct oct_vf_stats *stats)
+{
+ u32 timeout = HZ; // 1sec
+ struct octeon_mbox_cmd mbox_cmd;
+ struct oct_vf_stats_ctx ctx;
+ u32 count = 0, ret;
+
+ if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx)))
+ return -1;
+
+ if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data))
+ return -1;
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (octeon_mbox_callback_t)cn23xx_get_vf_stats_callback;
+ ctx.stats = stats;
+ atomic_set(&ctx.status, 0);
+ mbox_cmd.fn_arg = (void *)&ctx;
+ memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data));
+ octeon_mbox_write(oct, &mbox_cmd);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout));
+
+ ret = atomic_read(&ctx.status);
+ if (ret == 0) {
+ octeon_mbox_cancel(oct, 0);
+ dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timed out\n",
+ vfidx);
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
new file mode 100644
index 000000000..e6f31d0d5
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h
@@ -0,0 +1,73 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn23xx_pf_device.h
+ * \brief Host Driver: Routines that perform CN23XX specific operations.
+ */
+
+#ifndef __CN23XX_PF_DEVICE_H__
+#define __CN23XX_PF_DEVICE_H__
+
+#include "cn23xx_pf_regs.h"
+
+/* Register addresses and configuration for CN23XX devices.
+ * If device-specific changes need to be made, add a struct with
+ * device-specific fields as shown in the commented section.
+ */
+struct octeon_cn23xx_pf {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+};
+
+#define CN23XX_SLI_DEF_BP 0x40
+
+struct oct_vf_stats {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 broadcast;
+ u64 multicast;
+};
+
+int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
+
+int validate_cn23xx_pf_config_info(struct octeon_device *oct,
+ struct octeon_config *conf23xx);
+
+u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+
+void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
+
+int cn23xx_sriov_config(struct octeon_device *oct);
+
+int cn23xx_fw_loaded(struct octeon_device *oct);
+
+void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
+ u8 *mac);
+
+int cn23xx_get_vf_stats(struct octeon_device *oct, int ifidx,
+ struct oct_vf_stats *stats);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
new file mode 100644
index 000000000..a0fd32476
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -0,0 +1,599 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn23xx_pf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX devices.
+ */
+
+#ifndef __CN23XX_PF_REGS_H__
+#define __CN23XX_PF_REGS_H__
+
+#define CN23XX_CONFIG_VENDOR_ID 0x00
+#define CN23XX_CONFIG_DEVICE_ID 0x02
+
+#define CN23XX_CONFIG_XPANSION_BAR 0x38
+
+#define CN23XX_CONFIG_MSIX_CAP 0x50
+#define CN23XX_CONFIG_MSIX_LMSI 0x54
+#define CN23XX_CONFIG_MSIX_UMSI 0x58
+#define CN23XX_CONFIG_MSIX_MSIMD 0x5C
+#define CN23XX_CONFIG_MSIX_MSIMM 0x60
+#define CN23XX_CONFIG_MSIX_MSIMP 0x64
+
+#define CN23XX_CONFIG_PCIE_CAP 0x70
+#define CN23XX_CONFIG_PCIE_DEVCAP 0x74
+#define CN23XX_CONFIG_PCIE_DEVCTL 0x78
+#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C
+#define CN23XX_CONFIG_PCIE_LINKCTL 0x80
+#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84
+#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88
+#define CN23XX_CONFIG_PCIE_DEVCTL2 0x98
+#define CN23XX_CONFIG_PCIE_LINKCTL2 0xA0
+#define CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK 0x108
+#define CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS 0x110
+#define CN23XX_CONFIG_PCIE_DEVCTL_MASK 0x00040000
+
+#define CN23XX_PCIE_SRIOV_FDL 0x188
+#define CN23XX_PCIE_SRIOV_FDL_BIT_POS 0x10
+#define CN23XX_PCIE_SRIOV_FDL_MASK 0xFF
+
+#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
+
+#define CN23XX_CONFIG_SRIOV_VFDEVID 0x190
+
+#define CN23XX_CONFIG_SRIOV_BAR_START 0x19C
+#define CN23XX_CONFIG_SRIOV_BARX(i) \
+ (CN23XX_CONFIG_SRIOV_BAR_START + ((i) * 4))
+#define CN23XX_CONFIG_SRIOV_BAR_PF 0x08
+#define CN23XX_CONFIG_SRIOV_BAR_64BIT 0x04
+#define CN23XX_CONFIG_SRIOV_BAR_IO 0x01
+
+/* ############## BAR0 Registers ################ */
+
+#define CN23XX_SLI_CTL_PORT_START 0x286E0
+#define CN23XX_PORT_OFFSET 0x10
+
+#define CN23XX_SLI_CTL_PORT(p) \
+ (CN23XX_SLI_CTL_PORT_START + ((p) * CN23XX_PORT_OFFSET))
+
+/* 2 scratch registers (64-bit) */
+#define CN23XX_SLI_WINDOW_CTL 0x282E0
+#define CN23XX_SLI_SCRATCH1 0x283C0
+#define CN23XX_SLI_SCRATCH2 0x283D0
+#define CN23XX_SLI_WINDOW_CTL_DEFAULT 0x200000ULL
+
+/* 1 register (64-bit) - SLI_CTL_STATUS */
+#define CN23XX_SLI_CTL_STATUS 0x28570
+
+/* SLI Packet Input Jabber Register (64-bit register)
+ * <31:0> holds the byte count limiting the size of inbound SLI packets.
+ * The default value is 0xFA00 (=64000).
+ */
+#define CN23XX_SLI_PKT_IN_JABBER 0x29170
+/* The input jabber is used to determine the TSO max size.
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
+ * PKO_BUG_24989_WQE_LEN
+ */
+#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
+
+#define CN23XX_WIN_WR_ADDR_LO 0x20000
+#define CN23XX_WIN_WR_ADDR_HI 0x20004
+#define CN23XX_WIN_WR_ADDR64 CN23XX_WIN_WR_ADDR_LO
+
+#define CN23XX_WIN_RD_ADDR_LO 0x20010
+#define CN23XX_WIN_RD_ADDR_HI 0x20014
+#define CN23XX_WIN_RD_ADDR64 CN23XX_WIN_RD_ADDR_LO
+
+#define CN23XX_WIN_WR_DATA_LO 0x20020
+#define CN23XX_WIN_WR_DATA_HI 0x20024
+#define CN23XX_WIN_WR_DATA64 CN23XX_WIN_WR_DATA_LO
+
+#define CN23XX_WIN_RD_DATA_LO 0x20040
+#define CN23XX_WIN_RD_DATA_HI 0x20044
+#define CN23XX_WIN_RD_DATA64 CN23XX_WIN_RD_DATA_LO
+
+#define CN23XX_WIN_WR_MASK_LO 0x20030
+#define CN23XX_WIN_WR_MASK_HI 0x20034
+#define CN23XX_WIN_WR_MASK_REG CN23XX_WIN_WR_MASK_LO
+#define CN23XX_SLI_MAC_CREDIT_CNT 0x23D70
+
+/* 4 registers (64-bit) for mapping IOQs to MACs(PEMs)-
+ * SLI_PKT_MAC(0..3)_PF(0..1)_RINFO
+ */
+#define CN23XX_SLI_PKT_MAC_RINFO_START64 0x29030
+
+/*1 register (64-bit) to determine whether IOQs are in reset. */
+#define CN23XX_SLI_PKT_IOQ_RING_RST 0x291E0
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_IQ_OFFSET 0x20000
+
+#define CN23XX_MAC_RINFO_OFFSET 0x20
+#define CN23XX_PF_RINFO_OFFSET 0x10
+
+#define CN23XX_SLI_PKT_MAC_RINFO64(mac, pf) \
+ (CN23XX_SLI_PKT_MAC_RINFO_START64 + \
+ ((mac) * CN23XX_MAC_RINFO_OFFSET) + \
+ ((pf) * CN23XX_PF_RINFO_OFFSET))
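+/* e.g. CN23XX_SLI_PKT_MAC_RINFO64(1, 0) = 0x29030 + 0x20 = 0x29050 */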
+
+/** mask for total rings, setting TRS to base */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS BIT_ULL(16)
+/** mask for starting ring number: setting SRN <6:0> = 0x7F */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN (0x7F)
+
+/* Starting bit of the TRS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS 16
+/* Starting bit of SRN field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_SRN_BIT_POS 0
+/* Starting bit of RPVF field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS 32
+/* Starting bit of NVFS field in CN23XX_SLI_PKT_MAC_RINFO64 register */
+#define CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS 48
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN23XX_SLI_IQ_INSTR_COUNT_START64 0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN23XX_SLI_IQ_BASE_ADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN23XX_SLI_IQ_DOORBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN23XX_SLI_IQ_SIZE_START 0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_SLI_IQ_PKT_CONTROL_START64 0x10000
+
+/*------- Request Queue Macros ---------*/
+#define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_SIZE(iq) \
+ (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
+
+#define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
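+/* e.g. CN23XX_SLI_IQ_DOORBELL(2) = 0x10020 + 2 * 0x20000 = 0x50020 */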
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29)
+/* Number of instructions to be read in one MAC read request.
+ * setting to Max value(4)
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24)
+#define CN23XX_PKT_INPUT_CTL_RST BIT(23)
+#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22)
+#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1)
+
+/** Rings per Virtual Function **/
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F)
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48)
+/** These bits[47:44] select the Physical function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7)
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45)
+/** These bits[43:32] select the function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF)
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29)
+#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL)
+#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32)
+#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE | \
+ CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP | \
+ CN23XX_PKT_INPUT_CTL_USE_CSR | \
+ CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62)
+#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_SLI_OQ_PKT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN23XX_SLI_OQ0_BUFF_INFO_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN23XX_SLI_OQ_BASE_ADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN23XX_SLI_OQ_PKT_CREDITS_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN23XX_SLI_OQ_SIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN23XX_SLI_OQ_PKT_SENT_START 0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_OQ_OFFSET 0x20000
+
+/* 1 (64-bit register) for Output Queue backpressure across all rings. */
+#define CN23XX_SLI_OQ_WMARK 0x29180
+
+/* Global pkt control register */
+#define CN23XX_SLI_GBL_CONTROL 0x29210
+
+/* Backpressure enable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1S 0x29260
+
+/* Backpressure enable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1S 0x29270
+
+/* Backpressure disable register for PF0 */
+#define CN23XX_SLI_OUT_BP_EN_W1C 0x29280
+
+/* Backpressure disable register for PF1 */
+#define CN23XX_SLI_OUT_BP_EN2_W1C 0x29290
+
+/*------- Output Queue Macros ---------*/
+
+#define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_SIZE(oq) \
+ (CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+/* Macros for accessing CNT and TIME separately from INT_LEVELS */
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET))
+
+#define CN23XX_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \
+ (CN23XX_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_OQ_OFFSET) + 4)
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13)
+#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12)
+#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11)
+#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0)
+
+/*######################### Mailbox Reg Macros ########################*/
+#define CN23XX_SLI_PKT_MBOX_INT_START 0x10210
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+#define CN23XX_SLI_MAC_PF_MBOX_INT_START 0x27380
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_MBOX_INT(mac, pf) \
+ (CN23XX_SLI_MAC_PF_MBOX_INT_START + \
+ ((mac) * CN23XX_MAC_INT_OFFSET + \
+ (pf) * CN23XX_PF_INT_OFFSET))
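+/* e.g. CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1) =
+ * 0x10200 + 0 * 0x20000 + 1 * 0x8 = 0x10208
+ */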
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN23XX_DMA_CNT_START 0x28400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values */
+/* SLI_DMA_0_TIM */
+#define CN23XX_DMA_TIM_START 0x28420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN23XX_DMA_INT_LEVEL_START 0x283E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN23XX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN23XX_DMA_CNT(dq) \
+ (CN23XX_DMA_CNT_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_PKT_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIME_INT_LEVEL(dq) \
+ (CN23XX_DMA_INT_LEVEL_START + 4 + ((dq) * CN23XX_DMA_OFFSET))
+
+#define CN23XX_DMA_TIM(dq) \
+ (CN23XX_DMA_TIM_START + ((dq) * CN23XX_DMA_OFFSET))
+
+/*######################## MSIX TABLE #########################*/
+
+#define CN23XX_MSIX_TABLE_ADDR_START 0x0
+#define CN23XX_MSIX_TABLE_DATA_START 0x8
+
+#define CN23XX_MSIX_TABLE_SIZE 0x10
+#define CN23XX_MSIX_TABLE_ENTRIES 0x41
+
+#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32)
+
+#define CN23XX_MSIX_TABLE_ADDR(idx) \
+ (CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define CN23XX_MSIX_TABLE_DATA(idx) \
+ (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+/*######################## INTERRUPTS #########################*/
+#define CN23XX_MAC_INT_OFFSET 0x20
+#define CN23XX_PF_INT_OFFSET 0x10
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN23XX_SLI_INT_SUM64 0x27000
+
+/* 4 registers (64-bit) for Interrupt Enable for each Port */
+#define CN23XX_SLI_INT_ENB64 0x27080
+
+#define CN23XX_SLI_MAC_PF_INT_SUM64(mac, pf) \
+ (CN23XX_SLI_INT_SUM64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+#define CN23XX_SLI_MAC_PF_INT_ENB64(mac, pf) \
+ (CN23XX_SLI_INT_ENB64 + \
+ ((mac) * CN23XX_MAC_INT_OFFSET) + \
+ ((pf) * CN23XX_PF_INT_OFFSET))
+
+/* 1 register (64-bit) to indicate which Output Queue reached pkt threshold */
+#define CN23XX_SLI_PKT_CNT_INT 0x29130
+
+/* 1 register (64-bit) to indicate which Output Queue reached time threshold */
+#define CN23XX_SLI_PKT_TIME_INT 0x29140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN23XX_INTR_PO_INT BIT_ULL(63)
+#define CN23XX_INTR_PI_INT BIT_ULL(62)
+#define CN23XX_INTR_MBOX_INT BIT_ULL(61)
+#define CN23XX_INTR_RESEND BIT_ULL(60)
+
+#define CN23XX_INTR_CINT_ENB BIT_ULL(48)
+#define CN23XX_INTR_MBOX_ENB BIT(0)
+
+#define CN23XX_INTR_RML_TIMEOUT_ERR (1)
+
+#define CN23XX_INTR_MIO_INT BIT(1)
+
+#define CN23XX_INTR_RESERVED1 (3 << 2)
+
+#define CN23XX_INTR_PKT_COUNT BIT(4)
+#define CN23XX_INTR_PKT_TIME BIT(5)
+
+#define CN23XX_INTR_RESERVED2 (3 << 6)
+
+#define CN23XX_INTR_M0UPB0_ERR BIT(8)
+#define CN23XX_INTR_M0UPWI_ERR BIT(9)
+#define CN23XX_INTR_M0UNB0_ERR BIT(10)
+#define CN23XX_INTR_M0UNWI_ERR BIT(11)
+
+#define CN23XX_INTR_RESERVED3 (0xFFFFFULL << 12)
+
+#define CN23XX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN23XX_INTR_DMA1_FORCE BIT_ULL(33)
+
+#define CN23XX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN23XX_INTR_DMA1_COUNT BIT_ULL(35)
+
+#define CN23XX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN23XX_INTR_DMA1_TIME BIT_ULL(37)
+
+#define CN23XX_INTR_RESERVED4 (0x7FFFFULL << 38)
+
+#define CN23XX_INTR_VF_MBOX BIT_ULL(57)
+#define CN23XX_INTR_DMAVF_ERR BIT_ULL(58)
+#define CN23XX_INTR_DMAPF_ERR BIT_ULL(59)
+
+#define CN23XX_INTR_PKTVF_ERR BIT_ULL(60)
+#define CN23XX_INTR_PKTPF_ERR BIT_ULL(61)
+#define CN23XX_INTR_PPVF_ERR BIT_ULL(62)
+#define CN23XX_INTR_PPPF_ERR BIT_ULL(63)
+
+#define CN23XX_INTR_DMA0_DATA (CN23XX_INTR_DMA0_TIME)
+#define CN23XX_INTR_DMA1_DATA (CN23XX_INTR_DMA1_TIME)
+
+#define CN23XX_INTR_DMA_DATA \
+ (CN23XX_INTR_DMA0_DATA | CN23XX_INTR_DMA1_DATA)
+
+/* By default, only TIME based */
+#define CN23XX_INTR_PKT_DATA (CN23XX_INTR_PKT_TIME)
+/* For both COUNT and TIME based */
+/* #define CN23XX_INTR_PKT_DATA \
+ * (CN23XX_INTR_PKT_COUNT | CN23XX_INTR_PKT_TIME)
+ */
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN23XX_INTR_PCIE_DATA \
+ (CN23XX_INTR_DMA_DATA | CN23XX_INTR_PKT_DATA)
+
+/* Sum of interrupts for error events */
+#define CN23XX_INTR_ERR \
+ (CN23XX_INTR_M0UPB0_ERR | \
+ CN23XX_INTR_M0UPWI_ERR | \
+ CN23XX_INTR_M0UNB0_ERR | \
+ CN23XX_INTR_M0UNWI_ERR | \
+ CN23XX_INTR_DMAVF_ERR | \
+ CN23XX_INTR_DMAPF_ERR | \
+ CN23XX_INTR_PKTPF_ERR | \
+ CN23XX_INTR_PPPF_ERR | \
+ CN23XX_INTR_PPVF_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN23XX_INTR_MASK \
+ (CN23XX_INTR_DMA_DATA | \
+ CN23XX_INTR_DMA0_FORCE | \
+ CN23XX_INTR_DMA1_FORCE | \
+ CN23XX_INTR_MIO_INT | \
+ CN23XX_INTR_ERR)
+
+/* 4 Registers (64 - bit) */
+#define CN23XX_SLI_S2M_PORT_CTL_START 0x23D80
+#define CN23XX_SLI_S2M_PORTX_CTL(port) \
+ (CN23XX_SLI_S2M_PORT_CTL_START + ((port) * 0x10))
+
+#define CN23XX_SLI_MAC_NUMBER 0x20050
+
+/** PEM(0..3)_BAR1_INDEX(0..15) address is defined as
+ * addr = (0x00011800C0000100 | port << 24 | idx << 3)
+ * Here, port is PEM(0..3) & idx is INDEX(0..15).
+ */
+#define CN23XX_PEM_BAR1_INDEX_START 0x00011800C0000100ULL
+#define CN23XX_PEM_OFFSET 24
+#define CN23XX_BAR1_INDEX_OFFSET 3
+
+#define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
+ (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
+ ((idx) << CN23XX_BAR1_INDEX_OFFSET))
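+/* e.g. CN23XX_PEM_BAR1_INDEX_REG(1, 2) =
+ * 0x00011800C0000100 + (1 << 24) + (2 << 3) = 0x00011800C1000110
+ */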
+
+/*############################ DPI #########################*/
+
+/* 1 register (64-bit) - provides DMA Enable */
+#define CN23XX_DPI_CTL 0x0001df0000000040ULL
+
+/* 1 register (64-bit) - Controls the DMA IO Operation */
+#define CN23XX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+/* 1 register (64-bit) - Provides DMA Instr'n Queue Enable */
+#define CN23XX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RSP
+ * Indicates which Instr'n Queue received error response from the IO sub-system
+ */
+#define CN23XX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+/* 1 register (64-bit) - DPI_REQ_ERR_RST
+ * Indicates which Instr'n Queue dropped an Instr'n
+ */
+#define CN23XX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+/* 6 registers (64-bit) - DPI_DMA_ENG(0..5)_EN
+ * Provides DMA Engine Queue Enable
+ */
+#define CN23XX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+#define CN23XX_DPI_DMA_ENG_ENB(eng) (CN23XX_DPI_DMA_ENG0_ENB + ((eng) * 8))
+
+/* 8 registers (64-bit) - DPI_DMA(0..7)_REQQ_CTL
+ * Provides control bits for transaction on 8 Queues
+ */
+#define CN23XX_DPI_DMA_REQQ0_CTL 0x0001df0000000180ULL
+#define CN23XX_DPI_DMA_REQQ_CTL(q_no) \
+ (CN23XX_DPI_DMA_REQQ0_CTL + ((q_no) * 8))
+
+/* 6 registers (64-bit) - DPI_ENG(0..5)_BUF
+ * Provides DMA Engine FIFO (Queue) Size
+ */
+#define CN23XX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+#define CN23XX_DPI_DMA_ENG_BUF(eng) \
+ (CN23XX_DPI_DMA_ENG0_BUF + ((eng) * 8))
+
+/* 4 Registers (64-bit) */
+#define CN23XX_DPI_SLI_PRT_CFG_START 0x0001df0000000900ULL
+#define CN23XX_DPI_SLI_PRTX_CFG(port) \
+ (CN23XX_DPI_SLI_PRT_CFG_START + ((port) * 0x8))
+
+/* Masks for DPI_DMA_CONTROL Register */
+#define CN23XX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN23XX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN23XX_DPI_DMA_ENB (0x0FULL << 48)
+/* Set the DMA Control to update the packet count (not the byte count)
+ * sent by DMA when Interrupt Coalescing (CA mode) is used.
+ */
+#define CN23XX_DPI_DMA_O_ADD1 BIT(19)
+/* selecting 64-bit Byte Swap Mode */
+#define CN23XX_DPI_DMA_O_ES BIT(15)
+#define CN23XX_DPI_DMA_O_MODE BIT(14)
+
+#define CN23XX_DPI_DMA_CTL_MASK \
+ (CN23XX_DPI_DMA_COMMIT_MODE | \
+ CN23XX_DPI_DMA_PKT_EN | \
+ CN23XX_DPI_DMA_O_ES | \
+ CN23XX_DPI_DMA_O_MODE)
+
+/*############################ RST #########################*/
+
+#define CN23XX_RST_BOOT 0x0001180006001600ULL
+#define CN23XX_RST_SOFT_RST 0x0001180006001680ULL
+
+#define CN23XX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN23XX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
new file mode 100644
index 000000000..fda494049
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.c
@@ -0,0 +1,682 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "cn23xx_vf_device.h"
+#include "octeon_main.h"
+#include "octeon_mailbox.h"
+
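+/* Worked example (hypothetical clock): with coproc_tics_per_us = 600 and
+ * time_intr_in_us = 100, the steps below give 600 * 1000 = 600000,
+ * 600000 / 1024 = 585, 585 * 100 = 58500, 58500 / 1000 = 58 OQ ticks.
+ */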
+u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us;
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+ /* time_intr is in microseconds. The next 2 steps give the oq ticks
+ * corresponding to time_intr.
+ */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
+
+ return oqticks_per_us;
+}
+
+static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues)
+{
+ u32 loop = BUSY_READING_REG_VF_LOOP_COUNT;
+ int ret_val = 0;
+ u32 q_no;
+ u64 d64;
+
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ /* set RST bit to 1. This bit applies to both IQ and OQ */
+ d64 = octeon_read_csr64(oct,
+ CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ d64 |= CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+ d64);
+ }
+
+ /* wait until the RST bit is clear or the RST and QUIET bits are set */
+ for (q_no = 0; q_no < num_queues; q_no++) {
+ u64 reg_val = octeon_read_csr64(oct,
+ CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop) {
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
+ loop--;
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+ WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
+ ~CN23XX_PKT_INPUT_CTL_RST);
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+ READ_ONCE(reg_val));
+
+ WRITE_ONCE(reg_val, octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no)));
+ if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset failed for qno: %u\n",
+ q_no);
+ ret_val = -1;
+ }
+ }
+
+ return ret_val;
+}
+
+static int cn23xx_vf_setup_global_input_regs(struct octeon_device *oct)
+{
+ struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+ struct octeon_instr_queue *iq;
+ u64 q_no, intr_threshold;
+ u64 d64;
+
+ if (cn23xx_vf_reset_io_queues(oct, oct->sriov_info.rings_per_vf))
+ return -1;
+
+ for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
+ void __iomem *inst_cnt_reg;
+
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_DOORBELL(q_no),
+ 0xFFFFFFFF);
+ iq = oct->instr_queue[q_no];
+
+ if (iq)
+ inst_cnt_reg = iq->inst_cnt_reg;
+ else
+ inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
+ CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no);
+
+ d64 = octeon_read_csr64(oct,
+ CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no));
+
+ d64 &= 0xEFFFFFFFFFFFFFFFL;
+
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+ d64);
+
+ /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
+ * the Input Queues
+ */
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),
+ CN23XX_PKT_INPUT_CTL_MASK);
+
+ /* set the wmark level to trigger PI_INT */
+ intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+
+ writeq((readq(inst_cnt_reg) &
+ ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
+ CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
+ (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
+ inst_cnt_reg);
+ }
+ return 0;
+}
+
+static void cn23xx_vf_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 reg_val;
+ u32 q_no;
+
+ for (q_no = 0; q_no < (oct->sriov_info.rings_per_vf); q_no++) {
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKTS_CREDIT(q_no),
+ 0xFFFFFFFF);
+
+ reg_val =
+ octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKTS_SENT(q_no));
+
+ reg_val &= 0xEFFFFFFFFFFFFFFFL;
+
+ reg_val =
+ octeon_read_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+
+ /* clear IPTR */
+ reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;
+
+ /* set DPTR */
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;
+
+ /* reset BMODE */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit byte swap
+ * for the Output Queue scatter list: reset ROR_P, NSR_P
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
+#else
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
+#endif
+ /* No Relaxed Ordering, No Snoop, 64-bit byte swap
+ * for Output Queue data: reset ROR, NSR
+ */
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
+ reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
+ /* set the ES bit */
+ reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);
+
+ /* write all the selected settings */
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no),
+ reg_val);
+ }
+}
+
+static int cn23xx_setup_vf_device_regs(struct octeon_device *oct)
+{
+ if (cn23xx_vf_setup_global_input_regs(oct))
+ return -1;
+
+ cn23xx_vf_setup_global_output_regs(oct);
+
+ return 0;
+}
+
+static void cn23xx_setup_vf_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+ u64 pkt_in_done;
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr
+ * for this queue
+ */
+ iq->doorbell_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter (used in flush_iq
+ * calculation)
+ */
+ pkt_in_done = readq(iq->inst_cnt_reg);
+
+ if (oct->msix_on) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
+ iq->inst_cnt_reg);
+ }
+ iq->reset_instr_cnt = 0;
+}
+
+static void cn23xx_setup_vf_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ struct octeon_droq *droq = oct->droq[oq_no];
+
+ octeon_write_csr64(oct, CN23XX_VF_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ droq->buffer_size);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq_no);
+}
+
+static void cn23xx_vf_mbox_thread(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
+
+ octeon_mbox_process_message(mbox);
+}
+
+static int cn23xx_free_vf_mbox(struct octeon_device *oct)
+{
+ cancel_delayed_work_sync(&oct->mbox[0]->mbox_poll_wk.work);
+ vfree(oct->mbox[0]);
+ return 0;
+}
+
+static int cn23xx_setup_vf_mbox(struct octeon_device *oct)
+{
+ struct octeon_mbox *mbox = NULL;
+
+ mbox = vmalloc(sizeof(*mbox));
+ if (!mbox)
+ return 1;
+
+ memset(mbox, 0, sizeof(struct octeon_mbox));
+
+ spin_lock_init(&mbox->lock);
+
+ mbox->oct_dev = oct;
+
+ mbox->q_no = 0;
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+
+ /* VF mbox interrupt reg */
+ mbox->mbox_int_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_VF_SLI_PKT_MBOX_INT(0);
+ /* VF reads from SIG0 reg */
+ mbox->mbox_read_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 0);
+ /* VF writes into SIG1 reg */
+ mbox->mbox_write_reg =
+ (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1);
+
+ INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
+ cn23xx_vf_mbox_thread);
+
+ mbox->mbox_poll_wk.ctxptr = mbox;
+
+ oct->mbox[0] = mbox;
+
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+
+ return 0;
+}
+
+static int cn23xx_enable_vf_io_queues(struct octeon_device *oct)
+{
+ u32 q_no;
+
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ u64 reg_val;
+
+ /* set the corresponding IQ IS_64B bit */
+ if (oct->io_qmask.iq64B & BIT_ULL(q_no)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val |= CN23XX_PKT_INPUT_CTL_IS_64B;
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+
+ /* set the corresponding IQ ENB bit */
+ if (oct->io_qmask.iq & BIT_ULL(q_no)) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+ reg_val |= CN23XX_PKT_INPUT_CTL_RING_ENB;
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
+ }
+ }
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ u32 reg_val;
+
+ /* set the corresponding OQ ENB bit */
+ if (oct->io_qmask.oq & BIT_ULL(q_no)) {
+ reg_val = octeon_read_csr(
+ oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no));
+ reg_val |= CN23XX_PKT_OUTPUT_CTL_RING_ENB;
+ octeon_write_csr(
+ oct, CN23XX_VF_SLI_OQ_PKT_CONTROL(q_no), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+static void cn23xx_disable_vf_io_queues(struct octeon_device *oct)
+{
+ u32 num_queues = oct->num_iqs;
+
+ /* per HRM, rings can only be disabled via reset operation,
+ * NOT via SLI_PKT()_INPUT/OUTPUT_CONTROL[ENB]
+ */
+ if (num_queues < oct->num_oqs)
+ num_queues = oct->num_oqs;
+
+ cn23xx_vf_reset_io_queues(oct, num_queues);
+}
+
+void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct)
+{
+ struct octeon_mbox_cmd mbox_cmd;
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 0;
+ mbox_cmd.msg.s.cmd = OCTEON_VF_FLR_REQUEST;
+ mbox_cmd.msg.s.len = 1;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = NULL;
+ mbox_cmd.fn_arg = NULL;
+
+ octeon_mbox_write(oct, &mbox_cmd);
+}
+
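+/* The handshake status word below doubles as a completion flag: the callback
+ * stores (PF major version << 16) | 1, so bit 0 signals that the reply
+ * arrived and cn23xx_octeon_pfvf_handshake() recovers the PF's major version
+ * with (ret >> 16).
+ */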
+static void octeon_pfvf_hs_callback(struct octeon_device *oct,
+ struct octeon_mbox_cmd *cmd,
+ void *arg)
+{
+ u32 major = 0;
+
+ memcpy((uint8_t *)&oct->pfvf_hsword, cmd->msg.s.params,
+ CN23XX_MAILBOX_MSGPARAM_SIZE);
+ if (cmd->recv_len > 1) {
+ major = ((struct lio_version *)(cmd->data))->major;
+ major = major << 16;
+ }
+
+ atomic_set((atomic_t *)arg, major | 1);
+}
+
+int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct)
+{
+ struct octeon_mbox_cmd mbox_cmd;
+ u32 q_no, count = 0;
+ atomic_t status;
+ u32 pfmajor;
+ u32 vfmajor;
+ u32 ret;
+
+ /* Sending VF_ACTIVE indication to the PF driver */
+ dev_dbg(&oct->pci_dev->dev, "requesting info from pf\n");
+
+ mbox_cmd.msg.u64 = 0;
+ mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ mbox_cmd.msg.s.resp_needed = 1;
+ mbox_cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
+ mbox_cmd.msg.s.len = 2;
+ mbox_cmd.data[0] = 0;
+ ((struct lio_version *)&mbox_cmd.data[0])->major =
+ LIQUIDIO_BASE_MAJOR_VERSION;
+ ((struct lio_version *)&mbox_cmd.data[0])->minor =
+ LIQUIDIO_BASE_MINOR_VERSION;
+ ((struct lio_version *)&mbox_cmd.data[0])->micro =
+ LIQUIDIO_BASE_MICRO_VERSION;
+ mbox_cmd.q_no = 0;
+ mbox_cmd.recv_len = 0;
+ mbox_cmd.recv_status = 0;
+ mbox_cmd.fn = (octeon_mbox_callback_t)octeon_pfvf_hs_callback;
+ mbox_cmd.fn_arg = &status;
+
+ octeon_mbox_write(oct, &mbox_cmd);
+
+ atomic_set(&status, 0);
+
+ do {
+ schedule_timeout_uninterruptible(1);
+ } while ((!atomic_read(&status)) && (count++ < 100000));
+
+ ret = atomic_read(&status);
+ if (!ret) {
+ dev_err(&oct->pci_dev->dev, "octeon_pfvf_handshake timeout\n");
+ return 1;
+ }
+
+ for (q_no = 0 ; q_no < oct->num_iqs ; q_no++)
+ oct->instr_queue[q_no]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
+
+ vfmajor = LIQUIDIO_BASE_MAJOR_VERSION;
+ pfmajor = ret >> 16;
+ if (pfmajor != vfmajor) {
+ dev_err(&oct->pci_dev->dev,
+ "VF Liquidio driver (major version %d) is not compatible with Liquidio PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev,
+ "VF Liquidio driver (major version %d), Liquidio PF driver (major version %d)\n",
+ vfmajor, pfmajor);
+
+ dev_dbg(&oct->pci_dev->dev, "got data from pf pkind is %d\n",
+ oct->pfvf_hsword.pkind);
+
+ return 0;
+}
+
+static void cn23xx_handle_vf_mbox_intr(struct octeon_ioq_vector *ioq_vector)
+{
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ u64 mbox_int_val;
+
+ if (!ioq_vector->droq_index) {
+ /* read and clear by writing 1 */
+ mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);
+ writeq(mbox_int_val, oct->mbox[0]->mbox_int_reg);
+ if (octeon_mbox_read(oct->mbox[0]))
+ schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
+ msecs_to_jiffies(0));
+ }
+}
+
+static u64 cn23xx_vf_msix_interrupt_handler(void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+ u64 pkts_sent;
+ u64 ret = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
+ pkts_sent = readq(droq->pkts_sent_reg);
+
+ /* Proceed only if our device has interrupted. A value of all f's
+ * means the PCI read failed (e.g. the interrupt was triggered on an
+ * error), so bail out in that case as well.
+ */
+ if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
+ return ret;
+
+ /* Write count reg in sli_pkt_cnts to clear these int. */
+ if ((pkts_sent & CN23XX_INTR_PO_INT) ||
+ (pkts_sent & CN23XX_INTR_PI_INT)) {
+ if (pkts_sent & CN23XX_INTR_PO_INT)
+ ret |= MSIX_PO_INT;
+ }
+
+ if (pkts_sent & CN23XX_INTR_PI_INT)
+ /* We will clear the count when we update the read_index. */
+ ret |= MSIX_PI_INT;
+
+ if (pkts_sent & CN23XX_INTR_MBOX_INT) {
+ cn23xx_handle_vf_mbox_intr(ioq_vector);
+ ret |= MSIX_MBOX_INT;
+ }
+
+ return ret;
+}
+
+static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
+{
+ u32 pkt_in_done = readl(iq->inst_cnt_reg);
+ u32 last_done;
+ u32 new_idx;
+
+ last_done = pkt_in_done - iq->pkt_in_done;
+ iq->pkt_in_done = pkt_in_done;
+
+ /* Modulo of the new index with the IQ size will give us
+ * the new index. The iq->reset_instr_cnt is always zero for
+ * cn23xx, so no extra adjustments are needed.
+ */
+ new_idx = (iq->octeon_read_index +
+ (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
+ iq->max_count;
+
+ return new_idx;
+}
+
+static void cn23xx_enable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+ u32 q_no, time_threshold;
+
+ if (intr_flag & OCTEON_OUTPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ /* Set up interrupt packet and time thresholds
+ * for all the OQs
+ */
+ time_threshold = cn23xx_vf_get_oq_ticks(
+ oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
+
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
+ ((u64)time_threshold << 32)));
+ }
+ }
+
+ if (intr_flag & OCTEON_INPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ /* Set CINT_ENB to enable IQ interrupt */
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+ ((octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
+ ~CN23XX_PKT_IN_DONE_CNT_MASK) |
+ CN23XX_INTR_CINT_ENB));
+ }
+ }
+
+ /* Set queue-0 MBOX_ENB to enable VF mailbox interrupt */
+ if (intr_flag & OCTEON_MBOX_INTR) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
+ (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) |
+ CN23XX_INTR_MBOX_ENB));
+ }
+}
+
+static void cn23xx_disable_vf_interrupt(struct octeon_device *oct, u8 intr_flag)
+{
+ u32 q_no;
+
+ if (intr_flag & OCTEON_OUTPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ /* Write all 1's in INT_LEVEL reg to disable PO_INT */
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ 0x3fffffffffffff);
+ }
+ }
+ if (intr_flag & OCTEON_INPUT_INTR) {
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_INSTR_COUNT64(q_no)) &
+ ~(CN23XX_INTR_CINT_ENB |
+ CN23XX_PKT_IN_DONE_CNT_MASK)));
+ }
+ }
+
+ if (intr_flag & OCTEON_MBOX_INTR) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_PKT_MBOX_INT(0),
+ (octeon_read_csr64(oct, CN23XX_VF_SLI_PKT_MBOX_INT(0)) &
+ ~CN23XX_INTR_MBOX_ENB));
+ }
+}
+
+int cn23xx_setup_octeon_vf_device(struct octeon_device *oct)
+{
+ struct octeon_cn23xx_vf *cn23xx = (struct octeon_cn23xx_vf *)oct->chip;
+ u32 rings_per_vf;
+ u64 reg_val;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ /* INPUT_CONTROL[RPVF] gives the VF IOq count */
+ reg_val = octeon_read_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(0));
+
+ oct->pf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
+ oct->vf_num = (reg_val >> CN23XX_PKT_INPUT_CTL_VF_NUM_POS) &
+ CN23XX_PKT_INPUT_CTL_VF_NUM_MASK;
+
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+
+ rings_per_vf = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+
+ cn23xx->conf = oct_get_config_info(oct, LIO_23XX);
+ if (!cn23xx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN23XX\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ if (oct->sriov_info.rings_per_vf > rings_per_vf) {
+ dev_warn(&oct->pci_dev->dev,
+ "num_queues:%d greater than PF configured rings_per_vf:%d. Reducing to %d.\n",
+ oct->sriov_info.rings_per_vf, rings_per_vf,
+ rings_per_vf);
+ oct->sriov_info.rings_per_vf = rings_per_vf;
+ } else {
+ if (rings_per_vf > num_present_cpus()) {
+ dev_warn(&oct->pci_dev->dev,
+ "PF configured rings_per_vf:%d greater than num_cpu:%d. Using rings_per_vf:%d equal to num cpus\n",
+ rings_per_vf,
+ num_present_cpus(),
+ num_present_cpus());
+ oct->sriov_info.rings_per_vf =
+ num_present_cpus();
+ } else {
+ oct->sriov_info.rings_per_vf = rings_per_vf;
+ }
+ }
+
+ oct->fn_list.setup_iq_regs = cn23xx_setup_vf_iq_regs;
+ oct->fn_list.setup_oq_regs = cn23xx_setup_vf_oq_regs;
+ oct->fn_list.setup_mbox = cn23xx_setup_vf_mbox;
+ oct->fn_list.free_mbox = cn23xx_free_vf_mbox;
+
+ oct->fn_list.msix_interrupt_handler = cn23xx_vf_msix_interrupt_handler;
+
+ oct->fn_list.setup_device_regs = cn23xx_setup_vf_device_regs;
+ oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;
+
+ oct->fn_list.enable_interrupt = cn23xx_enable_vf_interrupt;
+ oct->fn_list.disable_interrupt = cn23xx_disable_vf_interrupt;
+
+ oct->fn_list.enable_io_queues = cn23xx_enable_vf_io_queues;
+ oct->fn_list.disable_io_queues = cn23xx_disable_vf_io_queues;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
new file mode 100644
index 000000000..2d06097d3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h
@@ -0,0 +1,48 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn23xx_vf_device.h
+ * \brief Host Driver: Routines that perform CN23XX specific operations.
+ */
+
+#ifndef __CN23XX_VF_DEVICE_H__
+#define __CN23XX_VF_DEVICE_H__
+
+#include "cn23xx_vf_regs.h"
+
+/* Register addresses and configuration for CN23XX devices.
+ * If device-specific changes need to be made, add a struct with
+ * device-specific fields as shown in the commented section.
+ */
+struct octeon_cn23xx_vf {
+ struct octeon_config *conf;
+};
+
+#define BUSY_READING_REG_VF_LOOP_COUNT 10000
+
+#define CN23XX_MAILBOX_MSGPARAM_SIZE 6
+
+void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct);
+
+int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
+
+int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
+
+u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+
+void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
new file mode 100644
index 000000000..e95610941
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -0,0 +1,274 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn23xx_vf_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN23XX vf functions.
+ */
+
+#ifndef __CN23XX_VF_REGS_H__
+#define __CN23XX_VF_REGS_H__
+
+#define CN23XX_CONFIG_XPANSION_BAR 0x38
+
+#define CN23XX_CONFIG_PCIE_CAP 0x70
+#define CN23XX_CONFIG_PCIE_DEVCAP 0x74
+#define CN23XX_CONFIG_PCIE_DEVCTL 0x78
+#define CN23XX_CONFIG_PCIE_LINKCAP 0x7C
+#define CN23XX_CONFIG_PCIE_LINKCTL 0x80
+#define CN23XX_CONFIG_PCIE_SLOTCAP 0x84
+#define CN23XX_CONFIG_PCIE_SLOTCTL 0x88
+
+#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
+
+/* The input jabber is used to determine the TSO max size.
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
+ * PKO_BUG_24989_WQE_LEN
+ */
+#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
+
+/* ############## BAR0 Registers ################ */
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_VF_IQ_OFFSET 0x20000
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 64 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 0x10040
+
+/* 64 registers for Input Queues Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN23XX_VF_SLI_IQ_BASE_ADDR_START64 0x10010
+
+/* 64 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN23XX_VF_SLI_IQ_DOORBELL_START 0x10020
+
+/* 64 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN23XX_VF_SLI_IQ_SIZE_START 0x10030
+
+/* 64 registers (64-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT(0..63)_INPUT_CONTROL.
+ */
+#define CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 0x10000
+
+/*------- Request Queue Macros ---------*/
+#define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \
+ (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \
+ (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_SIZE(iq) \
+ (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_DOORBELL(iq) \
+ (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
+
+#define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \
+ (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
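+/* Example: for iq = 2, CN23XX_VF_SLI_IQ_DOORBELL(2) evaluates to
+ * 0x10020 + 2 * 0x20000 = 0x50020, i.e. the doorbell of the third ring.
+ */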
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_INPUT_CTL_VF_NUM BIT_ULL(32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM BIT(29)
+/* Number of instructions to be read in one MAC read request;
+ * set to the maximum value (4).
+ */
+#define CN23XX_PKT_INPUT_CTL_RDSIZE (3 << 25)
+#define CN23XX_PKT_INPUT_CTL_IS_64B BIT(24)
+#define CN23XX_PKT_INPUT_CTL_RST BIT(23)
+#define CN23XX_PKT_INPUT_CTL_QUIET BIT(28)
+#define CN23XX_PKT_INPUT_CTL_RING_ENB BIT(22)
+#define CN23XX_PKT_INPUT_CTL_DATA_NS BIT(8)
+#define CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN23XX_PKT_INPUT_CTL_DATA_RO BIT(5)
+#define CN23XX_PKT_INPUT_CTL_USE_CSR BIT(4)
+#define CN23XX_PKT_INPUT_CTL_GATHER_NS BIT(3)
+#define CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP (2)
+#define CN23XX_PKT_INPUT_CTL_GATHER_RO (1)
+
+/** Rings per Virtual Function [RO] **/
+#define CN23XX_PKT_INPUT_CTL_RPVF_MASK (0x3F)
+#define CN23XX_PKT_INPUT_CTL_RPVF_POS (48)
+/* These bits [47:45] [RO] give the physical function number within the MAC */
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_MASK (0x7)
+#define CN23XX_PKT_INPUT_CTL_PF_NUM_POS (45)
+/* These bits [44:32] [RO] give the virtual function number within the PF */
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_MASK (0x1FFF)
+#define CN23XX_PKT_INPUT_CTL_VF_NUM_POS (32)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_MASK (0x3)
+#define CN23XX_PKT_INPUT_CTL_MAC_NUM_POS (29)
+#define CN23XX_PKT_IN_DONE_WMARK_MASK (0xFFFFULL)
+#define CN23XX_PKT_IN_DONE_WMARK_BIT_POS (32)
+#define CN23XX_PKT_IN_DONE_CNT_MASK (0x00000000FFFFFFFFULL)
+
+#ifdef __LITTLE_ENDIAN_BITFIELD
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE \
+ | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN23XX_PKT_INPUT_CTL_USE_CSR)
+#else
+#define CN23XX_PKT_INPUT_CTL_MASK \
+ (CN23XX_PKT_INPUT_CTL_RDSIZE \
+ | CN23XX_PKT_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN23XX_PKT_INPUT_CTL_USE_CSR \
+ | CN23XX_PKT_INPUT_CTL_GATHER_ES_64B_SWAP)
+#endif
+
+/** Masks for SLI_PKT_IN_DONE(0..63)_CNTS Register */
+#define CN23XX_IN_DONE_CNTS_PI_INT BIT_ULL(62)
+#define CN23XX_IN_DONE_CNTS_CINT_ENB BIT_ULL(48)
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 64 registers for Output queue control - SLI_PKT(0..63)_OUTPUT_CONTROL */
+#define CN23XX_VF_SLI_OQ_PKT_CONTROL_START 0x10050
+
+/* 64 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE 0x10060
+
+/* 64 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN23XX_VF_SLI_OQ_BASE_ADDR_START64 0x10070
+
+/* 64 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN23XX_VF_SLI_OQ_PKT_CREDITS_START 0x10080
+
+/* 64 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN23XX_VF_SLI_OQ_SIZE_START 0x10090
+
+/* 64 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN23XX_VF_SLI_OQ_PKT_SENT_START 0x100B0
+
+/* 64 registers for Output Queue INT Levels - SLI_PKT0_INT_LEVELS */
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 0x100A0
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN23XX_VF_OQ_OFFSET 0x20000
+
+/*------- Output Queue Macros ---------*/
+
+#define CN23XX_VF_SLI_OQ_PKT_CONTROL(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_BASE_ADDR64(oq) \
+ (CN23XX_VF_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_SIZE(oq) \
+ (CN23XX_VF_SLI_OQ_SIZE_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKTS_SENT(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_CREDITS_START + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+/* Macros for accessing CNT and TIME separately from INT_LEVELS */
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_CNT(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
+
+#define CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_TIME(oq) \
+ (CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_START64 + \
+ ((oq) * CN23XX_VF_OQ_OFFSET) + 4)
+
+/*------------------ Masks ----------------*/
+#define CN23XX_PKT_OUTPUT_CTL_TENB BIT(13)
+#define CN23XX_PKT_OUTPUT_CTL_CENB BIT(12)
+#define CN23XX_PKT_OUTPUT_CTL_IPTR BIT(11)
+#define CN23XX_PKT_OUTPUT_CTL_ES BIT(9)
+#define CN23XX_PKT_OUTPUT_CTL_NSR BIT(8)
+#define CN23XX_PKT_OUTPUT_CTL_ROR BIT(7)
+#define CN23XX_PKT_OUTPUT_CTL_DPTR BIT(6)
+#define CN23XX_PKT_OUTPUT_CTL_BMODE BIT(5)
+#define CN23XX_PKT_OUTPUT_CTL_ES_P BIT(3)
+#define CN23XX_PKT_OUTPUT_CTL_NSR_P BIT(2)
+#define CN23XX_PKT_OUTPUT_CTL_ROR_P BIT(1)
+#define CN23XX_PKT_OUTPUT_CTL_RING_ENB BIT(0)
+
+/*######################### Mailbox Reg Macros ########################*/
+#define CN23XX_VF_SLI_PKT_MBOX_INT_START 0x10210
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START 0x10200
+
+#define CN23XX_SLI_MBOX_OFFSET 0x20000
+#define CN23XX_SLI_MBOX_SIG_IDX_OFFSET 0x8
+
+#define CN23XX_VF_SLI_PKT_MBOX_INT(q) \
+ (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
+
+#define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
+ (CN23XX_SLI_PKT_PF_VF_MBOX_SIG_START + \
+ ((q) * CN23XX_SLI_MBOX_OFFSET + \
+ (idx) * CN23XX_SLI_MBOX_SIG_IDX_OFFSET))
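+/* Example: CN23XX_SLI_PKT_PF_VF_MBOX_SIG(0, 1) = 0x10200 + 0 + 1 * 0x8
+ * = 0x10208, the second signature word of ring 0's mailbox.
+ */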
+
+/*######################## INTERRUPTS #########################*/
+
+#define CN23XX_VF_SLI_INT_SUM_START 0x100D0
+
+#define CN23XX_VF_SLI_INT_SUM(q) \
+ (CN23XX_VF_SLI_INT_SUM_START + ((q) * CN23XX_VF_IQ_OFFSET))
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN23XX_INTR_PO_INT BIT_ULL(63)
+#define CN23XX_INTR_PI_INT BIT_ULL(62)
+#define CN23XX_INTR_MBOX_INT BIT_ULL(61)
+#define CN23XX_INTR_RESEND BIT_ULL(60)
+
+#define CN23XX_INTR_CINT_ENB BIT_ULL(48)
+#define CN23XX_INTR_MBOX_ENB BIT(0)
+
+/*############################ MIO #########################*/
+#define CN23XX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL
+#define CN23XX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL
+#define CN23XX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL
+#define CN23XX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL
+#define CN23XX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL
+#define CN23XX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL
+#define CN23XX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define CN23XX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define CN23XX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL
+#define CN23XX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL
+#define CN23XX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL
+#define CN23XX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL
+#define CN23XX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL
+#define CN23XX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL
+
+/*############################ RST #########################*/
+#define CN23XX_RST_BOOT 0x0001180006001600ULL
+
+/*######################## MSIX TABLE #########################*/
+
+#define CN23XX_MSIX_TABLE_ADDR_START 0x0
+#define CN23XX_MSIX_TABLE_DATA_START 0x8
+
+#define CN23XX_MSIX_TABLE_SIZE 0x10
+#define CN23XX_MSIX_TABLE_ENTRIES 0x41
+
+#define CN23XX_MSIX_ENTRY_VECTOR_CTL BIT_ULL(32)
+
+#define CN23XX_MSIX_TABLE_ADDR(idx) \
+ (CN23XX_MSIX_TABLE_ADDR_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
+
+#define CN23XX_MSIX_TABLE_DATA(idx) \
+ (CN23XX_MSIX_TABLE_DATA_START + ((idx) * CN23XX_MSIX_TABLE_SIZE))
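+/* Example: MSI-X entry 2 has its address at CN23XX_MSIX_TABLE_ADDR(2)
+ * = 0x0 + 2 * 0x10 = 0x20 and its data word at 0x28.
+ */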
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
new file mode 100644
index 000000000..39643be8c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -0,0 +1,737 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+
+int lio_cn6xxx_soft_reset(struct octeon_device *oct)
+{
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");
+
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
+ octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);
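+	/* SLI_SCRATCH1 is cleared by a successful reset; the non-zero check
+	 * after the delay below is what detects a failed soft reset.
+	 */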
+
+ lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
+ lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);
+
+	/* Wait for 100ms as Octeon resets. */
+ mdelay(100);
+
+ if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
+ dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
+ octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
+
+ return 0;
+}
+
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
+{
+ u32 val;
+
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+ if (val & 0x000c0000) {
+ dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
+ val & 0x000c0000);
+ }
+
+ val |= 0xf; /* Enable Link error reporting */
+
+ dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+}
+
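+/* Max Payload Size lives in bits 7:5 of the PCIe Device Control register
+ * (an encoding of 128 bytes << value), hence the (0x7 << 5) mask below;
+ * the chosen value is also mirrored into DPI_SLI_PRT0_CFG.
+ */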
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MPS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mps == PCIE_MPS_DEFAULT) {
+ mps = ((val & (0x7 << 5)) >> 5);
+ } else {
+ val &= ~(0x7 << 5); /* Turn off any MPS bits */
+ val |= (mps << 5); /* Set MPS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= (mps << 4);
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs)
+{
+ u32 val;
+ u64 r64;
+
+ /* Read config register for MRRS */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
+
+ if (mrrs == PCIE_MRRS_DEFAULT) {
+ mrrs = ((val & (0x7 << 12)) >> 12);
+ } else {
+ val &= ~(0x7 << 12); /* Turn off any MRRS bits */
+ val |= (mrrs << 12); /* Set MRRS */
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
+ }
+
+ /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
+ r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
+ r64 |= mrrs;
+ octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);
+
+ /* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
+ r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+ r64 |= mrrs;
+ lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
+}
+
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
+{
+	/* Bits 29:24 of MIO_RST_BOOT hold the reference clock multiplier
+	 * for SLI.
+ */
+ return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
+}
+
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
+ u32 time_intr_in_us)
+{
+ /* This gives the SLI clock per microsec */
+ u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);
+
+	/* The core clock per microsecond divided by 1024 (clocks per OQ tick)
+	 * would be fractional. To avoid that, we use the method below.
+	 */
+
+ /* This gives the clock cycles per millisecond */
+ oqticks_per_us *= 1000;
+
+ /* This gives the oq ticks (1024 core clock cycles) per millisecond */
+ oqticks_per_us /= 1024;
+
+	/* time_intr is in microseconds. The next 2 steps give the OQ ticks
+	 * corresponding to time_intr.
+	 */
+ oqticks_per_us *= time_intr_in_us;
+ oqticks_per_us /= 1000;
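+	/* For example (illustrative values): a 600 MHz SLI clock and
+	 * time_intr_in_us = 100 give (600 * 1000 / 1024) * 100 / 1000
+	 * = 58 ticks with the integer arithmetic above.
+	 */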
+
+ return oqticks_per_us;
+}
+
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
+{
+ /* Select Round-Robin Arb, ES, RO, NS for Input Queues */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
+ CN6XXX_INPUT_CTL_MASK);
+
+ /* Instruction Read Size - Max 4 instructions per PCIE Read */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
+ 0xFFFFFFFFFFFFFFFFULL);
+
+ /* Select PCIE Port for all Input rings. */
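+	/* Multiplying the port number by 0x5555555555555555ULL replicates it
+	 * into every 2-bit per-ring field (exact for port values 0-3, since
+	 * no carries cross the 2-bit fields).
+	 */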
+ octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
+ (oct->pcie_port * 0x5555555555555555ULL));
+}
+
+static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ u64 pktctl;
+
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 66XX SPECIFIC */
+ if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
+		/* Disable RING_EN if only up to 4 rings are used. */
+ pktctl &= ~(1 << 4);
+ else
+ pktctl |= (1 << 4);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
+{
+ u32 time_threshold;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+	/* Select PCI-E Port for all Output queues */
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
+ (oct->pcie_port * 0x5555555555555555ULL));
+
+ if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
+ } else {
+		/* Set Output queue watermark to 0 to disable backpressure */
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
+ }
+
+	/* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
+
+ /* Select ES, RO, NS setting from register for Output Queue Packet
+ * Address
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
+
+ /* No Relaxed Ordering, No Snoop, 64-bit swap for Output
+ * Queue ScatterList
+ */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);
+
+	/* ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
+#ifdef __BIG_ENDIAN_BITFIELD
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
+ 0x5555555555555555ULL);
+#else
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
+#endif
+
+	/* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
+ 0x5555555555555555ULL);
+
+	/* Set up interrupt packet and time threshold */
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
+ time_threshold =
+ lio_cn6xxx_get_oq_ticks(oct, (u32)
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf));
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
+}
+
+static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn66xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+	/* Default error timeout value should be 0x200000 to avoid a host
+	 * hang when an invalid register is read.
+	 */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+ return 0;
+}
+
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);
+
+ /* Write the start of the input queue's ring and its size */
+ octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
+ iq->base_addr_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);
+
+ /* Remember the doorbell & instruction count register addr for this
+ * queue
+ */
+ iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
+ iq->inst_cnt_reg = oct->mmio[0].hw_addr
+ + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
+ dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
+
+ /* Store the current instruction counter
+ * (used in flush_iq calculation)
+ */
+ iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
+}
+
+static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
+{
+ lio_cn6xxx_setup_iq_regs(oct, iq_no);
+
+ /* Backpressure for this queue - WMARK set to all F's. This effectively
+ * disables the backpressure mechanism.
+ */
+ octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
+ (0xFFFFFFFFULL << 32));
+}
+
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
+{
+ u32 intr;
+ struct octeon_droq *droq = oct->droq[oq_no];
+
+ octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
+ droq->desc_ring_dma);
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);
+
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
+ droq->buffer_size);
+
+ /* Get the mapped address of the pkt_sent and pkts_credit regs */
+ droq->pkts_sent_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
+ droq->pkts_credit_reg =
+ oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);
+
+ /* Enable this output queue to generate Packet Timer Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);
+
+	/* Enable this output queue to generate Packet Count Interrupt */
+ intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ intr |= (1 << oq_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
+}
+
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
+{
+ u32 mask;
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
+ mask |= oct->io_qmask.iq64B;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask |= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask |= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+ return 0;
+}
+
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
+{
+ int i;
+ u32 mask, loop = HZ;
+ u32 d32;
+
+ /* Reset the Enable bits for Input Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
+ mask ^= oct->io_qmask.iq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
+
+	/* Wait until hardware indicates that the queues are in reset. */
+ mask = (u32)oct->io_qmask.iq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Input queue. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
+ }
+
+ /* Reset the Enable bits for Output Queues. */
+ mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+ mask ^= oct->io_qmask.oq;
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);
+
+	/* Wait until hardware indicates that the queues are in reset. */
+ loop = HZ;
+ mask = (u32)oct->io_qmask.oq;
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ while (((d32 & mask) != mask) && loop--) {
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
+ schedule_timeout_uninterruptible(1);
+ }
+
+ /* Reset the doorbell register for each Output queue. */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
+ }
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);
+
+ d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ if (d32)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
+}
+
+void
+lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
+ u64 core_addr,
+ u32 idx,
+ int valid)
+{
+ u64 bar1;
+
+ if (valid == 0) {
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+ return;
+ }
+
+	/* Bits 17:4 of the PCI_BAR1_INDEXx register store bits 35:22 of
+	 * the core address.
+	 */
+ lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
+ CN6XXX_BAR1_REG(idx, oct->pcie_port));
+
+ bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
+ u32 idx,
+ u32 mask)
+{
+ lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
+{
+ return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
+}
+
+u32
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
+{
+ u32 new_idx = readl(iq->inst_cnt_reg);
+
+	/* The instruction count register is a 32-bit counter that can roll
+	 * over. The counter's value at init time was saved in
+	 * reset_instr_cnt.
+	 */
+ if (iq->reset_instr_cnt < new_idx)
+ new_idx -= iq->reset_instr_cnt;
+ else
+ new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;
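+	/* Example: reset_instr_cnt = 0xFFFFFFF0 and a hardware count of 0x10
+	 * means 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) + 1 = 0x20 instructions have
+	 * completed since init, despite the counter having wrapped.
+	 */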
+
+	/* Taking the count modulo the IQ size gives the new read index. */
+ new_idx %= iq->max_count;
+
+ return new_idx;
+}
+
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;
+
+ /* Enable Interrupt */
+ writeq(mask, cn6xxx->intr_enb_reg64);
+}
+
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
+ u8 unused __attribute__((unused)))
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ /* Disable Interrupts */
+ writeq(0, cn6xxx->intr_enb_reg64);
+}
+
+static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
+{
+	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
+	 * to determine the PCIE port number.
+	 */
+ oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;
+
+ dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
+}
+
+static void
+lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
+{
+ dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
+ CVM_CAST64(intr64));
+}
+
+static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+{
+ struct octeon_droq *droq;
+ int oq_no;
+ u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+ u32 droq_cnt_enb, droq_cnt_mask;
+
+ droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
+ droq_mask = droq_cnt_mask & droq_cnt_enb;
+
+ droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
+ droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ droq_mask |= (droq_time_mask & droq_int_enb);
+
+ droq_mask &= oct->io_qmask.oq;
+
+ oct->droq_intr = 0;
+
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
+ if (!(droq_mask & BIT_ULL(oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+ pkt_count = octeon_droq_check_hw_for_pkts(droq);
+ if (pkt_count) {
+ oct->droq_intr |= BIT_ULL(oq_no);
+ if (droq->ops.poll_mode) {
+ u32 value;
+ u32 reg;
+
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ /* disable interrupts for this droq */
+ spin_lock
+ (&cn6xxx->lock_for_droq_int_enb_reg);
+ reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+ reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
+ value = octeon_read_csr(oct, reg);
+ value &= ~(1 << oq_no);
+ octeon_write_csr(oct, reg, value);
+
+ spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
+ }
+ }
+ }
+
+ droq_time_mask &= oct->io_qmask.oq;
+ droq_cnt_mask &= oct->io_qmask.oq;
+
+ /* Reset the PKT_CNT/TIME_INT registers. */
+ if (droq_time_mask)
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);
+
+ if (droq_cnt_mask) /* reset PKT_CNT register:66xx */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);
+
+ return 0;
+}
+
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+ u64 intr64;
+
+ intr64 = readq(cn6xxx->intr_sum_reg64);
+
+	/* Proceed only if our device has raised an interrupt. Also check
+	 * for all F's, which is what a failed PCI read returns when the
+	 * interrupt was triggered by an error.
+	 */
+ if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
+ return IRQ_NONE;
+
+ oct->int_status = 0;
+
+ if (intr64 & CN6XXX_INTR_ERR)
+ lio_cn6xxx_process_pcie_error_intr(oct, intr64);
+
+ if (intr64 & CN6XXX_INTR_PKT_DATA) {
+ lio_cn6xxx_process_droq_intr_regs(oct);
+ oct->int_status |= OCT_DEV_INTR_PKT_DATA;
+ }
+
+ if (intr64 & CN6XXX_INTR_DMA0_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
+
+ if (intr64 & CN6XXX_INTR_DMA1_FORCE)
+ oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;
+
+ /* Clear the current interrupts */
+ writeq(intr64, cn6xxx->intr_sum_reg64);
+
+ return IRQ_HANDLED;
+}
+
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
+ void *chip,
+ struct octeon_reg_list *reg_list)
+{
+ u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;
+
+ reg_list->pci_win_wr_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
+ reg_list->pci_win_wr_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
+ reg_list->pci_win_wr_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);
+
+ reg_list->pci_win_rd_addr_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
+ reg_list->pci_win_rd_addr_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
+ reg_list->pci_win_rd_addr =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);
+
+ reg_list->pci_win_wr_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
+ reg_list->pci_win_wr_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
+ reg_list->pci_win_wr_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);
+
+ reg_list->pci_win_rd_data_hi =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
+ reg_list->pci_win_rd_data_lo =
+ (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
+ reg_list->pci_win_rd_data =
+ (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);
+
+ lio_cn6xxx_get_pcie_qlmport(oct);
+
+ cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
+ cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
+ cn6xxx->intr_enb_reg64 =
+ bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
+}
+
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ cn6xxx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, LIO_210SV);
+ if (!cn6xxx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ return 0;
+}
+
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *conf6xxx)
+{
+ if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_IQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_INPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
+ dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
+ __func__, CFG_GET_OQ_MAX_Q(conf6xxx),
+ CN6XXX_MAX_OUTPUT_QUEUES);
+ return 1;
+ }
+
+ if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
+ CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
+ __func__);
+ return 1;
+ }
+ if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
+ dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
+ __func__);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
new file mode 100644
index 000000000..8ed57134e
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -0,0 +1,98 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn66xx_device.h
+ * \brief Host Driver: Routines that perform CN66XX specific operations.
+ */
+
+#ifndef __CN66XX_DEVICE_H__
+#define __CN66XX_DEVICE_H__
+
+/* Register address and configuration for CN6XXX devices.
+ * If device-specific changes need to be made, add a struct to include
+ * device-specific fields as shown in the commented section below.
+ */
+struct octeon_cn6xxx {
+ /** PCI interrupt summary register */
+ u8 __iomem *intr_sum_reg64;
+
+ /** PCI interrupt enable register */
+ u8 __iomem *intr_enb_reg64;
+
+ /** The PCI interrupt mask used by interrupt handler */
+ u64 intr_mask64;
+
+ struct octeon_config *conf;
+
+ /* Example additional fields - not used currently
+ * struct {
+ * }cn6xyz;
+ */
+
+ /* For the purpose of atomic access to interrupt enable reg */
+ spinlock_t lock_for_droq_int_enb_reg;
+
+};
+
+enum octeon_pcie_mps {
+ PCIE_MPS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MPS_128B = 0,
+ PCIE_MPS_256B = 1
+};
+
+enum octeon_pcie_mrrs {
+ PCIE_MRRS_DEFAULT = -1, /* Use the default setup by BIOS */
+ PCIE_MRRS_128B = 0,
+ PCIE_MRRS_256B = 1,
+ PCIE_MRRS_512B = 2,
+ PCIE_MRRS_1024B = 3,
+ PCIE_MRRS_2048B = 4,
+ PCIE_MRRS_4096B = 5
+};
+
+/* Common functions for 66xx and 68xx */
+int lio_cn6xxx_soft_reset(struct octeon_device *oct);
+void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct);
+void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
+ enum octeon_pcie_mps mps);
+void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
+ enum octeon_pcie_mrrs mrrs);
+void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
+void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
+void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
+int lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
+void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
+irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
+void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
+ u32 idx, int valid);
+void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
+u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
+u32
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
+void lio_cn6xxx_enable_interrupt(struct octeon_device *oct, u8 unused);
+void lio_cn6xxx_disable_interrupt(struct octeon_device *oct, u8 unused);
+void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
+void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip,
+ struct octeon_reg_list *reg_list);
+u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct);
+u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
+int lio_setup_cn66xx_octeon_device(struct octeon_device *oct);
+int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
+ struct octeon_config *conf6xxx);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
new file mode 100644
index 000000000..7aad40b2a
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h
@@ -0,0 +1,530 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn66xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN66XX devices.
+ */
+
+#ifndef __CN66XX_REGS_H__
+#define __CN66XX_REGS_H__
+
+#define CN6XXX_XPANSION_BAR 0x30
+
+#define CN6XXX_MSI_CAP 0x50
+#define CN6XXX_MSI_ADDR_LO 0x54
+#define CN6XXX_MSI_ADDR_HI 0x58
+#define CN6XXX_MSI_DATA 0x5C
+
+#define CN6XXX_PCIE_CAP 0x70
+#define CN6XXX_PCIE_DEVCAP 0x74
+#define CN6XXX_PCIE_DEVCTL 0x78
+#define CN6XXX_PCIE_LINKCAP 0x7C
+#define CN6XXX_PCIE_LINKCTL 0x80
+#define CN6XXX_PCIE_SLOTCAP 0x84
+#define CN6XXX_PCIE_SLOTCTL 0x88
+
+#define CN6XXX_PCIE_ENH_CAP 0x100
+#define CN6XXX_PCIE_UNCORR_ERR_STATUS 0x104
+#define CN6XXX_PCIE_UNCORR_ERR_MASK 0x108
+#define CN6XXX_PCIE_UNCORR_ERR 0x10C
+#define CN6XXX_PCIE_CORR_ERR_STATUS 0x110
+#define CN6XXX_PCIE_CORR_ERR_MASK 0x114
+#define CN6XXX_PCIE_ADV_ERR_CAP 0x118
+
+#define CN6XXX_PCIE_ACK_REPLAY_TIMER 0x700
+#define CN6XXX_PCIE_OTHER_MSG 0x704
+#define CN6XXX_PCIE_PORT_FORCE_LINK 0x708
+#define CN6XXX_PCIE_ACK_FREQ 0x70C
+#define CN6XXX_PCIE_PORT_LINK_CTL 0x710
+#define CN6XXX_PCIE_LANE_SKEW 0x714
+#define CN6XXX_PCIE_SYM_NUM 0x718
+#define CN6XXX_PCIE_FLTMSK 0x720
+
+/* ############## BAR0 Registers ################ */
+
+#define CN6XXX_SLI_CTL_PORT0 0x0050
+#define CN6XXX_SLI_CTL_PORT1 0x0060
+
+#define CN6XXX_SLI_WINDOW_CTL 0x02E0
+#define CN6XXX_SLI_DBG_DATA 0x0310
+#define CN6XXX_SLI_SCRATCH1 0x03C0
+#define CN6XXX_SLI_SCRATCH2 0x03D0
+#define CN6XXX_SLI_CTL_STATUS 0x0570
+
+#define CN6XXX_WIN_WR_ADDR_LO 0x0000
+#define CN6XXX_WIN_WR_ADDR_HI 0x0004
+#define CN6XXX_WIN_WR_ADDR64 CN6XXX_WIN_WR_ADDR_LO
+
+#define CN6XXX_WIN_RD_ADDR_LO 0x0010
+#define CN6XXX_WIN_RD_ADDR_HI 0x0014
+#define CN6XXX_WIN_RD_ADDR64 CN6XXX_WIN_RD_ADDR_LO
+
+#define CN6XXX_WIN_WR_DATA_LO 0x0020
+#define CN6XXX_WIN_WR_DATA_HI 0x0024
+#define CN6XXX_WIN_WR_DATA64 CN6XXX_WIN_WR_DATA_LO
+
+#define CN6XXX_WIN_RD_DATA_LO 0x0040
+#define CN6XXX_WIN_RD_DATA_HI 0x0044
+#define CN6XXX_WIN_RD_DATA64 CN6XXX_WIN_RD_DATA_LO
+
+#define CN6XXX_WIN_WR_MASK_LO 0x0030
+#define CN6XXX_WIN_WR_MASK_HI 0x0034
+#define CN6XXX_WIN_WR_MASK_REG CN6XXX_WIN_WR_MASK_LO
+
+/* 1 register (32-bit) to enable Input queues */
+#define CN6XXX_SLI_PKT_INSTR_ENB 0x1000
+
+/* 1 register (32-bit) to enable Output queues */
+#define CN6XXX_SLI_PKT_OUT_ENB 0x1010
+
+/* 1 register (32-bit) to determine whether Output queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_OQ 0x11F0
+
+/* 1 register (32-bit) to determine whether Input queues are in reset. */
+#define CN6XXX_SLI_PORT_IN_RST_IQ 0x11F4
+
+/*###################### REQUEST QUEUE #########################*/
+
+/* 1 register (32-bit) - instr. size of each input queue. */
+#define CN6XXX_SLI_PKT_INSTR_SIZE 0x1020
+
+/* 32 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */
+#define CN6XXX_SLI_IQ_INSTR_COUNT_START 0x2000
+
+/* 32 registers for Input Queue Start Addr - SLI_PKT0_INSTR_BADDR */
+#define CN6XXX_SLI_IQ_BASE_ADDR_START64 0x2800
+
+/* 32 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */
+#define CN6XXX_SLI_IQ_DOORBELL_START 0x2C00
+
+/* 32 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */
+#define CN6XXX_SLI_IQ_SIZE_START 0x3000
+
+/* 32 registers for Instruction Header Options - SLI_PKT0_INSTR_HEADER */
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 0x3400
+
+/* 1 register (64-bit) - Back Pressure for each input queue - SLI_PKT0_IN_BP */
+#define CN66XX_SLI_INPUT_BP_START64 0x3800
+
+/* Each Input Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_IQ_OFFSET 0x10
+
+/* 1 register (32-bit) - ES, RO, NS, Arbitration for Input Queue Data &
+ * gather list fetches. SLI_PKT_INPUT_CONTROL.
+ */
+#define CN6XXX_SLI_PKT_INPUT_CONTROL 0x1170
+
+/* 1 register (64-bit) - Number of instructions to read at one time
+ * - 2 bits for each input ring. SLI_PKT_INSTR_RD_SIZE.
+ */
+#define CN6XXX_SLI_PKT_INSTR_RD_SIZE 0x11A0
+
+/* 1 register (64-bit) - Assign Input ring to MAC port
+ * - 2 bits for each input ring. SLI_PKT_IN_PCIE_PORT.
+ */
+#define CN6XXX_SLI_IN_PCIE_PORT 0x11B0
+
+/*------- Request Queue Macros ---------*/
+#define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \
+ (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_SIZE(iq) \
+ (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \
+ (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_DOORBELL(iq) \
+ (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \
+ (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
+
+#define CN66XX_SLI_IQ_BP64(iq) \
+ (CN66XX_SLI_INPUT_BP_START64 + ((iq) * CN6XXX_IQ_OFFSET))
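+/* Example: CN6XXX_SLI_IQ_DOORBELL(3) = 0x2C00 + 3 * 0x10 = 0x2C30. */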
+
+/*------------------ Masks ----------------*/
+#define CN6XXX_INPUT_CTL_ROUND_ROBIN_ARB BIT(22)
+#define CN6XXX_INPUT_CTL_DATA_NS BIT(8)
+#define CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP BIT(6)
+#define CN6XXX_INPUT_CTL_DATA_RO BIT(5)
+#define CN6XXX_INPUT_CTL_USE_CSR BIT(4)
+#define CN6XXX_INPUT_CTL_GATHER_NS BIT(3)
+#define CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP BIT(2)
+#define CN6XXX_INPUT_CTL_GATHER_RO BIT(1)
+
+#ifdef __BIG_ENDIAN_BITFIELD
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR \
+ | CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP)
+#else
+#define CN6XXX_INPUT_CTL_MASK \
+ (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \
+ | CN6XXX_INPUT_CTL_USE_CSR)
+#endif
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* 32 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */
+#define CN6XXX_SLI_OQ0_BUFF_INFO_SIZE 0x0C00
+
+/* 32 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */
+#define CN6XXX_SLI_OQ_BASE_ADDR_START64 0x1400
+
+/* 32 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */
+#define CN6XXX_SLI_OQ_PKT_CREDITS_START 0x1800
+
+/* 32 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */
+#define CN6XXX_SLI_OQ_SIZE_START 0x1C00
+
+/* 32 registers for Output Queue Packet Count - SLI_PKT0_CNTS */
+#define CN6XXX_SLI_OQ_PKT_SENT_START 0x2400
+
+/* Each Output Queue register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_OQ_OFFSET 0x10
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_ROR
+ */
+#define CN6XXX_SLI_PKT_SLIST_ROR 0x1030
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues descriptors
+ * - SLI_PKT_SLIST_NS
+ */
+#define CN6XXX_SLI_PKT_SLIST_NS 0x1040
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue descriptors
+ * - SLI_PKT_SLIST_ES
+ */
+#define CN6XXX_SLI_PKT_SLIST_ES64 0x1050
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - InfoPtr mode for Output Queues.
+ * - SLI_PKT_IPTR
+ */
+#define CN6XXX_SLI_PKT_IPTR 0x1070
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - DPTR format selector for Output queues.
+ * - SLI_PKT_DPADDR
+ */
+#define CN6XXX_SLI_PKT_DPADDR 0x1080
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Relaxed Ordering setting for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_ROR
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ROR 0x1090
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - No Snoop mode for reading Output Queues data
+ * - SLI_PKT_DATA_OUT_NS
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_NS 0x10A0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Endian-Swap mode for reading Output Queue data
+ * - SLI_PKT_DATA_OUT_ES
+ */
+#define CN6XXX_SLI_PKT_DATA_OUT_ES64 0x10B0
+
+/* 1 register (32-bit) - 1 bit for each output queue
+ * - Controls whether SLI_PKTn_CNTS is incremented for bytes or for packets.
+ * - SLI_PKT_OUT_BMODE
+ */
+#define CN6XXX_SLI_PKT_OUT_BMODE 0x10D0
+
+/* 1 register (64-bit) - 2 bits for each output queue
+ * - Assign PCIE port for Output queues
+ * - SLI_PKT_PCIE_PORT.
+ */
+#define CN6XXX_SLI_PKT_PCIE_PORT64 0x10E0
+
+/* 1 register (64-bit) for Output Queue Packet Count Interrupt Threshold
+ * & Time Threshold. The same setting applies to all 32 queues.
+ * The register is defined as a 64-bit register, but 32-bit offsets are
+ * used below to define distinct addresses.
+ */
+#define CN6XXX_SLI_OQ_INT_LEVEL_PKTS 0x1120
+#define CN6XXX_SLI_OQ_INT_LEVEL_TIME 0x1124
+
+/* 1 register (64-bit) for Output Queue backpressure across all rings. */
+#define CN6XXX_SLI_OQ_WMARK 0x1180
+
+/* 1 register to control output queue global backpressure & ring enable. */
+#define CN6XXX_SLI_PKT_CTL 0x1220
+
+/*------- Output Queue Macros ---------*/
+#define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \
+ (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_SIZE(oq) \
+ (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \
+ (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_SENT(oq) \
+ (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+#define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \
+ (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET))
+
+/*######################### DMA Counters #########################*/
+
+/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. */
+#define CN6XXX_DMA_CNT_START 0x0400
+
+/* 2 registers (64-bit) - DMA Timer 0/1, containing the DMA timer values -
+ * SLI_DMA_0_TIM
+ */
+#define CN6XXX_DMA_TIM_START 0x0420
+
+/* 2 registers (64-bit) - DMA count & Time Interrupt threshold -
+ * SLI_DMA_0_INT_LEVEL
+ */
+#define CN6XXX_DMA_INT_LEVEL_START 0x03E0
+
+/* Each DMA register is at a 16-byte Offset in BAR0 */
+#define CN6XXX_DMA_OFFSET 0x10
+
+/*---------- DMA Counter Macros ---------*/
+#define CN6XXX_DMA_CNT(dq) \
+ (CN6XXX_DMA_CNT_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_PKT_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIME_INT_LEVEL(dq) \
+ (CN6XXX_DMA_INT_LEVEL_START + 4 + ((dq) * CN6XXX_DMA_OFFSET))
+
+#define CN6XXX_DMA_TIM(dq) \
+ (CN6XXX_DMA_TIM_START + ((dq) * CN6XXX_DMA_OFFSET))
+
+/*######################## INTERRUPTS #########################*/
+
+/* 1 register (64-bit) for Interrupt Summary */
+#define CN6XXX_SLI_INT_SUM64 0x0330
+
+/* 1 register (64-bit) for Interrupt Enable */
+#define CN6XXX_SLI_INT_ENB64_PORT0 0x0340
+#define CN6XXX_SLI_INT_ENB64_PORT1 0x0350
+
+/* 1 register (32-bit) to enable Output Queue Packet/Byte Count Interrupt */
+#define CN6XXX_SLI_PKT_CNT_INT_ENB 0x1150
+
+/* 1 register (32-bit) to enable Output Queue Packet Timer Interrupt */
+#define CN6XXX_SLI_PKT_TIME_INT_ENB 0x1160
+
+/* 1 register (32-bit) to indicate which Output Queue reached pkt threshold */
+#define CN6XXX_SLI_PKT_CNT_INT 0x1130
+
+/* 1 register (32-bit) to indicate which Output Queue reached time threshold */
+#define CN6XXX_SLI_PKT_TIME_INT 0x1140
+
+/*------------------ Interrupt Masks ----------------*/
+
+#define CN6XXX_INTR_RML_TIMEOUT_ERR BIT(1)
+#define CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR BIT(2)
+#define CN6XXX_INTR_IO2BIG_ERR BIT(3)
+#define CN6XXX_INTR_PKT_COUNT BIT(4)
+#define CN6XXX_INTR_PKT_TIME BIT(5)
+#define CN6XXX_INTR_M0UPB0_ERR BIT(8)
+#define CN6XXX_INTR_M0UPWI_ERR BIT(9)
+#define CN6XXX_INTR_M0UNB0_ERR BIT(10)
+#define CN6XXX_INTR_M0UNWI_ERR BIT(11)
+#define CN6XXX_INTR_M1UPB0_ERR BIT(12)
+#define CN6XXX_INTR_M1UPWI_ERR BIT(13)
+#define CN6XXX_INTR_M1UNB0_ERR BIT(14)
+#define CN6XXX_INTR_M1UNWI_ERR BIT(15)
+#define CN6XXX_INTR_MIO_INT0 BIT(16)
+#define CN6XXX_INTR_MIO_INT1 BIT(17)
+#define CN6XXX_INTR_MAC_INT0 BIT(18)
+#define CN6XXX_INTR_MAC_INT1 BIT(19)
+
+#define CN6XXX_INTR_DMA0_FORCE BIT_ULL(32)
+#define CN6XXX_INTR_DMA1_FORCE BIT_ULL(33)
+#define CN6XXX_INTR_DMA0_COUNT BIT_ULL(34)
+#define CN6XXX_INTR_DMA1_COUNT BIT_ULL(35)
+#define CN6XXX_INTR_DMA0_TIME BIT_ULL(36)
+#define CN6XXX_INTR_DMA1_TIME BIT_ULL(37)
+#define CN6XXX_INTR_INSTR_DB_OF_ERR BIT_ULL(48)
+#define CN6XXX_INTR_SLIST_DB_OF_ERR BIT_ULL(49)
+#define CN6XXX_INTR_POUT_ERR BIT_ULL(50)
+#define CN6XXX_INTR_PIN_BP_ERR BIT_ULL(51)
+#define CN6XXX_INTR_PGL_ERR BIT_ULL(52)
+#define CN6XXX_INTR_PDI_ERR BIT_ULL(53)
+#define CN6XXX_INTR_POP_ERR BIT_ULL(54)
+#define CN6XXX_INTR_PINS_ERR BIT_ULL(55)
+#define CN6XXX_INTR_SPRT0_ERR BIT_ULL(56)
+#define CN6XXX_INTR_SPRT1_ERR BIT_ULL(57)
+#define CN6XXX_INTR_ILL_PAD_ERR BIT_ULL(60)
+
+#define CN6XXX_INTR_DMA0_DATA (CN6XXX_INTR_DMA0_TIME)
+
+#define CN6XXX_INTR_DMA1_DATA (CN6XXX_INTR_DMA1_TIME)
+
+#define CN6XXX_INTR_DMA_DATA \
+ (CN6XXX_INTR_DMA0_DATA | CN6XXX_INTR_DMA1_DATA)
+
+#define CN6XXX_INTR_PKT_DATA (CN6XXX_INTR_PKT_TIME | \
+ CN6XXX_INTR_PKT_COUNT)
+
+/* Sum of interrupts for all PCI-Express Data Interrupts */
+#define CN6XXX_INTR_PCIE_DATA \
+ (CN6XXX_INTR_DMA_DATA | CN6XXX_INTR_PKT_DATA)
+
+#define CN6XXX_INTR_MIO \
+ (CN6XXX_INTR_MIO_INT0 | CN6XXX_INTR_MIO_INT1)
+
+#define CN6XXX_INTR_MAC \
+ (CN6XXX_INTR_MAC_INT0 | CN6XXX_INTR_MAC_INT1)
+
+/* Sum of interrupts for error events */
+#define CN6XXX_INTR_ERR \
+ (CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR \
+ | CN6XXX_INTR_IO2BIG_ERR \
+ | CN6XXX_INTR_M0UPB0_ERR \
+ | CN6XXX_INTR_M0UPWI_ERR \
+ | CN6XXX_INTR_M0UNB0_ERR \
+ | CN6XXX_INTR_M0UNWI_ERR \
+ | CN6XXX_INTR_M1UPB0_ERR \
+ | CN6XXX_INTR_M1UPWI_ERR \
+ | CN6XXX_INTR_M1UNB0_ERR \
+ | CN6XXX_INTR_M1UNWI_ERR \
+ | CN6XXX_INTR_INSTR_DB_OF_ERR \
+ | CN6XXX_INTR_SLIST_DB_OF_ERR \
+ | CN6XXX_INTR_POUT_ERR \
+ | CN6XXX_INTR_PIN_BP_ERR \
+ | CN6XXX_INTR_PGL_ERR \
+ | CN6XXX_INTR_PDI_ERR \
+ | CN6XXX_INTR_POP_ERR \
+ | CN6XXX_INTR_PINS_ERR \
+ | CN6XXX_INTR_SPRT0_ERR \
+ | CN6XXX_INTR_SPRT1_ERR \
+ | CN6XXX_INTR_ILL_PAD_ERR)
+
+/* Programmed Mask for Interrupt Sum */
+#define CN6XXX_INTR_MASK \
+ (CN6XXX_INTR_PCIE_DATA \
+ | CN6XXX_INTR_DMA0_FORCE \
+ | CN6XXX_INTR_DMA1_FORCE \
+ | CN6XXX_INTR_MIO \
+ | CN6XXX_INTR_MAC \
+ | CN6XXX_INTR_ERR)
+
+#define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80
+#define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90
+#define CN6XXX_SLI_S2M_PORTX_CTL(port) \
+ (CN6XXX_SLI_S2M_PORT0_CTL + ((port) * 0x10))
+
+#define CN6XXX_SLI_INT_ENB64(port) \
+ (CN6XXX_SLI_INT_ENB64_PORT0 + ((port) * 0x10))
+
+#define CN6XXX_SLI_MAC_NUMBER 0x3E00
+
+/* CN6XXX BAR1 Index registers. */
+#define CN6XXX_PEM_BAR1_INDEX000 0x00011800C00000A8ULL
+#define CN6XXX_PEM_OFFSET 0x0000000001000000ULL
+
+#define CN6XXX_BAR1_INDEX_START CN6XXX_PEM_BAR1_INDEX000
+#define CN6XXX_PCI_BAR1_OFFSET 0x8
+
+#define CN6XXX_BAR1_REG(idx, port) \
+ (CN6XXX_BAR1_INDEX_START + ((port) * CN6XXX_PEM_OFFSET) + \
+ (CN6XXX_PCI_BAR1_OFFSET * (idx)))
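+/* Example: CN6XXX_BAR1_REG(2, 1) = 0x00011800C00000A8ULL + 1 * 0x1000000
+ * + 2 * 0x8 = 0x00011800C10000B8ULL.
+ */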
+
+/*############################ DPI #########################*/
+
+#define CN6XXX_DPI_CTL 0x0001df0000000040ULL
+
+#define CN6XXX_DPI_DMA_CONTROL 0x0001df0000000048ULL
+
+#define CN6XXX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL
+
+#define CN6XXX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL
+
+#define CN6XXX_DPI_REQ_ERR_RST 0x0001df0000000060ULL
+
+#define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL
+
+#define CN6XXX_DPI_DMA_ENG_ENB(q_no) \
+ (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
+
+#define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL
+
+#define CN6XXX_DPI_DMA_ENG_BUF(q_no) \
+ (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
+
+#define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL
+#define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL
+#define CN6XXX_DPI_SLI_PRTX_CFG(port) \
+ (CN6XXX_DPI_SLI_PRT0_CFG + ((port) * 0x10))
+
+#define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58)
+#define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57)
+#define CN6XXX_DPI_DMA_PKT_EN BIT_ULL(56)
+#define CN6XXX_DPI_DMA_O_ES BIT_ULL(15)
+#define CN6XXX_DPI_DMA_O_MODE BIT_ULL(14)
+
+#define CN6XXX_DPI_DMA_CTL_MASK \
+ (CN6XXX_DPI_DMA_COMMIT_MODE | \
+ CN6XXX_DPI_DMA_PKT_HP | \
+ CN6XXX_DPI_DMA_PKT_EN | \
+ CN6XXX_DPI_DMA_O_ES | \
+ CN6XXX_DPI_DMA_O_MODE)
+
+/*############################ CIU #########################*/
+
+#define CN6XXX_CIU_SOFT_BIST 0x0001070000000738ULL
+#define CN6XXX_CIU_SOFT_RST 0x0001070000000740ULL
+
+/*############################ MIO #########################*/
+#define CN6XXX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL
+#define CN6XXX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL
+#define CN6XXX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL
+#define CN6XXX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL
+#define CN6XXX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL
+#define CN6XXX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL
+#define CN6XXX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL
+#define CN6XXX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL
+#define CN6XXX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL
+#define CN6XXX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL
+#define CN6XXX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL
+#define CN6XXX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL
+
+#define CN6XXX_MIO_QLM4_CFG 0x00011800000015B0ULL
+#define CN6XXX_MIO_RST_BOOT 0x0001180000001600ULL
+
+#define CN6XXX_MIO_QLM_CFG_MASK 0x7
+
+/*############################ LMC #########################*/
+
+#define CN6XXX_LMC0_RESET_CTL 0x0001180088000180ULL
+#define CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
new file mode 100644
index 000000000..30254e4cf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -0,0 +1,183 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_device.h"
+#include "cn68xx_regs.h"
+
+static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
+{
+ u32 i;
+ u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };
+
+ lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));
+
+ for (i = 0; i < 6; i++) {
+		/* Prevent servicing of the instruction queue for all DMA
+		 * engines. Engine 5 will remain 0; engines 0 - 4 will be
+		 * set up by the core.
+		 */
+ lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
+ lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
+ dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
+ lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
+ }
+
+ /* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
+ * separately.
+ */
+
+ lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
+ dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
+ lio_pci_readq(oct, CN6XXX_DPI_CTL));
+}
+
+static int lio_cn68xx_soft_reset(struct octeon_device *oct)
+{
+ lio_cn6xxx_soft_reset(oct);
+ lio_cn68xx_set_dpi_regs(oct);
+
+ return 0;
+}
+
+static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u64 pktctl, tx_pipe, max_oqs;
+
+ pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);
+
+ /* 68XX specific */
+ max_oqs = CFG_GET_OQ_MAX_Q(CHIP_CONF(oct, cn6xxx));
+ tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
+ tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
+ tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
+ octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);
+
+ if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
+ pktctl |= 0xF;
+ else
+ /* Disable per-port backpressure. */
+ pktctl &= ~0xF;
+ octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
+}
+
+static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
+{
+ lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
+ lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
+ lio_cn6xxx_enable_error_reporting(oct);
+
+ lio_cn6xxx_setup_global_input_regs(oct);
+ lio_cn68xx_setup_pkt_ctl_regs(oct);
+ lio_cn6xxx_setup_global_output_regs(oct);
+
+ /* Default error timeout value should be 0x200000 to avoid host hang
+	 * when the host reads an invalid register
+ */
+ octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);
+
+ return 0;
+}
+
+static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
+{
+ u32 val = 0;
+
+ /* Set M_VEND1_DRP and M_VEND0_DRP bits */
+ pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
+ val |= 0x3;
+ pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
+}
+
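+/* A QLM4 config value of zero identifies the 210NV card variant;
+ * any other value selects the default 410NV.
+ */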
+static int lio_is_210nv(struct octeon_device *oct)
+{
+ u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
+
+ return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
+}
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
+ u16 card_type = LIO_410NV;
+
+ if (octeon_map_pci_barx(oct, 0, 0))
+ return 1;
+
+ if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
+ __func__);
+ octeon_unmap_pci_barx(oct, 0);
+ return 1;
+ }
+
+ spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);
+
+ oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
+ oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;
+
+ oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
+ oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
+ oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
+ oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;
+
+ oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
+ oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
+ oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;
+
+ oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
+ oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;
+
+ oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
+ oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;
+
+ lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);
+
+ /* Determine variant of card */
+ if (lio_is_210nv(oct))
+ card_type = LIO_210NV;
+
+ cn68xx->conf = (struct octeon_config *)
+ oct_get_config_info(oct, card_type);
+ if (!cn68xx->conf) {
+ dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
+ __func__,
+ (card_type == LIO_410NV) ? LIO_410NV_NAME :
+ LIO_210NV_NAME);
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+ return 1;
+ }
+
+ oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);
+
+ lio_cn68xx_vendor_message_fix(oct);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
new file mode 100644
index 000000000..66b8d6bf5
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -0,0 +1,27 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn68xx_device.h
+ * \brief Host Driver: Routines that perform CN68XX specific operations.
+ */
+
+#ifndef __CN68XX_DEVICE_H__
+#define __CN68XX_DEVICE_H__
+
+int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
new file mode 100644
index 000000000..0b742f09e
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -0,0 +1,45 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file cn68xx_regs.h
+ * \brief Host Driver: Register Address and Register Mask values for
+ * Octeon CN68XX devices. The register map for CN66XX is the same
+ * for most registers. This file has the other registers that are
+ * 68XX-specific.
+ */
+
+#ifndef __CN68XX_REGS_H__
+#define __CN68XX_REGS_H__
+
+/*###################### REQUEST QUEUE #########################*/
+
+#define CN68XX_SLI_IQ_PORT0_PKIND 0x0800
+
+#define CN68XX_SLI_IQ_PORT_PKIND(iq) \
+ (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
+
+/*############################ OUTPUT QUEUE #########################*/
+
+/* Starting pipe number and number of pipes used by the SLI packet output. */
+#define CN68XX_SLI_TX_PIPE 0x1230
+
+/*######################## INTERRUPTS #########################*/
+
+/*------------------ Interrupt Masks ----------------*/
+#define CN68XX_INTR_PIPE_ERR BIT_ULL(61)
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
new file mode 100644
index 000000000..882b2be06
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -0,0 +1,1814 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+
+/* OOM task polling interval */
+#define LIO_OOM_POLL_INTERVAL_MS 250
+
+#define OCTNIC_MAX_SG MAX_SKB_FRAGS
+
+/**
+ * lio_delete_glists - Delete gather lists
+ * @lio: per-network private data
+ */
+void lio_delete_glists(struct lio *lio)
+{
+ struct octnic_gather *g;
+ int i;
+
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
+ if (!lio->glist)
+ return;
+
+ for (i = 0; i < lio->oct_dev->num_iqs; i++) {
+ do {
+ g = (struct octnic_gather *)
+ lio_list_delete_head(&lio->glist[i]);
+ kfree(g);
+ } while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i] &&
+ lio->glists_dma_base && lio->glists_dma_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
+ }
+
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
+ kfree(lio->glist);
+ lio->glist = NULL;
+}
+
+/**
+ * lio_setup_glists - Setup gather lists
+ * @oct: octeon_device
+ * @lio: per-network private data
+ * @num_iqs: count of iqs to allocate
+ */
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
+{
+ struct octnic_gather *g;
+ int i, j;
+
+ lio->glist_lock =
+ kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL);
+ if (!lio->glist_lock)
+ return -ENOMEM;
+
+ lio->glist =
+ kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL);
+ if (!lio->glist) {
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
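+	/* Each SG entry describes up to four buffers, so a per-packet
+	 * gather list needs ROUNDUP4(OCTNIC_MAX_SG) / 4 entries; the list
+	 * size in bytes is rounded up to an 8-byte boundary.
+	 */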
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store virtual and dma base address of
+ * per glist consistent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_iqs; i++) {
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ spin_lock_init(&lio->glist_lock[i]);
+
+ INIT_LIST_HEAD(&lio->glist[i]);
+
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(oct,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+
+ for (j = 0; j < lio->tx_qsize; j++) {
+ g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+ numa_node);
+ if (!g)
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
+
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
+
+ list_add_tail(&g->list, &lio->glist[i]);
+ }
+
+ if (j != lio->tx_qsize) {
+ lio_delete_glists(lio);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = param1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl)
+{
+ struct netdev_queue *netdev_queue = txq;
+
+ netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
+}
+
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb = NULL;
+ struct octeon_soft_command *sc;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return;
+ }
+
+ (*pkts_compl)++;
+ *bytes_compl += skb->len;
+}
+
+int octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct octeon_soft_command *sc;
+ struct netdev_queue *txq;
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ finfo = buf;
+ skb = finfo->skb;
+ break;
+
+ case REQTYPE_RESP_NET_SG:
+ case REQTYPE_RESP_NET:
+ sc = buf;
+ skb = sc->callback_arg;
+ break;
+
+ default:
+ return 0;
+ }
+
+ txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
+ netdev_tx_sent_queue(txq, skb->len);
+
+ return netif_xmit_stopped(txq);
+}
+
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
+{
+ struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
+ struct net_device *netdev = (struct net_device *)nctrl->netpndev;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u8 *mac;
+
+ if (nctrl->sc_status)
+ return;
+
+ switch (nctrl->ncmd.s.cmd) {
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ case OCTNET_CMD_SET_UC_LIST:
+ break;
+
+ case OCTNET_CMD_CHANGE_MACADDR:
+ mac = ((u8 *)&nctrl->udd[0]) + 2;
+ if (nctrl->ncmd.s.param1) {
+ /* vfidx is 0 based, but vf_num (param1) is 1 based */
+ int vfidx = nctrl->ncmd.s.param1 - 1;
+ bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
+
+ if (mac_is_admin_assigned)
+ netif_info(lio, probe, lio->netdev,
+ "MAC Address %pM is configured for VF %d\n",
+ mac, vfidx);
+ } else {
+ netif_info(lio, probe, lio->netdev,
+ " MACAddr changed to %pM\n",
+ mac);
+ }
+ break;
+
+ case OCTNET_CMD_GPIO_ACCESS:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_ID_ACTIVE:
+ netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+ break;
+
+ case OCTNET_CMD_LRO_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_LRO_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_ENABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VERBOSE_DISABLE:
+ dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_VLAN_FILTER_CTL:
+ if (nctrl->ncmd.s.param1)
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter enabled\n", netdev->name);
+ else
+ dev_info(&oct->pci_dev->dev,
+ "%s VLAN filter disabled\n", netdev->name);
+ break;
+
+ case OCTNET_CMD_ADD_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_DEL_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_SET_SETTINGS:
+ dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+ netdev->name);
+
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_RX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_RXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "RX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_TX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Enabled\n");
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_TXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "TX Checksum Offload Disabled\n");
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_VXLAN_PORT_CONFIG:
+ if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d ADDED\n",
+ nctrl->ncmd.s.param1);
+ } else if (nctrl->ncmd.s.more ==
+ OCTNET_CMD_VXLAN_PORT_DEL) {
+ netif_info(lio, probe, lio->netdev,
+ "VxLAN Destination UDP PORT:%d DELETED\n",
+ nctrl->ncmd.s.param1);
+ }
+ break;
+
+ case OCTNET_CMD_SET_FLOW_CTL:
+ netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+ break;
+
+ case OCTNET_CMD_QUEUE_COUNT_CTL:
+ netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
+ nctrl->ncmd.s.param1);
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+ nctrl->ncmd.s.cmd);
+ }
+}
+
+void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
+{
+ bool macaddr_changed = false;
+ struct net_device *netdev;
+ struct lio *lio;
+
+ rtnl_lock();
+
+ netdev = oct->props[0].netdev;
+ lio = GET_LIO(netdev);
+
+ lio->linfo.macaddr_is_admin_asgnd = true;
+
+ if (!ether_addr_equal(netdev->dev_addr, mac)) {
+ macaddr_changed = true;
+ eth_hw_addr_set(netdev, mac);
+ ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
+ }
+
+ rtnl_unlock();
+
+ if (macaddr_changed)
+ dev_info(&oct->pci_dev->dev,
+ "PF changed VF's MAC address to %pM\n", mac);
+
+ /* no need to notify the firmware of the macaddr change because
+ * the PF did that already
+ */
+}
+
+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ struct net_device *netdev = oct->props[0].netdev;
+ struct lio *lio = GET_LIO(netdev);
+ struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
+
+ queue_delayed_work(wq->wq, &wq->wk.work,
+ msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
+}
+
+static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+ struct octeon_device *oct = lio->oct_dev;
+ int q_no = wk->ctxul;
+ struct octeon_droq *droq = oct->droq[q_no];
+
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING) || !droq)
+ return;
+
+ if (octeon_retry_droq_refill(droq))
+ octeon_schedule_rxq_oom_work(oct, droq);
+}
+
+int setup_rx_oom_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct cavium_wq *wq;
+ int q, q_no;
+
+ for (q = 0; q < oct->num_oqs; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ wq = &lio->rxq_status_wq[q_no];
+ wq->wq = alloc_workqueue("rxq-oom-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!wq->wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&wq->wk.work,
+ octnet_poll_check_rxq_oom_status);
+ wq->wk.ctxptr = lio;
+ wq->wk.ctxul = q_no;
+ }
+
+ return 0;
+}
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct cavium_wq *wq;
+ int q_no;
+
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ wq = &lio->rxq_status_wq[q_no];
+ if (wq->wq) {
+ cancel_delayed_work_sync(&wq->wk.work);
+ destroy_workqueue(wq->wq);
+ wq->wq = NULL;
+ }
+ }
+}
+
+/* Runs in interrupt context. */
+static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
+{
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+ struct net_device *netdev;
+ struct lio *lio;
+
+ netdev = oct->props[iq->ifidx].netdev;
+
+ /* This is needed because the first IQ does not have
+ * a netdev associated with it.
+ */
+ if (!netdev)
+ return;
+
+ lio = GET_LIO(netdev);
+ if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, iq_num))) {
+ netif_wake_subqueue(netdev, iq->q_index);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+ tx_restart, 1);
+ }
+}
+
+/**
+ * octeon_setup_droq - Setup output queue
+ * @oct: octeon device
+ * @q_no: which queue
+ * @num_descs: how many descriptors
+ * @desc_size: size of each descriptor
+ * @app_ctx: application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+ int desc_size, void *app_ctx)
+{
+ int ret_val;
+
+ dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+ /* droq creation and local register settings. */
+ ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+ if (ret_val < 0)
+ return ret_val;
+
+ if (ret_val == 1) {
+ dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+ return 0;
+ }
+
+ /* Enable the droq queues */
+ octeon_set_droq_pkt_op(oct, q_no, 1);
+
+ /* Send Credit for Octeon Output queues. Credits are always
+ * sent after the output queue is enabled.
+ */
+ writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
+
+ return ret_val;
+}
+
+/**
+ * liquidio_push_packet - Routine to push packets arriving on the Octeon interface up to the network layer.
+ * @octeon_id: octeon device id.
+ * @skbuff: skbuff struct to be passed to network layer.
+ * @len: size of total data received.
+ * @rh: Control header associated with the packet
+ * @param: additional control data with the packet
+ * @arg: farg registered in droq_ops
+ */
+static void
+liquidio_push_packet(u32 __maybe_unused octeon_id,
+ void *skbuff,
+ u32 len,
+ union octeon_rh *rh,
+ void *param,
+ void *arg)
+{
+ struct net_device *netdev = (struct net_device *)arg;
+ struct octeon_droq *droq =
+ container_of(param, struct octeon_droq, napi);
+ struct sk_buff *skb = (struct sk_buff *)skbuff;
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct napi_struct *napi = param;
+ u16 vtag = 0;
+ u32 r_dh_off;
+ u64 ns;
+
+ if (netdev) {
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ /* Do not proceed if the interface is not in RUNNING state. */
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
+ recv_buffer_free(skb);
+ droq->stats.rx_dropped++;
+ return;
+ }
+
+ skb->dev = netdev;
+
+ skb_record_rx_queue(skb, droq->q_no);
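+		/* For large packets, copy only the first MIN_SKB_SIZE bytes
+		 * into the linear area and attach the rest of the receive
+		 * page as a frag; smaller packets are copied whole.
+		 */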
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ /* For Paged allocation use the frags */
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset +
+ MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ }
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+ skb_copy_to_linear_data(skb, page_address(pg_info->page)
+ + pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+
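+		/* The frame is preceded by rh->r_dh.len eight-byte data-header
+		 * words; r_dh_off indexes the last word and is decremented as
+		 * the optional metadata (timestamp, hash) is consumed, before
+		 * skb_pull() strips the whole data header below.
+		 */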
+ r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
+
+ if (oct->ptp_enable) {
+ if (rh->r_dh.has_hwtstamp) {
+ /* timestamp is included from the hardware at
+ * the beginning of the packet.
+ */
+ if (ifstate_check
+ (lio,
+ LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data + r_dh_off),
+ sizeof(ns));
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns +
+ lio->ptp_adjust);
+ }
+ }
+ }
+
+ if (rh->r_dh.has_hash) {
+ __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
+ u32 hash = be32_to_cpu(*hash_be);
+
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ r_dh_off -= BYTES_PER_DHLEN_UNIT;
+ }
+
+ skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if ((netdev->features & NETIF_F_RXCSUM) &&
+ (((rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
+ (!(rh->r_dh.encap_on) &&
+ ((rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED) ==
+ CNNIC_CSUM_VERIFIED))))
+ /* checksum has already been verified */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Setting Encapsulation field on basis of status received
+ * from the firmware
+ */
+ if (rh->r_dh.encap_on) {
+ skb->encapsulation = 1;
+ skb->csum_level = 1;
+ droq->stats.rx_vxlan++;
+ }
+
+ /* inbound VLAN tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ rh->r_dh.vlan) {
+ u16 priority = rh->r_dh.priority;
+ u16 vid = rh->r_dh.vlan;
+
+ vtag = (priority << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
+ napi_gro_receive(napi, skb);
+
+ droq->stats.rx_bytes_received += len -
+ rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
+ droq->stats.rx_pkts_received++;
+ } else {
+ recv_buffer_free(skb);
+ }
+}
+
+/**
+ * napi_schedule_wrapper - wrapper for calling napi_schedule
+ * @param: parameters to pass to napi_schedule
+ *
+ * Used when scheduling on different CPUs
+ */
+static void napi_schedule_wrapper(void *param)
+{
+ struct napi_struct *napi = param;
+
+ napi_schedule(napi);
+}
+
+/**
+ * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
+ * @arg: pointer to octeon output queue
+ */
+static void liquidio_napi_drv_callback(void *arg)
+{
+ struct octeon_device *oct;
+ struct octeon_droq *droq = arg;
+ int this_cpu = smp_processor_id();
+
+ oct = droq->oct_dev;
+
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
+ droq->cpu_id == this_cpu) {
+ napi_schedule_irqoff(&droq->napi);
+ } else {
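+		/* Bounce the NAPI schedule to the droq's designated CPU via an
+		 * asynchronous IPI.
+		 */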
+ INIT_CSD(&droq->csd, napi_schedule_wrapper, &droq->napi);
+ smp_call_function_single_async(droq->cpu_id, &droq->csd);
+ }
+}
+
+/**
+ * liquidio_napi_poll - Entry point for NAPI polling
+ * @napi: NAPI structure
+ * @budget: maximum number of items to process
+ */
+static int liquidio_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_device *oct;
+ struct octeon_droq *droq;
+ int tx_done = 0, iq_no;
+ int work_done;
+
+ droq = container_of(napi, struct octeon_droq, napi);
+ oct = droq->oct_dev;
+ iq_no = droq->q_no;
+
+ /* Handle Droq descriptors */
+ work_done = octeon_droq_process_poll_pkts(oct, droq, budget);
+
+ /* Flush the instruction queue */
+ iq = oct->instr_queue[iq_no];
+ if (iq) {
+ /* TODO: move this check to inside octeon_flush_iq,
+ * once check_db_timeout is removed
+ */
+ if (atomic_read(&iq->instr_pending))
+			/* Process iq buffers within the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, budget);
+ else
+ tx_done = 1;
+ /* Update iq read-index rather than waiting for next interrupt.
+		 * Return if tx_done is false.
+ */
+ /* sub-queue status update */
+ lio_update_txq_status(oct, iq_no);
+ } else {
+ dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
+ __func__, iq_no);
+ }
+
+#define MAX_REG_CNT 2000000U
+ /* force enable interrupt if reg cnts are high to avoid wraparound */
+ if ((work_done < budget && tx_done) ||
+ (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
+ (droq->pkt_count >= MAX_REG_CNT)) {
+ napi_complete_done(napi, work_done);
+
+ octeon_enable_irq(droq->oct_dev, droq->q_no);
+ return 0;
+ }
+
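+	/* Returning the full budget keeps NAPI polling while the
+	 * instruction queue still has pending completions.
+	 */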
+ return (!tx_done) ? (budget) : (work_done);
+}
+
+/**
+ * liquidio_setup_io_queues - Setup input and output queues
+ * @octeon_dev: octeon device
+ * @ifidx: Interface index
+ * @num_iqs: input io queue count
+ * @num_oqs: output io queue count
+ *
+ * Note: Queues are with respect to the octeon device. Thus
+ * an input queue is for egress packets, and output queues
+ * are for ingress packets.
+ */
+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+ u32 num_iqs, u32 num_oqs)
+{
+ struct octeon_droq_ops droq_ops;
+ struct net_device *netdev;
+ struct octeon_droq *droq;
+ struct napi_struct *napi;
+ int cpu_id_modulus;
+ int num_tx_descs;
+ struct lio *lio;
+ int retval = 0;
+ int q, q_no;
+ int cpu_id;
+
+ netdev = octeon_dev->props[ifidx].netdev;
+
+ lio = GET_LIO(netdev);
+
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+ droq_ops.farg = netdev;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
+
+ /* set up DROQs. */
+ for (q = 0; q < num_oqs; q++) {
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "%s index:%d linfo.rxpciq.s.q_no:%d\n",
+ __func__, q, q_no);
+ retval = octeon_setup_droq(
+ octeon_dev, q_no,
+ CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
+ lio->ifidx),
+ NULL);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "%s : Runtime DROQ(RxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ droq = octeon_dev->droq[q_no];
+ napi = &droq->napi;
+ dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
+ (u64)netdev, (u64)octeon_dev);
+ netif_napi_add(netdev, napi, liquidio_napi_poll);
+
+ /* designate a CPU for this droq */
+ droq->cpu_id = cpu_id;
+ cpu_id++;
+ if (cpu_id >= cpu_id_modulus)
+ cpu_id = 0;
+
+ octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
+ }
+
+ if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
+ /* 23XX PF/VF can send/recv control messages (via the first
+ * PF/VF-owned droq) from the firmware even if the ethX
+		 * interface is down, which is why poll_mode must be off
+ * for the first droq.
+ */
+ octeon_dev->droq[0]->ops.poll_mode = 0;
+ }
+
+ /* set up IQs. */
+ for (q = 0; q < num_iqs; q++) {
+ num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
+ octeon_get_conf(octeon_dev), lio->ifidx);
+ retval = octeon_setup_iq(octeon_dev, ifidx, q,
+ lio->linfo.txpciq[q], num_tx_descs,
+ netdev_get_tx_queue(netdev, q));
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ " %s : Runtime IQ(TxQ) creation failed.\n",
+ __func__);
+ return 1;
+ }
+
+ /* XPS */
+ if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
+ octeon_dev->ioq_vector) {
+ struct octeon_ioq_vector *ioq_vector;
+
+ ioq_vector = &octeon_dev->ioq_vector[q];
+ netif_set_xps_queue(netdev,
+ &ioq_vector->affinity_mask,
+ ioq_vector->iq_index);
+ }
+ }
+
+ return 0;
+}
+
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ } else {
+ if (ret & MSIX_PO_INT) {
+ if (OCTEON_CN23XX_VF(oct))
+ dev_err(&oct->pci_dev->dev,
+ "should not come here should not get rx when poll mode = 0 for vf\n");
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ return 1;
+ }
+ /* this will be flushed periodically by check iq db */
+ if (ret & MSIX_PI_INT)
+ return 0;
+ }
+
+ return 0;
+}
+
+irqreturn_t
+liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
+{
+ struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+ struct octeon_device *oct = ioq_vector->oct_dev;
+ struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+ u64 ret;
+
+ ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+ if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
+ liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
+ * @oct: octeon device
+ */
+static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct octeon_droq *droq;
+ u64 oq_no;
+
+ if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+ oq_no++) {
+ if (!(oct->droq_intr & BIT_ULL(oq_no)))
+ continue;
+
+ droq = oct->droq[oq_no];
+
+ if (droq->ops.poll_mode) {
+ droq->ops.napi_fn(droq);
+ oct_priv->napi_mask |= BIT_ULL(oq_no);
+ } else {
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ }
+ }
+}
+
+/**
+ * liquidio_legacy_intr_handler - Interrupt handler for octeon
+ * @irq: unused
+ * @dev: octeon device
+ */
+static
+irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
+{
+ struct octeon_device *oct = (struct octeon_device *)dev;
+ irqreturn_t ret;
+
+ /* Disable our interrupts for the duration of ISR */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ ret = oct->fn_list.process_interrupt_regs(oct);
+
+ if (ret == IRQ_HANDLED)
+ liquidio_schedule_droq_pkt_handlers(oct);
+
+ /* Re-enable our interrupts */
+ if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+ return ret;
+}
+
+/**
+ * octeon_setup_interrupt - Setup interrupt for octeon device
+ * @oct: octeon device
+ * @num_ioqs: number of queues
+ *
+ * Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
+{
+ struct msix_entry *msix_entries;
+ char *queue_irq_names = NULL;
+ int i, num_interrupts = 0;
+ int num_alloc_ioq_vectors;
+ char *aux_irq_name = NULL;
+ int num_ioq_vectors;
+ int irqret, err;
+
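+	/* Two paths: MSI-X with one vector per I/O queue (plus one
+	 * auxiliary vector on CN23XX PF), or a single MSI/legacy
+	 * interrupt shared by all queues.
+	 */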
+ if (oct->msix_on) {
+ oct->num_msix_irqs = num_ioqs;
+ if (OCTEON_CN23XX_PF(oct)) {
+ num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
+
+			/* one non-ioq interrupt for handling
+ * sli_mac_pf_int_sum
+ */
+ oct->num_msix_irqs += 1;
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
+ }
+
+ /* allocate storage for the names assigned to each irq */
+ oct->irq_name_storage =
+ kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
+ if (!oct->irq_name_storage) {
+ dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+ return -ENOMEM;
+ }
+
+ queue_irq_names = oct->irq_name_storage;
+
+ if (OCTEON_CN23XX_PF(oct))
+ aux_irq_name = &queue_irq_names
+ [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
+
+ oct->msix_entries = kcalloc(oct->num_msix_irqs,
+ sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!oct->msix_entries) {
+ dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return -ENOMEM;
+ }
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+
+		/* Assumption is that the PF MSI-X vectors start at pf_srn and
+		 * run through trs, and not from 0. If not, change this code.
+ */
+ if (OCTEON_CN23XX_PF(oct)) {
+ for (i = 0; i < oct->num_msix_irqs - 1; i++)
+ msix_entries[i].entry =
+ oct->sriov_info.pf_srn + i;
+
+ msix_entries[oct->num_msix_irqs - 1].entry =
+ oct->sriov_info.trs;
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ for (i = 0; i < oct->num_msix_irqs; i++)
+ msix_entries[i].entry = i;
+ }
+ num_alloc_ioq_vectors = pci_enable_msix_range(
+ oct->pci_dev, msix_entries,
+ oct->num_msix_irqs,
+ oct->num_msix_irqs);
+ if (num_alloc_ioq_vectors < 0) {
+ dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return num_alloc_ioq_vectors;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+ num_ioq_vectors = oct->num_msix_irqs;
+ /* For PF, there is one non-ioq interrupt handler */
+ if (OCTEON_CN23XX_PF(oct)) {
+ num_ioq_vectors -= 1;
+
+ snprintf(aux_irq_name, INTRNAMSIZ,
+ "LiquidIO%u-pf%u-aux", oct->octeon_id,
+ oct->pf_num);
+ irqret = request_irq(
+ msix_entries[num_ioq_vectors].vector,
+ liquidio_legacy_intr_handler, 0,
+ aux_irq_name, oct);
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ oct->msix_entries = NULL;
+ return irqret;
+ }
+ }
+		for (i = 0; i < num_ioq_vectors; i++) {
+ if (OCTEON_CN23XX_PF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+ INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, i);
+
+ if (OCTEON_CN23XX_VF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+ INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
+ oct->octeon_id, oct->vf_num, i);
+
+ irqret = request_irq(msix_entries[i].vector,
+ liquidio_msix_intr_handler, 0,
+ &queue_irq_names[IRQ_NAME_OFF(i)],
+ &oct->ioq_vector[i]);
+
+ if (irqret) {
+ dev_err(&oct->pci_dev->dev,
+ "Request_irq failed for MSIX interrupt Error: %d\n",
+ irqret);
+				/* Free the non-ioq irq vector here. */
+ free_irq(msix_entries[num_ioq_vectors].vector,
+ oct);
+
+ while (i) {
+ i--;
+ /* clearing affinity mask. */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ oct->msix_entries = NULL;
+ return irqret;
+ }
+ oct->ioq_vector[i].vector = msix_entries[i].vector;
+ /* assign the cpu mask for this msix interrupt vector */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ &oct->ioq_vector[i].affinity_mask
+ );
+ }
+ dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+ oct->octeon_id);
+ } else {
+ err = pci_enable_msi(oct->pci_dev);
+ if (err)
+ dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+ err);
+ else
+ oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+ /* allocate storage for the names assigned to the irq */
+ oct->irq_name_storage = kzalloc(INTRNAMSIZ, GFP_KERNEL);
+ if (!oct->irq_name_storage)
+ return -ENOMEM;
+
+ queue_irq_names = oct->irq_name_storage;
+
+ if (OCTEON_CN23XX_PF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+ "LiquidIO%u-pf%u-rxtx-%u",
+ oct->octeon_id, oct->pf_num, 0);
+
+ if (OCTEON_CN23XX_VF(oct))
+ snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+ "LiquidIO%u-vf%u-rxtx-%u",
+ oct->octeon_id, oct->vf_num, 0);
+
+ irqret = request_irq(oct->pci_dev->irq,
+ liquidio_legacy_intr_handler,
+ IRQF_SHARED,
+ &queue_irq_names[IRQ_NAME_OFF(0)], oct);
+ if (irqret) {
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+ irqret);
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ return irqret;
+ }
+ }
+ return 0;
+}
+
+/**
+ * liquidio_change_mtu - Net device change_mtu
+ * @netdev: network device
+ * @new_mtu: the new max transmit unit size
+ */
+int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ int ret = 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE, 16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate soft command\n");
+ return -ENOMEM;
+ }
+
+ ncmd = (union octnet_cmd *)sc->virtdptr;
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = OCTNET_CMD_CHANGE_MTU;
+ ncmd->s.param1 = new_mtu;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_CMD, 0, 0, 0);
+
+ ret = octeon_send_soft_command(oct, sc);
+ if (ret == IQ_SEND_FAILED) {
+ netif_info(lio, rx_err, lio->netdev, "Failed to change MTU\n");
+ octeon_free_soft_command(oct, sc);
+ return -EINVAL;
+ }
+	/* Sleep on a wait queue until the cond flag indicates that the
+	 * response arrived or timed out.
+ */
+ ret = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (ret)
+ return ret;
+
+ if (sc->sc_status) {
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -EINVAL;
+ }
+
+ netdev->mtu = new_mtu;
+ lio->mtu = new_mtu;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+ return 0;
+}
+
+int lio_wait_for_clean_oq(struct octeon_device *oct)
+{
+ int retry = 100, pending_pkts = 0;
+ int idx;
+
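+	/* Poll up to 100 times, sleeping one tick between passes, until no
+	 * packets remain pending on any enabled output queue.
+	 */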
+ do {
+ pending_pkts = 0;
+
+ for (idx = 0; idx < MAX_OCTEON_OUTPUT_QUEUES(oct); idx++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(idx)))
+ continue;
+ pending_pkts +=
+ atomic_read(&oct->droq[idx]->pkts_pending);
+ }
+
+ if (pending_pkts > 0)
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && pending_pkts);
+
+ return pending_pkts;
+}
+
+static void
+octnet_nic_stats_callback(struct octeon_device *oct_dev,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp =
+ (struct oct_nic_stats_resp *)sc->virtrptr;
+ struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
+ struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
+ struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
+ struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
+
+ if (status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->stats,
+ (sizeof(struct oct_link_stats)) >> 3);
+
+ /* RX link-level stats */
+ rstats->total_rcvd = rsp_rstats->total_rcvd;
+ rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
+ rstats->total_bcst = rsp_rstats->total_bcst;
+ rstats->total_mcst = rsp_rstats->total_mcst;
+ rstats->runts = rsp_rstats->runts;
+ rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
+ /* Accounts for over/under-run of buffers */
+ rstats->fifo_err = rsp_rstats->fifo_err;
+ rstats->dmac_drop = rsp_rstats->dmac_drop;
+ rstats->fcs_err = rsp_rstats->fcs_err;
+ rstats->jabber_err = rsp_rstats->jabber_err;
+ rstats->l2_err = rsp_rstats->l2_err;
+ rstats->frame_err = rsp_rstats->frame_err;
+ rstats->red_drops = rsp_rstats->red_drops;
+
+ /* RX firmware stats */
+ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
+ rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
+ rstats->fw_total_mcast = rsp_rstats->fw_total_mcast;
+ rstats->fw_total_bcast = rsp_rstats->fw_total_bcast;
+ rstats->fw_err_pko = rsp_rstats->fw_err_pko;
+ rstats->fw_err_link = rsp_rstats->fw_err_link;
+ rstats->fw_err_drop = rsp_rstats->fw_err_drop;
+ rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
+ rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
+
+ /* Number of packets that are LROed */
+ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
+ /* Number of octets that are LROed */
+ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
+ /* Number of LRO packets formed */
+ rstats->fw_total_lro = rsp_rstats->fw_total_lro;
+		/* Number of times LRO of a packet was aborted */
+ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
+ rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
+ rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
+ rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
+ rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ rstats->fwd_rate = rsp_rstats->fwd_rate;
+
+ /* TX link-level stats */
+ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
+ tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
+ tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
+ tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
+ tstats->ctl_sent = rsp_tstats->ctl_sent;
+		/* Packets sent after one collision */
+		tstats->one_collision_sent = rsp_tstats->one_collision_sent;
+		/* Packets sent after multiple collisions */
+ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ tstats->max_collision_fail = rsp_tstats->max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ tstats->fifo_err = rsp_tstats->fifo_err;
+ tstats->runts = rsp_tstats->runts;
+ /* Total number of collisions detected */
+ tstats->total_collisions = rsp_tstats->total_collisions;
+
+ /* firmware stats */
+ tstats->fw_total_sent = rsp_tstats->fw_total_sent;
+ tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
+ tstats->fw_total_mcast_sent = rsp_tstats->fw_total_mcast_sent;
+ tstats->fw_total_bcast_sent = rsp_tstats->fw_total_bcast_sent;
+ tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_pki = rsp_tstats->fw_err_pki;
+ tstats->fw_err_link = rsp_tstats->fw_err_link;
+ tstats->fw_err_drop = rsp_tstats->fw_err_drop;
+ tstats->fw_tso = rsp_tstats->fw_tso;
+ tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
+ tstats->fw_err_tso = rsp_tstats->fw_err_tso;
+ tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
+
+ resp->status = 1;
+ } else {
+ dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
+ resp->status = -1;
+ }
+}
+
+static int lio_fetch_vf_stats(struct lio *lio)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_nic_vf_stats_resp *resp;
+
+ int retval;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_vf_stats_resp),
+ 0);
+
+ if (!sc) {
+ dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
+ retval = -ENOMEM;
+ goto lio_fetch_vf_stats_exit;
+ }
+
+ resp = (struct oct_nic_vf_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_vf_stats_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_VF_PORT_STATS, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ goto lio_fetch_vf_stats_exit;
+ }
+
+ retval =
+ wait_for_sc_completion_timeout(oct_dev, sc,
+ (2 * LIO_SC_MAX_TMO_MS));
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "sc OPCODE_NIC_VF_PORT_STATS command failed\n");
+ goto lio_fetch_vf_stats_exit;
+ }
+
+ if (sc->sc_status != OCTEON_REQUEST_TIMEOUT && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->spoofmac_cnt,
+ (sizeof(u64)) >> 3);
+
+ if (resp->spoofmac_cnt != 0) {
+ dev_warn(&oct_dev->pci_dev->dev,
+ "%llu Spoofed packets detected\n",
+ resp->spoofmac_cnt);
+ }
+ }
+ WRITE_ONCE(sc->caller_is_done, 1);
+
+lio_fetch_vf_stats_exit:
+ return retval;
+}
+
+void lio_fetch_stats(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = wk->ctxptr;
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_resp *resp;
+ unsigned long time_in_jiffies;
+ int retval;
+
+ if (OCTEON_CN23XX_PF(oct_dev)) {
+ /* report spoofchk every 2 seconds */
+ if (!(oct_dev->vfstats_poll % LIO_VFSTATS_POLL) &&
+ (oct_dev->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP) &&
+ oct_dev->sriov_info.num_vfs_alloced) {
+ lio_fetch_vf_stats(lio);
+ }
+
+ oct_dev->vfstats_poll++;
+ }
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ 0);
+
+ if (!sc) {
+ dev_err(&oct_dev->pci_dev->dev, "Soft command allocation failed\n");
+ goto lio_fetch_stats_exit;
+ }
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ goto lio_fetch_stats_exit;
+ }
+
+ retval = wait_for_sc_completion_timeout(oct_dev, sc,
+ (2 * LIO_SC_MAX_TMO_MS));
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev, "sc OPCODE_NIC_PORT_STATS command failed\n");
+ goto lio_fetch_stats_exit;
+ }
+
+ octnet_nic_stats_callback(oct_dev, sc->sc_status, sc);
+ WRITE_ONCE(sc->caller_is_done, true);
+
+lio_fetch_stats_exit:
+ time_in_jiffies = msecs_to_jiffies(LIQUIDIO_NDEV_STATS_POLL_TIME_MS);
+ if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
+ schedule_delayed_work(&lio->stats_wk.work, time_in_jiffies);
+
+ return;
+}
+
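+/* The speed/FEC helpers below follow one pattern: allocate a soft
+ * command, fill a SEAPI octnet_cmd, send it on the first tx queue as
+ * OPCODE_NIC_UBOOT_CTL, wait for the firmware response, then mark the
+ * command done via caller_is_done.
+ */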
+int liquidio_set_speed(struct lio *lio, int speed)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ int retval;
+ u32 var;
+
+ if (oct->speed_setting == speed)
+ return 0;
+
+ if (!OCTEON_CN23XX_PF(oct)) {
+ dev_err(&oct->pci_dev->dev, "%s: SET SPEED only for PF\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp),
+ 0);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_SPEED_SET;
+ ncmd->s.param1 = speed;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ octeon_free_soft_command(oct, sc);
+ retval = -EBUSY;
+ } else {
+ /* Wait for response or timeout */
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = resp->status;
+
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "%s failed, retval=%d\n",
+ __func__, retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ return -EIO;
+ }
+
+ var = be32_to_cpu((__force __be32)resp->speed);
+ if (var != speed) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: setting failed speed= %x, expect %x\n",
+ __func__, var, speed);
+ }
+
+ oct->speed_setting = var;
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ return retval;
+}
+
+int liquidio_get_speed(struct lio *lio)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ int retval;
+
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp),
+ 0);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_SPEED_GET;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ octeon_free_soft_command(oct, sc);
+ retval = -EIO;
+ } else {
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = resp->status;
+ if (retval) {
+ dev_err(&oct->pci_dev->dev,
+ "%s failed retval=%d\n", __func__, retval);
+ retval = -EIO;
+ } else {
+ u32 var;
+
+ var = be32_to_cpu((__force __be32)resp->speed);
+ oct->speed_setting = var;
+ if (var == 0xffff) {
+			/* Unable to access the boot variables;
+			 * get the default value based on the NIC type.
+ */
+ if (oct->subsystem_id ==
+ OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id ==
+ OCTEON_CN2360_25GB_SUBSYS_ID) {
+ oct->no_speed_setting = 1;
+ oct->speed_setting = 25;
+ } else {
+ oct->speed_setting = 10;
+ }
+ }
+
+ }
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ return retval;
+}
+
+int liquidio_set_fec(struct lio *lio, int on_off)
+{
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ struct octeon_device *oct;
+ union octnet_cmd *ncmd;
+ int retval;
+ u32 var;
+
+ oct = lio->oct_dev;
+
+ if (oct->props[lio->ifidx].fec == on_off)
+ return 0;
+
+ if (!OCTEON_CN23XX_PF(oct)) {
+ dev_err(&oct->pci_dev->dev, "%s: SET FEC only for PF\n",
+ __func__);
+ return -1;
+ }
+
+ if (oct->speed_boot != 25) {
+ dev_err(&oct->pci_dev->dev,
+ "Set FEC only when link speed is 25G during insmod\n");
+ return -1;
+ }
+
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp), 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to allocate soft command\n");
+ return -ENOMEM;
+ }
+
+ ncmd = sc->virtdptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_FEC_SET;
+ ncmd->s.param1 = on_off;
+ /* SEAPI_CMD_FEC_DISABLE(0) or SEAPI_CMD_FEC_RS(1) */
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev, "Failed to send soft command\n");
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
+ }
+
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return (-EIO);
+
+ var = be32_to_cpu(resp->fec_setting);
+ resp->fec_setting = var;
+ if (var != on_off) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting failed fec= %x, expect %x\n",
+ var, on_off);
+ oct->props[lio->ifidx].fec = var;
+ if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
+ oct->props[lio->ifidx].fec = 1;
+ else
+ oct->props[lio->ifidx].fec = 0;
+ }
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ if (oct->props[lio->ifidx].fec !=
+ oct->props[lio->ifidx].fec_boot) {
+ dev_dbg(&oct->pci_dev->dev,
+ "Reload driver to change fec to %s\n",
+ oct->props[lio->ifidx].fec ? "on" : "off");
+ }
+
+ return retval;
+}
+
+int liquidio_get_fec(struct lio *lio)
+{
+ struct oct_nic_seapi_resp *resp;
+ struct octeon_soft_command *sc;
+ struct octeon_device *oct;
+ union octnet_cmd *ncmd;
+ int retval;
+ u32 var;
+
+ oct = lio->oct_dev;
+
+ sc = octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ sizeof(struct oct_nic_seapi_resp), 0);
+ if (!sc)
+ return -ENOMEM;
+
+ ncmd = sc->virtdptr;
+ resp = sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_seapi_resp));
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = SEAPI_CMD_FEC_GET;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_UBOOT_CTL, 0, 0, 0);
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_info(&oct->pci_dev->dev,
+ "%s: Failed to send soft command\n", __func__);
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
+ }
+
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
+
+ var = be32_to_cpu(resp->fec_setting);
+ resp->fec_setting = var;
+ if (resp->fec_setting == SEAPI_CMD_FEC_SET_RS)
+ oct->props[lio->ifidx].fec = 1;
+ else
+ oct->props[lio->ifidx].fec = 0;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ if (oct->props[lio->ifidx].fec !=
+ oct->props[lio->ifidx].fec_boot) {
+ dev_dbg(&oct->pci_dev->dev,
+ "Reload driver to change fec to %s\n",
+ oct->props[lio->ifidx].fec ? "on" : "off");
+ }
+
+ return retval;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
new file mode 100644
index 000000000..2c10ae3f7
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -0,0 +1,3182 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/pci.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
+
+static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs);
+
+struct oct_intrmod_resp {
+ u64 rh;
+ struct oct_intrmod_cfg intrmod;
+ u64 status;
+};
+
+struct oct_mdio_cmd_resp {
+ u64 rh;
+ struct oct_mdio_cmd resp;
+ u64 status;
+};
+
+#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp))
+
+/* Octeon's interface mode of operation */
+enum {
+ INTERFACE_MODE_DISABLED,
+ INTERFACE_MODE_RGMII,
+ INTERFACE_MODE_GMII,
+ INTERFACE_MODE_SPI,
+ INTERFACE_MODE_PCIE,
+ INTERFACE_MODE_XAUI,
+ INTERFACE_MODE_SGMII,
+ INTERFACE_MODE_PICMG,
+ INTERFACE_MODE_NPI,
+ INTERFACE_MODE_LOOP,
+ INTERFACE_MODE_SRIO,
+ INTERFACE_MODE_ILK,
+ INTERFACE_MODE_RXAUI,
+ INTERFACE_MODE_QSGMII,
+ INTERFACE_MODE_AGL,
+ INTERFACE_MODE_XLAUI,
+ INTERFACE_MODE_XFI,
+ INTERFACE_MODE_10G_KR,
+ INTERFACE_MODE_40G_KR4,
+ INTERFACE_MODE_MIXED,
+};
+
+#define OCT_ETHTOOL_REGDUMP_LEN 4096
+#define OCT_ETHTOOL_REGDUMP_LEN_23XX (4096 * 11)
+#define OCT_ETHTOOL_REGDUMP_LEN_23XX_VF (4096 * 2)
+#define OCT_ETHTOOL_REGSVER 1
+
+/* statistics of PF */
+static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
+ "tx_dropped",
+
+ "tx_total_sent",
+ "tx_total_fwd",
+ "tx_err_pko",
+ "tx_err_pki",
+ "tx_err_link",
+ "tx_err_drop",
+
+ "tx_tso",
+ "tx_tso_packets",
+ "tx_tso_err",
+ "tx_vxlan",
+
+ "tx_mcast",
+ "tx_bcast",
+
+ "mac_tx_total_pkts",
+ "mac_tx_total_bytes",
+ "mac_tx_mcast_pkts",
+ "mac_tx_bcast_pkts",
+ "mac_tx_ctl_packets",
+ "mac_tx_total_collisions",
+ "mac_tx_one_collision",
+ "mac_tx_multi_collision",
+ "mac_tx_max_collision_fail",
+ "mac_tx_max_deferral_fail",
+ "mac_tx_fifo_err",
+ "mac_tx_runts",
+
+ "rx_total_rcvd",
+ "rx_total_fwd",
+ "rx_mcast",
+ "rx_bcast",
+ "rx_jabber_err",
+ "rx_l2_err",
+ "rx_frame_err",
+ "rx_err_pko",
+ "rx_err_link",
+ "rx_err_drop",
+
+ "rx_vxlan",
+ "rx_vxlan_err",
+
+ "rx_lro_pkts",
+ "rx_lro_bytes",
+ "rx_total_lro",
+
+ "rx_lro_aborts",
+ "rx_lro_aborts_port",
+ "rx_lro_aborts_seq",
+ "rx_lro_aborts_tsval",
+ "rx_lro_aborts_timer",
+ "rx_fwd_rate",
+
+ "mac_rx_total_rcvd",
+ "mac_rx_bytes",
+ "mac_rx_total_bcst",
+ "mac_rx_total_mcst",
+ "mac_rx_runts",
+ "mac_rx_ctl_packets",
+ "mac_rx_fifo_err",
+ "mac_rx_dma_drop",
+ "mac_rx_fcs_err",
+
+ "link_state_changes",
+};
+
+/* statistics of VF */
+static const char oct_vf_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors",
+ "tx_errors",
+ "rx_dropped",
+ "tx_dropped",
+ "rx_mcast",
+ "tx_mcast",
+ "rx_bcast",
+ "tx_bcast",
+ "link_state_changes",
+};
+
+/* statistics of host tx queue */
+static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "bytes",
+ "dropped",
+ "iq_busy",
+ "sgentry_sent",
+
+ "fw_instr_posted",
+ "fw_instr_processed",
+ "fw_instr_dropped",
+ "fw_bytes_sent",
+
+ "tso",
+ "vxlan",
+ "txq_restart",
+};
+
+/* statistics of host rx queue */
+static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "bytes",
+ "dropped",
+ "dropped_nomem",
+ "dropped_toomany",
+ "fw_dropped",
+ "fw_pkts_received",
+ "fw_bytes_received",
+ "fw_dropped_nodispatch",
+
+ "vxlan",
+ "buffer_alloc_failure",
+};
+
+/* LiquidIO driver private flags */
+static const char oct_priv_flags_strings[][ETH_GSTRING_LEN] = {
+};
+
+#define OCTNIC_NCMD_AUTONEG_ON 0x1
+#define OCTNIC_NCMD_PHY_ON 0x2
+
+static int lio_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+
+ linfo = &lio->linfo;
+
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+
+ switch (linfo->link.s.phy_type) {
+ case LIO_PHY_PORT_TP:
+ ecmd->base.port = PORT_TP;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, TP);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+
+ break;
+
+ case LIO_PHY_PORT_FIBRE:
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XLAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
+ dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n");
+ ecmd->base.transceiver = XCVR_EXTERNAL;
+ } else {
+ dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n",
+ linfo->link.s.if_mode);
+ }
+
+ ecmd->base.port = PORT_FIBRE;
+ ecmd->base.autoneg = AUTONEG_DISABLE;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, Pause);
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ if (OCTEON_CN23XX_PF(oct)) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, 25000baseCR_Full);
+
+ if (oct->no_speed_setting == 0) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseCR_Full);
+ }
+
+ if (oct->no_speed_setting == 0) {
+ liquidio_get_speed(lio);
+ liquidio_get_fec(lio);
+ } else {
+ oct->speed_setting = 25;
+ }
+
+ if (oct->speed_setting == 10) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseCR_Full);
+ }
+ if (oct->speed_setting == 25) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseCR_Full);
+ }
+
+ if (oct->no_speed_setting)
+ break;
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, FEC_RS);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported, FEC_NONE);
+ /* FEC_OFF */
+ if (oct->props[lio->ifidx].fec == 1) {
+ /* ETHTOOL_FEC_RS */
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising, FEC_RS);
+ } else {
+ /* ETHTOOL_FEC_OFF */
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising, FEC_NONE);
+ }
+ } else { /* VF */
+ if (linfo->link.s.speed == 10000) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 10000baseCR_Full);
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 10000baseCR_Full);
+ }
+
+ if (linfo->link.s.speed == 25000) {
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, supported,
+ 25000baseCR_Full);
+
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseKR_Full);
+ ethtool_link_ksettings_add_link_mode
+ (ecmd, advertising,
+ 25000baseCR_Full);
+ }
+ }
+ } else {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ }
+ break;
+ }
+
+ if (linfo->link.s.link_up) {
+ ecmd->base.speed = linfo->link.s.speed;
+ ecmd->base.duplex = linfo->link.s.duplex;
+ } else {
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int lio_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ const int speed = ecmd->base.speed;
+ struct lio *lio = GET_LIO(netdev);
+ struct oct_link_info *linfo;
+ struct octeon_device *oct;
+
+ oct = lio->oct_dev;
+
+ linfo = &lio->linfo;
+
+ if (!(oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID))
+ return -EOPNOTSUPP;
+
+ if (oct->no_speed_setting) {
+ dev_err(&oct->pci_dev->dev, "%s: Changing speed is not supported\n",
+ __func__);
+ return -EOPNOTSUPP;
+ }
+
+ if ((ecmd->base.duplex != DUPLEX_UNKNOWN &&
+ ecmd->base.duplex != linfo->link.s.duplex) ||
+ ecmd->base.autoneg != AUTONEG_DISABLE ||
+ (ecmd->base.speed != 10000 && ecmd->base.speed != 25000 &&
+ ecmd->base.speed != SPEED_UNKNOWN))
+ return -EOPNOTSUPP;
+
+ if ((oct->speed_boot == speed / 1000) &&
+ oct->speed_boot == oct->speed_setting)
+ return 0;
+
+ liquidio_set_speed(lio, speed / 1000);
+
+ dev_dbg(&oct->pci_dev->dev, "Port speed is set to %dG\n",
+ oct->speed_setting);
+
+ return 0;
+}
+
+static void
+lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct lio *lio;
+ struct octeon_device *oct;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(drvinfo->driver, "liquidio");
+ strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ ETHTOOL_FWVERS_LEN);
+ strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+}
+
+static void
+lio_get_vf_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct octeon_device *oct;
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
+ strcpy(drvinfo->driver, "liquidio_vf");
+ strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version,
+ ETHTOOL_FWVERS_LEN);
+ strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32);
+}
+
+static int
+lio_send_queue_count_update(struct net_device *netdev, uint32_t num_queues)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_QUEUE_COUNT_CTL;
+ nctrl.ncmd.s.param1 = num_queues;
+ nctrl.ncmd.s.param2 = num_queues;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Failed to send Queue reset command (ret: 0x%x)\n",
+ ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+lio_ethtool_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;
+ u32 combined_count = 0, max_combined = 0;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
+
+ max_rx = CFG_GET_OQ_MAX_Q(conf6x);
+ max_tx = CFG_GET_IQ_MAX_Q(conf6x);
+ rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
+ tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ if (oct->sriov_info.sriov_enabled) {
+ max_combined = lio->linfo.num_txpciq;
+ } else {
+ struct octeon_config *conf23_pf =
+ CHIP_CONF(oct, cn23xx_pf);
+
+ max_combined = CFG_GET_IQ_MAX_Q(conf23_pf);
+ }
+ combined_count = oct->num_iqs;
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ u64 reg_val = 0ULL;
+ u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
+
+ reg_val = octeon_read_csr64(oct, ctrl);
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+ max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+ combined_count = oct->num_iqs;
+ }
+
+ channel->max_rx = max_rx;
+ channel->max_tx = max_tx;
+ channel->max_combined = max_combined;
+ channel->rx_count = rx_count;
+ channel->tx_count = tx_count;
+ channel->combined_count = combined_count;
+}
+
+static int
+lio_irq_reallocate_irqs(struct octeon_device *oct, uint32_t num_ioqs)
+{
+ struct msix_entry *msix_entries;
+ int num_msix_irqs = 0;
+ int i;
+
+ if (!oct->msix_on)
+ return 0;
+
+ /* Disable Octeon interrupts before tearing down and reallocating
+ * the MSI-X vectors. No more packets will be serviced from Octeon.
+ */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ if (OCTEON_CN23XX_PF(oct))
+ num_msix_irqs = oct->num_msix_irqs - 1;
+ else if (OCTEON_CN23XX_VF(oct))
+ num_msix_irqs = oct->num_msix_irqs;
+
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < num_msix_irqs; i++) {
+ if (oct->ioq_vector[i].vector) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ oct->ioq_vector[i].vector = 0;
+ }
+ }
+
+ /* non-iov vector's argument is oct struct */
+ if (OCTEON_CN23XX_PF(oct))
+ free_irq(msix_entries[i].vector, oct);
+
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ }
+
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+
+ if (octeon_allocate_ioq_vector(oct, num_ioqs)) {
+ dev_err(&oct->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+ return -1;
+ }
+
+ if (octeon_setup_interrupt(oct, num_ioqs)) {
+ dev_info(&oct->pci_dev->dev, "Setup interrupt failed\n");
+ return -1;
+ }
+
+ /* Enable Octeon device interrupts */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+ return 0;
+}
+
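+/* Handler for "ethtool -L <iface>": only the combined channel count may
+ * change, and only within the chip/firmware limits computed below.
+ */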
+static int
+lio_ethtool_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ u32 combined_count, max_combined;
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+ int stopped = 0;
+
+ if (strcmp(oct->fw_info.liquidio_firmware_version, "1.6.1") < 0) {
+ dev_err(&oct->pci_dev->dev, "Minimum firmware version required is 1.6.1\n");
+ return -EINVAL;
+ }
+
+ if (!channel->combined_count || channel->other_count ||
+ channel->rx_count || channel->tx_count)
+ return -EINVAL;
+
+ combined_count = channel->combined_count;
+
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (oct->sriov_info.sriov_enabled) {
+ max_combined = lio->linfo.num_txpciq;
+ } else {
+ struct octeon_config *conf23_pf =
+ CHIP_CONF(oct,
+ cn23xx_pf);
+
+ max_combined =
+ CFG_GET_IQ_MAX_Q(conf23_pf);
+ }
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ u64 reg_val = 0ULL;
+ u64 ctrl = CN23XX_VF_SLI_IQ_PKT_CONTROL64(0);
+
+ reg_val = octeon_read_csr64(oct, ctrl);
+ reg_val = reg_val >> CN23XX_PKT_INPUT_CTL_RPVF_POS;
+ max_combined = reg_val & CN23XX_PKT_INPUT_CTL_RPVF_MASK;
+ } else {
+ return -EINVAL;
+ }
+
+ if (combined_count > max_combined || combined_count < 1)
+ return -EINVAL;
+
+ if (combined_count == oct->num_iqs)
+ return 0;
+
+ ifstate_set(lio, LIO_IFSTATE_RESETTING);
+
+ if (netif_running(dev)) {
+ dev->netdev_ops->ndo_stop(dev);
+ stopped = 1;
+ }
+
+ if (lio_reset_queues(dev, combined_count))
+ return -EINVAL;
+
+ if (stopped)
+ dev->netdev_ops->ndo_open(dev);
+
+ ifstate_reset(lio, LIO_IFSTATE_RESETTING);
+
+ return 0;
+}
+
+static int lio_get_eeprom_len(struct net_device *netdev)
+{
+ u8 buf[192];
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+ int len;
+
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+ len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
+
+ return len;
+}
+
+static int
+lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_board_info *board_info;
+
+ if (eeprom->offset)
+ return -EINVAL;
+
+ eeprom->magic = oct_dev->pci_dev->vendor;
+ board_info = (struct octeon_board_info *)(&oct_dev->boardinfo);
+ sprintf((char *)bytes,
+ "boardname:%s serialnum:%s maj:%lld min:%lld\n",
+ board_info->name, board_info->serial_number,
+ board_info->major, board_info->minor);
+
+ return 0;
+}
+
+static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
+ nctrl.ncmd.s.param1 = addr;
+ nctrl.ncmd.s.param2 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to configure gpio value, ret=%d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int octnet_id_active(struct net_device *netdev, int val)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ID_ACTIVE;
+ nctrl.ncmd.s.param1 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to configure LED identification, ret=%d\n", ret);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* This routine provides PHY register access for
+ * MDIO clause 45 devices.
+ */
+static int
+octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+ struct oct_mdio_cmd *mdio_cmd;
+ int retval = 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_mdio_cmd),
+ sizeof(struct oct_mdio_cmd_resp), 0);
+
+ if (!sc)
+ return -ENOMEM;
+
+ mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+ mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
+
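+ /* A non-zero op is an MDIO write (*value supplies the data); op == 0
+ * is a read, with the result returned through *value on completion.
+ */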
+ mdio_cmd->op = op;
+ mdio_cmd->mdio_addr = loc;
+ if (op)
+ mdio_cmd->value1 = *value;
+ octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
+ 0, 0, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "octnet_mdio45_access instruction failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct_dev, sc);
+ return -EBUSY;
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived
+ */
+ retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = mdio_cmd_rsp->status;
+ if (retval) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "octnet mdio45 access failed: %x\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -EBUSY;
+ }
+
+ octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+ sizeof(struct oct_mdio_cmd) / 8);
+
+ if (!op)
+ *value = mdio_cmd_rsp->resp.value1;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ return retval;
+}
+
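+/* The soft-command request/response pattern used by octnet_mdio45_access()
+ * above (and by most control paths in this file), condensed as a sketch for
+ * reference -- not additional driver code:
+ *
+ *      sc = octeon_alloc_soft_command(oct, dlen, rlen, 0);
+ *      ... fill sc->virtdptr, set sc->iq_no ...
+ *      octeon_prepare_soft_command(oct, sc, OPCODE_NIC, <opcode>, 0, 0, 0);
+ *      init_completion(&sc->complete);
+ *      sc->sc_status = OCTEON_REQUEST_PENDING;
+ *      if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
+ *              octeon_free_soft_command(oct, sc);
+ *      else if (!wait_for_sc_completion_timeout(oct, sc, 0)) {
+ *              ... read the response at sc->virtrptr ...
+ *              WRITE_ONCE(sc->caller_is_done, true);
+ *      }
+ */
+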
+static int lio_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_link_info *linfo;
+ int value, ret;
+ u32 cur_ver;
+
+ linfo = &lio->linfo;
+ cur_ver = OCT_FW_VER(oct->fw_info.ver.maj,
+ oct->fw_info.ver.min,
+ oct->fw_info.ver.rev);
+
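+ /* Per the ethtool set_phys_id() contract, a positive return from
+ * ETHTOOL_ID_ACTIVE is a blink frequency (on/off cycles per second);
+ * the core then issues ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks at that
+ * rate until ETHTOOL_ID_INACTIVE.
+ */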
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEON);
+ return 2;
+
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Save the current LED settings */
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_BEACON_ADDR,
+ &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+
+ ret = octnet_mdio45_access(lio, 0,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+ /* Configure Beacon values */
+ value = LIO68XX_LED_BEACON_CFGON;
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &value);
+ if (ret)
+ return ret;
+
+ value = LIO68XX_LED_CTRL_CFGON;
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &value);
+ if (ret)
+ return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+ if (linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ return 2;
+ else
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ case ETHTOOL_ID_ON:
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_ON);
+ else if (oct->chip_id == OCTEON_CN66XX)
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_HIGH);
+ else
+ return -EINVAL;
+
+ break;
+
+ case ETHTOOL_ID_OFF:
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID &&
+ linfo->link.s.phy_type == LIO_PHY_PORT_TP &&
+ cur_ver > OCT_FW_VER(1, 7, 2))
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+ else if (oct->chip_id == OCTEON_CN66XX)
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_LOW);
+ else
+ return -EINVAL;
+
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+ VITESSE_PHY_GPIO_DRIVEOFF);
+ } else if (oct->chip_id == OCTEON_CN68XX) {
+ /* Restore LED settings */
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &lio->led_ctrl_val);
+ if (ret)
+ return ret;
+
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &lio->phy_beacon_val);
+ if (ret)
+ return ret;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ octnet_id_active(netdev, LED_IDENTIFICATION_OFF);
+
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+lio_ethtool_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
+ rx_pending = 0;
+
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
+
+ tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
+ rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+ } else if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
+ tx_max_pending = CN23XX_MAX_IQ_DESCRIPTORS;
+ rx_max_pending = CN23XX_MAX_OQ_DESCRIPTORS;
+ rx_pending = oct->droq[0]->max_count;
+ tx_pending = oct->instr_queue[0]->max_count;
+ }
+
+ ering->tx_pending = tx_pending;
+ ering->tx_max_pending = tx_max_pending;
+ ering->rx_pending = rx_pending;
+ ering->rx_max_pending = rx_max_pending;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+}
+
+static int lio_23xx_reconfigure_queue_count(struct lio *lio)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ u32 resp_size, data_size;
+ struct liquidio_if_cfg_resp *resp;
+ struct octeon_soft_command *sc;
+ union oct_nic_if_cfg if_cfg;
+ struct lio_version *vdata;
+ u32 ifidx_or_pfnum;
+ int retval;
+ int j;
+
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ data_size = sizeof(struct lio_version);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, data_size,
+ resp_size, 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "%s: Failed to allocate soft command\n",
+ __func__);
+ return -1;
+ }
+
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ vdata->major = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = (__force u16)cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+ ifidx_or_pfnum = oct->pf_num;
+
+ if_cfg.u64 = 0;
+ if_cfg.s.num_iqueues = oct->sriov_info.num_pf_rings;
+ if_cfg.s.num_oqueues = oct->sriov_info.num_pf_rings;
+ if_cfg.s.base_queue = oct->sriov_info.pf_srn;
+ if_cfg.s.gmx_port_id = oct->pf_num;
+
+ sc->iq_no = 0;
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_QCOUNT_UPDATE, 0,
+ if_cfg.u64, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev,
+ "Sending iq/oq config failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct, sc);
+ return -EIO;
+ }
+
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = resp->status;
+ if (retval) {
+ dev_err(&oct->pci_dev->dev,
+ "iq/oq config failed: %x\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -1;
+ }
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ lio->ifidx = ifidx_or_pfnum;
+ lio->linfo.num_rxpciq = hweight64(resp->cfg_info.iqmask);
+ lio->linfo.num_txpciq = hweight64(resp->cfg_info.iqmask);
+ for (j = 0; j < lio->linfo.num_rxpciq; j++) {
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
+ }
+
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
+ }
+
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+
+ dev_info(&oct->pci_dev->dev, "Queue count updated to %d\n",
+ lio->linfo.num_rxpciq);
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ return 0;
+}
+
+static int lio_reset_queues(struct net_device *netdev, uint32_t num_qs)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int i, queue_count_update = 0;
+ struct napi_struct *napi, *n;
+ int ret;
+
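+ /* Give in-flight commands a short (~100 ms) window to complete
+ * before checking for pending requests and instructions below.
+ */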
+ schedule_timeout_uninterruptible(msecs_to_jiffies(100));
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ if (octeon_set_io_queues_off(oct)) {
+ dev_err(&oct->pci_dev->dev, "Setting io queues off failed\n");
+ return -1;
+ }
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon.
+ */
+ oct->fn_list.disable_io_queues(oct);
+ /* Delete NAPI */
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ netif_napi_del(napi);
+
+ if (num_qs != oct->num_iqs) {
+ ret = netif_set_real_num_rx_queues(netdev, num_qs);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting real number rx failed\n");
+ return ret;
+ }
+
+ ret = netif_set_real_num_tx_queues(netdev, num_qs);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Setting real number tx failed\n");
+ return ret;
+ }
+
+ /* The value of queue_count_update decides whether it is the
+ * queue count or the descriptor count that is being
+ * re-configured.
+ */
+ queue_count_update = 1;
+ }
+
+ /* Re-configuration of queues can happen in two scenarios: SRIOV
+ * enabled and SRIOV disabled. A few steps, such as recreating queue
+ * zero and resetting glists and IRQs, are required for both. For the
+ * latter, some additional steps, such as updating sriov_info for the
+ * octeon device, are needed.
+ */
+ if (queue_count_update) {
+ cleanup_rx_oom_poll_fn(netdev);
+
+ lio_delete_glists(lio);
+
+ /* Delete mbox for PF which is SRIOV disabled because sriov_info
+ * will be now changed.
+ */
+ if ((OCTEON_CN23XX_PF(oct)) && !oct->sriov_info.sriov_enabled)
+ oct->fn_list.free_mbox(oct);
+ }
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+
+ if (queue_count_update) {
+ /* For PF re-configure sriov related information */
+ if ((OCTEON_CN23XX_PF(oct)) &&
+ !oct->sriov_info.sriov_enabled) {
+ oct->sriov_info.num_pf_rings = num_qs;
+ if (cn23xx_sriov_config(oct)) {
+ dev_err(&oct->pci_dev->dev,
+ "Queue reset aborted: SRIOV config failed\n");
+ return -1;
+ }
+
+ num_qs = oct->sriov_info.num_pf_rings;
+ }
+ }
+
+ if (oct->fn_list.setup_device_regs(oct)) {
+ dev_err(&oct->pci_dev->dev, "Failed to configure device registers\n");
+ return -1;
+ }
+
+ /* The following are needed in case of queue count re-configuration and
+ * not for descriptor count re-configuration.
+ */
+ if (queue_count_update) {
+ if (octeon_setup_instr_queues(oct))
+ return -1;
+
+ if (octeon_setup_output_queues(oct))
+ return -1;
+
+ /* Recreating mbox for PF that is SRIOV disabled */
+ if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+ if (oct->fn_list.setup_mbox(oct)) {
+ dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
+ return -1;
+ }
+ }
+
+ /* Deleting and recreating IRQs whether the interface is SRIOV
+ * enabled or disabled.
+ */
+ if (lio_irq_reallocate_irqs(oct, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "IRQs could not be allocated\n");
+ return -1;
+ }
+
+ /* Enable the input and output queues for this Octeon device */
+ if (oct->fn_list.enable_io_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "Failed to enable input/output queues\n");
+ return -1;
+ }
+
+ for (i = 0; i < oct->num_oqs; i++)
+ writel(oct->droq[i]->max_count,
+ oct->droq[i]->pkts_credit_reg);
+
+ /* Inform the firmware of the new queue count. This is required
+ * so that the firmware can allocate more queues than were
+ * configured at load time.
+ */
+ if (OCTEON_CN23XX_PF(oct) && !oct->sriov_info.sriov_enabled) {
+ if (lio_23xx_reconfigure_queue_count(lio))
+ return -1;
+ }
+ }
+
+ /* Once firmware is aware of the new value, queues can be recreated */
+ if (liquidio_setup_io_queues(oct, 0, num_qs, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "I/O queues creation failed\n");
+ return -1;
+ }
+
+ if (queue_count_update) {
+ if (lio_setup_glists(oct, lio, num_qs)) {
+ dev_err(&oct->pci_dev->dev, "Gather list allocation failed\n");
+ return -1;
+ }
+
+ if (setup_rx_oom_poll_fn(netdev)) {
+ dev_err(&oct->pci_dev->dev, "lio_setup_rx_oom_poll_fn failed\n");
+ return 1;
+ }
+
+ /* Send the firmware the new number of queues if the interface
+ * is a VF or a PF with SRIOV enabled.
+ */
+ if (oct->sriov_info.sriov_enabled || OCTEON_CN23XX_VF(oct))
+ if (lio_send_queue_count_update(netdev, num_qs))
+ return -1;
+ }
+
+ return 0;
+}
+
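+/* Handler for "ethtool -G <iface>": adjusts the per-queue descriptor counts
+ * (CN23XX PF/VF only) and then rebuilds the queues via lio_reset_queues().
+ */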
+static int
+lio_ethtool_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ u32 rx_count, tx_count, rx_count_old, tx_count_old;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int stopped = 0;
+
+ if (!OCTEON_CN23XX_PF(oct) && !OCTEON_CN23XX_VF(oct))
+ return -EINVAL;
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending)
+ return -EINVAL;
+
+ rx_count = clamp_t(u32, ering->rx_pending, CN23XX_MIN_OQ_DESCRIPTORS,
+ CN23XX_MAX_OQ_DESCRIPTORS);
+ tx_count = clamp_t(u32, ering->tx_pending, CN23XX_MIN_IQ_DESCRIPTORS,
+ CN23XX_MAX_IQ_DESCRIPTORS);
+
+ rx_count_old = oct->droq[0]->max_count;
+ tx_count_old = oct->instr_queue[0]->max_count;
+
+ if (rx_count == rx_count_old && tx_count == tx_count_old)
+ return 0;
+
+ ifstate_set(lio, LIO_IFSTATE_RESETTING);
+
+ if (netif_running(netdev)) {
+ netdev->netdev_ops->ndo_stop(netdev);
+ stopped = 1;
+ }
+
+ /* Change RX/TX DESCS count */
+ if (tx_count != tx_count_old)
+ CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ tx_count);
+ if (rx_count != rx_count_old)
+ CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ rx_count);
+
+ if (lio_reset_queues(netdev, oct->num_iqs))
+ goto err_lio_reset_queues;
+
+ if (stopped)
+ netdev->netdev_ops->ndo_open(netdev);
+
+ ifstate_reset(lio, LIO_IFSTATE_RESETTING);
+
+ return 0;
+
+err_lio_reset_queues:
+ if (tx_count != tx_count_old)
+ CFG_SET_NUM_TX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ tx_count_old);
+ if (rx_count != rx_count_old)
+ CFG_SET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(oct), lio->ifidx,
+ rx_count_old);
+ return -EINVAL;
+}
+
+static u32 lio_get_msglevel(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->msg_enable;
+}
+
+static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
+ if (msglvl & NETIF_MSG_HW)
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
+ else
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_DISABLE, 0);
+ }
+
+ lio->msg_enable = msglvl;
+}
+
+static void lio_vf_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ lio->msg_enable = msglvl;
+}
+
+static void
+lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ /* Note: these drivers do not support autonegotiation.
+ * Just report pause frame support.
+ */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ pause->autoneg = 0;
+
+ pause->tx_pause = oct->tx_pause;
+ pause->rx_pause = oct->rx_pause;
+}
+
+static int
+lio_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
+{
+ /* Note: these drivers do not support autonegotiation. */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct oct_link_info *linfo = &lio->linfo;
+
+ int ret = 0;
+
+ if (oct->chip_id != OCTEON_CN23XX_PF_VID)
+ return -EINVAL;
+
+ if (linfo->link.s.duplex == 0) {
+ /* no flow control for half duplex */
+ if (pause->rx_pause || pause->tx_pause)
+ return -EINVAL;
+ }
+
+ /* do not support autoneg of link flow control */
+ if (pause->autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_FLOW_CTL;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ if (pause->rx_pause) {
+ /* enable rx pause */
+ nctrl.ncmd.s.param1 = 1;
+ } else {
+ /* disable rx pause */
+ nctrl.ncmd.s.param1 = 0;
+ }
+
+ if (pause->tx_pause) {
+ /* enable tx pause */
+ nctrl.ncmd.s.param2 = 1;
+ } else {
+ /* disable tx pause */
+ nctrl.ncmd.s.param2 = 0;
+ }
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to set pause parameter, ret=%d\n", ret);
+ return -EINVAL;
+ }
+
+ oct->rx_pause = pause->rx_pause;
+ oct->tx_pause = pause->tx_pause;
+
+ return 0;
+}
+
+static void
+lio_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats __attribute__((unused)),
+ u64 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ struct rtnl_link_stats64 lstats;
+ int i = 0, j;
+
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
+ netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
+ /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
+ data[i++] = lstats.rx_packets;
+ /*sum of oct->instr_queue[iq_no]->stats.tx_done */
+ data[i++] = lstats.tx_packets;
+ /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
+ data[i++] = lstats.rx_bytes;
+ /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
+ data[i++] = lstats.tx_bytes;
+ data[i++] = lstats.rx_errors +
+ oct_dev->link_stats.fromwire.fcs_err +
+ oct_dev->link_stats.fromwire.jabber_err +
+ oct_dev->link_stats.fromwire.l2_err +
+ oct_dev->link_stats.fromwire.frame_err;
+ data[i++] = lstats.tx_errors;
+ /*sum of oct->droq[oq_no]->stats->rx_dropped +
+ *oct->droq[oq_no]->stats->dropped_nodispatch +
+ *oct->droq[oq_no]->stats->dropped_toomany +
+ *oct->droq[oq_no]->stats->dropped_nomem
+ */
+ data[i++] = lstats.rx_dropped +
+ oct_dev->link_stats.fromwire.fifo_err +
+ oct_dev->link_stats.fromwire.dmac_drop +
+ oct_dev->link_stats.fromwire.red_drops +
+ oct_dev->link_stats.fromwire.fw_err_pko +
+ oct_dev->link_stats.fromwire.fw_err_link +
+ oct_dev->link_stats.fromwire.fw_err_drop;
+ /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
+ data[i++] = lstats.tx_dropped +
+ oct_dev->link_stats.fromhost.max_collision_fail +
+ oct_dev->link_stats.fromhost.max_deferral_fail +
+ oct_dev->link_stats.fromhost.total_collisions +
+ oct_dev->link_stats.fromhost.fw_err_pko +
+ oct_dev->link_stats.fromhost.fw_err_link +
+ oct_dev->link_stats.fromhost.fw_err_drop +
+ oct_dev->link_stats.fromhost.fw_err_pki;
+
+ /* firmware tx stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
+ *fromhost.fw_total_sent
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
+ /*per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_pki */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pki);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tso_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_tso
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
+
+ /* Multicast packets sent by this port */
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
+
+ /* mac tx statistics */
+ /*CVMX_BGXX_CMRX_TX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT15 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT14 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT17 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
+ /*CVMX_BGXX_CMRX_TX_STAT3 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT2 */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT16 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
+ /*CVMX_BGXX_CMRX_TX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
+
+ /* RX firmware stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_rcvd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
+ /* Multicast packets received on this port */
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_err_pko
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan_err
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
+
+ /* LRO */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_pkts
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_octs
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_port
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_seq
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_tsval
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_timer
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
+ /* intrmod: packet forward rate */
+ /*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
+
+ /* mac: link-level stats */
+ /*CVMX_BGXX_CMRX_RX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
+ /*CVMX_BGXX_CMRX_RX_STAT2 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
+ /*CVMX_BGXX_CMRX_RX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
+ /*lio->link_changes*/
+ data[i++] = CVM_CAST64(lio->link_changes);
+
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.iq & BIT_ULL(j)))
+ continue;
+ /*packets to network port*/
+ /*# of packets tx to network */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ /*# of bytes tx to network */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ /*# of packets dropped */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ /*# of tx fails due to queue full */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ /*XXX gather entries sent */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+
+ /*instruction to firmware: data and control */
+ /*# of instructions to the queue */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ /*# of instructions processed */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_processed);
+ /*# of instructions could not be processed */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_dropped);
+ /*bytes sent through the queue */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+
+ /*tso request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
+ /*vxlan request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
+ /*txq restart*/
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
+ }
+
+ /* RX */
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))
+ continue;
+
+ /*packets send to TCP/IP network stack */
+ /*# of packets to network stack */
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ /*# of bytes to network stack */
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ /*# of packets dropped */
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
+ oct_dev->droq[j]->stats.dropped_toomany +
+ oct_dev->droq[j]->stats.rx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+
+ /*control and data path*/
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
+ }
+}
+
+static void lio_vf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats
+ __attribute__((unused)),
+ u64 *data)
+{
+ struct rtnl_link_stats64 lstats;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i = 0, j, vj;
+
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
+ netdev->netdev_ops->ndo_get_stats64(netdev, &lstats);
+ /* sum of oct->droq[oq_no]->stats->rx_pkts_received */
+ data[i++] = lstats.rx_packets;
+ /* sum of oct->instr_queue[iq_no]->stats.tx_done */
+ data[i++] = lstats.tx_packets;
+ /* sum of oct->droq[oq_no]->stats->rx_bytes_received */
+ data[i++] = lstats.rx_bytes;
+ /* sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
+ data[i++] = lstats.tx_bytes;
+ data[i++] = lstats.rx_errors;
+ data[i++] = lstats.tx_errors;
+ /* sum of oct->droq[oq_no]->stats->rx_dropped +
+ * oct->droq[oq_no]->stats->dropped_nodispatch +
+ * oct->droq[oq_no]->stats->dropped_toomany +
+ * oct->droq[oq_no]->stats->dropped_nomem
+ */
+ data[i++] = lstats.rx_dropped;
+ /* sum of oct->instr_queue[iq_no]->stats.tx_dropped */
+ data[i++] = lstats.tx_dropped +
+ oct_dev->link_stats.fromhost.fw_err_drop;
+
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_mcast;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_mcast_sent;
+ data[i++] = oct_dev->link_stats.fromwire.fw_total_bcast;
+ data[i++] = oct_dev->link_stats.fromhost.fw_total_bcast_sent;
+
+ /* lio->link_changes */
+ data[i++] = CVM_CAST64(lio->link_changes);
+
+ for (vj = 0; vj < oct_dev->num_iqs; vj++) {
+ j = lio->linfo.txpciq[vj].s.q_no;
+
+ /* packets to network port */
+ /* # of packets tx to network */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ /* # of bytes tx to network */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ /* # of packets dropped */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_dropped);
+ /* # of tx fails due to queue full */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ /* XXX gather entries sent */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.sgentry_sent);
+
+ /* instruction to firmware: data and control */
+ /* # of instructions to the queue */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.instr_posted);
+ /* # of instructions processed */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_processed);
+ /* # of instructions could not be processed */
+ data[i++] =
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_dropped);
+ /* bytes sent through the queue */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.bytes_sent);
+ /* tso request */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
+ /* vxlan request */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
+ /* txq restart */
+ data[i++] = CVM_CAST64(
+ oct_dev->instr_queue[j]->stats.tx_restart);
+ }
+
+ /* RX */
+ for (vj = 0; vj < oct_dev->num_oqs; vj++) {
+ j = lio->linfo.rxpciq[vj].s.q_no;
+
+ /* packets send to TCP/IP network stack */
+ /* # of packets to network stack */
+ data[i++] = CVM_CAST64(
+ oct_dev->droq[j]->stats.rx_pkts_received);
+ /* # of bytes to network stack */
+ data[i++] = CVM_CAST64(
+ oct_dev->droq[j]->stats.rx_bytes_received);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
+ oct_dev->droq[j]->stats.dropped_toomany +
+ oct_dev->droq[j]->stats.rx_dropped);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+
+ /* control and data path */
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
+ }
+}
+
+static void lio_get_priv_flags_strings(struct lio *lio, u8 *data)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int i;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
+ for (i = 0; i < ARRAY_SIZE(oct_priv_flags_strings); i++) {
+ sprintf(data, "%s", oct_priv_flags_strings[i]);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ break;
+ }
+}
+
+static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int num_iq_stats, num_oq_stats, i, j;
+ int num_stats;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(oct_stats_strings);
+ for (j = 0; j < num_stats; j++) {
+ sprintf(data, "%s", oct_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "tx-%d-%s", i,
+ oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "rx-%d-%s", i,
+ oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ lio_get_priv_flags_strings(lio, data);
+ break;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
+ break;
+ }
+}
+
+static void lio_vf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ int num_iq_stats, num_oq_stats, i, j;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+ int num_stats;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(oct_vf_stats_strings);
+ for (j = 0; j < num_stats; j++) {
+ sprintf(data, "%s", oct_vf_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "tx-%d-%s", i,
+ oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "rx-%d-%s", i,
+ oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ lio_get_priv_flags_strings(lio, data);
+ break;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
+ break;
+ }
+}
+
+static int lio_get_priv_flags_ss_count(struct lio *lio)
+{
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ switch (oct_dev->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
+ return ARRAY_SIZE(oct_priv_flags_strings);
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ return -EOPNOTSUPP;
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lio_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (ARRAY_SIZE(oct_stats_strings) +
+ ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
+ ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ case ETH_SS_PRIV_FLAGS:
+ return lio_get_priv_flags_ss_count(lio);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int lio_vf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (ARRAY_SIZE(oct_vf_stats_strings) +
+ ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
+ ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ case ETH_SS_PRIV_FLAGS:
+ return lio_get_priv_flags_ss_count(lio);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
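+/* The three ETH_SS_STATS hooks above must stay in lockstep: get_sset_count()
+ * returns some N, get_strings() emits exactly N names spaced ETH_GSTRING_LEN
+ * apart, and get_ethtool_stats() fills exactly N u64 values in the same
+ * order. A minimal sketch of that contract, using a hypothetical "foo"
+ * driver (illustrative only, not LiquidIO code):
+ *
+ *      static const char foo_stat_names[][ETH_GSTRING_LEN] = {
+ *              "rx_ok", "tx_ok",
+ *      };
+ *
+ *      static int foo_get_sset_count(struct net_device *nd, int sset)
+ *      {
+ *              return sset == ETH_SS_STATS ?
+ *                     ARRAY_SIZE(foo_stat_names) : -EOPNOTSUPP;
+ *      }
+ *
+ *      static void foo_get_strings(struct net_device *nd, u32 sset, u8 *data)
+ *      {
+ *              if (sset == ETH_SS_STATS)
+ *                      memcpy(data, foo_stat_names, sizeof(foo_stat_names));
+ *      }
+ *
+ *      static void foo_get_ethtool_stats(struct net_device *nd,
+ *                                        struct ethtool_stats *st, u64 *data)
+ *      {
+ *              data[0] = foo_rx_ok_count(nd);  // hypothetical helpers, in
+ *              data[1] = foo_tx_ok_count(nd);  // the same order as the names
+ *      }
+ */
+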
+/* get interrupt moderation parameters */
+static int octnet_get_intrmod_cfg(struct lio *lio,
+ struct oct_intrmod_cfg *intr_cfg)
+{
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_resp *resp;
+ int retval;
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_intrmod_resp), 0);
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_intrmod_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_intrmod_resp));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_INTRMOD_PARAMS, 0, 0, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+ * response arrived or timed-out.
+ */
+ retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+ if (retval)
+ return -ENODEV;
+
+ if (resp->status) {
+ dev_err(&oct_dev->pci_dev->dev,
+ "Get interrupt moderation parameters failed\n");
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -ENODEV;
+ }
+
+ octeon_swap_8B_data((u64 *)&resp->intrmod,
+ (sizeof(struct oct_intrmod_cfg)) / 8);
+ memcpy(intr_cfg, &resp->intrmod, sizeof(struct oct_intrmod_cfg));
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ return 0;
+}
+
+/* Configure interrupt moderation parameters */
+static int octnet_set_intrmod_cfg(struct lio *lio,
+ struct oct_intrmod_cfg *intr_cfg)
+{
+ struct octeon_soft_command *sc;
+ struct oct_intrmod_cfg *cfg;
+ int retval;
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ sizeof(struct oct_intrmod_cfg),
+ 16, 0);
+
+ if (!sc)
+ return -ENOMEM;
+
+ cfg = (struct oct_intrmod_cfg *)sc->virtdptr;
+
+ memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg));
+ octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+	 * response arrived or the request timed out.
+ */
+ retval = wait_for_sc_completion_timeout(oct_dev, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = sc->sc_status;
+ if (retval == 0) {
+ dev_info(&oct_dev->pci_dev->dev,
+ "Rx-Adaptive Interrupt moderation %s\n",
+ (intr_cfg->rx_enable) ?
+ "enabled" : "disabled");
+ WRITE_ONCE(sc->caller_is_done, true);
+ return 0;
+ }
+
+ dev_err(&oct_dev->pci_dev->dev,
+ "intrmod config failed. Status: %x\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -ENODEV;
+}
+
+static int lio_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_instr_queue *iq;
+ struct oct_intrmod_cfg intrmod_cfg;
+
+ if (octnet_get_intrmod_cfg(lio, &intrmod_cfg))
+ return -ENODEV;
+
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID: {
+ if (!intrmod_cfg.rx_enable) {
+ intr_coal->rx_coalesce_usecs = oct->rx_coalesce_usecs;
+ intr_coal->rx_max_coalesced_frames =
+ oct->rx_max_coalesced_frames;
+ }
+ if (!intrmod_cfg.tx_enable)
+ intr_coal->tx_max_coalesced_frames =
+ oct->tx_max_coalesced_frames;
+ break;
+ }
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intrmod_cfg.rx_enable) {
+ intr_coal->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
+ intr_coal->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
+ }
+ iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
+ intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
+ break;
+ }
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
+ return -EINVAL;
+ }
+ if (intrmod_cfg.rx_enable) {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg.rx_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg.check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg.maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg.minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg.rx_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg.rx_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg.rx_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg.rx_mincnt_trigger;
+ }
+ if ((OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) &&
+ (intrmod_cfg.tx_enable)) {
+ intr_coal->use_adaptive_tx_coalesce =
+ intrmod_cfg.tx_enable;
+ intr_coal->tx_max_coalesced_frames_high =
+ intrmod_cfg.tx_maxcnt_trigger;
+ intr_coal->tx_max_coalesced_frames_low =
+ intrmod_cfg.tx_mincnt_trigger;
+ }
+ return 0;
+}
+
+/* Enable/Disable auto interrupt Moderation */
+static int oct_cfg_adaptive_intr(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod_cfg,
+ struct ethtool_coalesce *intr_coal)
+{
+ int ret = 0;
+
+ if (intrmod_cfg->rx_enable || intrmod_cfg->tx_enable) {
+ intrmod_cfg->check_intrvl = intr_coal->rate_sample_interval;
+ intrmod_cfg->maxpkt_ratethr = intr_coal->pkt_rate_high;
+ intrmod_cfg->minpkt_ratethr = intr_coal->pkt_rate_low;
+ }
+ if (intrmod_cfg->rx_enable) {
+ intrmod_cfg->rx_maxcnt_trigger =
+ intr_coal->rx_max_coalesced_frames_high;
+ intrmod_cfg->rx_maxtmr_trigger =
+ intr_coal->rx_coalesce_usecs_high;
+ intrmod_cfg->rx_mintmr_trigger =
+ intr_coal->rx_coalesce_usecs_low;
+ intrmod_cfg->rx_mincnt_trigger =
+ intr_coal->rx_max_coalesced_frames_low;
+ }
+ if (intrmod_cfg->tx_enable) {
+ intrmod_cfg->tx_maxcnt_trigger =
+ intr_coal->tx_max_coalesced_frames_high;
+ intrmod_cfg->tx_mincnt_trigger =
+ intr_coal->tx_max_coalesced_frames_low;
+ }
+
+ ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
+
+ return ret;
+}
+
+static int
+oct_cfg_rx_intrcnt(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ u32 rx_max_coalesced_frames;
+
+ /* Config Cnt based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ rx_max_coalesced_frames);
+ CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ break;
+ }
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = intrmod->rx_frames;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no)) &
+ (0x3fffff00000000UL)) |
+ (rx_max_coalesced_frames - 1));
+ /*consider setting resend bit*/
+ }
+ intrmod->rx_frames = rx_max_coalesced_frames;
+ oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
+ break;
+ }
+ case OCTEON_CN23XX_VF_VID: {
+ int q_no;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = intrmod->rx_frames;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (octeon_read_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no)) &
+ (0x3fffff00000000UL)) |
+ (rx_max_coalesced_frames - 1));
+ /*consider writing to resend bit here*/
+ }
+ intrmod->rx_frames = rx_max_coalesced_frames;
+ oct->rx_max_coalesced_frames = rx_max_coalesced_frames;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
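On CN23XX the packet-count threshold is applied with a read-modify-write of the per-queue INT_LEVELS register: the time-threshold bits covered by the 0x3fffff00000000UL mask are preserved and the low bits are replaced with (frames - 1). A standalone sketch of that update on a plain 64-bit value (the CSR access itself is omitted):

#include <stdint.h>
#include <stdio.h>

/* Mask of the time-threshold bits that must be preserved, taken from the
 * code above; the packet-count threshold lives in the low bits.
 */
#define TIME_BITS_MASK	0x3fffff00000000ULL

static uint64_t update_pkt_threshold(uint64_t int_levels, uint32_t frames)
{
	/* keep the time threshold, replace the count with (frames - 1) */
	return (int_levels & TIME_BITS_MASK) | (uint64_t)(frames - 1);
}

int main(void)
{
	uint64_t reg = 0x0000570000000040ULL;	/* example current value */

	reg = update_pkt_threshold(reg, 32);	/* interrupt every 32 pkts */
	printf("new INT_LEVELS value: 0x%016llx\n", (unsigned long long)reg);
	return 0;
}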
+
+static int oct_cfg_rx_intrtime(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ u32 time_threshold, rx_coalesce_usecs;
+
+ /* Config Time based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+
+ time_threshold = lio_cn6xxx_get_oq_ticks(oct,
+ rx_coalesce_usecs);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_OQ_INT_LEVEL_TIME,
+ time_threshold);
+
+ CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+ break;
+ }
+ case OCTEON_CN23XX_PF_VID: {
+ u64 time_threshold;
+ int q_no;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = intrmod->rx_usecs;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+ time_threshold =
+ cn23xx_pf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ q_no += oct->sriov_info.pf_srn;
+ octeon_write_csr64(oct,
+ CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (intrmod->rx_frames |
+ ((u64)time_threshold << 32)));
+ /*consider writing to resend bit here*/
+ }
+ intrmod->rx_usecs = rx_coalesce_usecs;
+ oct->rx_coalesce_usecs = rx_coalesce_usecs;
+ break;
+ }
+ case OCTEON_CN23XX_VF_VID: {
+ u64 time_threshold;
+ int q_no;
+
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = intrmod->rx_usecs;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+
+ time_threshold =
+ cn23xx_vf_get_oq_ticks(oct, (u32)rx_coalesce_usecs);
+ for (q_no = 0; q_no < oct->num_oqs; q_no++) {
+ octeon_write_csr64(
+ oct, CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(q_no),
+ (intrmod->rx_frames |
+ ((u64)time_threshold << 32)));
+ /*consider setting resend bit*/
+ }
+ intrmod->rx_usecs = rx_coalesce_usecs;
+ oct->rx_coalesce_usecs = rx_coalesce_usecs;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
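The time path builds the whole 64-bit value instead: the previously configured packet threshold stays in the low 32 bits and the tick-converted time threshold goes in the upper 32 bits. The sketch below mirrors that packing; usecs_to_ticks() and its scale factor are invented for illustration, whereas the driver uses cn23xx_pf_get_oq_ticks()/cn23xx_vf_get_oq_ticks().

#include <stdint.h>
#include <stdio.h>

/* Hypothetical conversion from microseconds to device ticks; the real
 * conversion scales by the core clock, 600 is just a placeholder.
 */
static uint64_t usecs_to_ticks(uint32_t usecs)
{
	return (uint64_t)usecs * 600;
}

static uint64_t pack_int_levels(uint32_t rx_frames, uint32_t rx_usecs)
{
	/* low 32 bits: packet-count threshold; high 32 bits: time threshold */
	return (uint64_t)rx_frames | (usecs_to_ticks(rx_usecs) << 32);
}

int main(void)
{
	printf("INT_LEVELS = 0x%016llx\n",
	       (unsigned long long)pack_int_levels(64, 100));
	return 0;
}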
+
+static int
+oct_cfg_tx_intrcnt(struct lio *lio,
+ struct oct_intrmod_cfg *intrmod,
+ struct ethtool_coalesce *intr_coal)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ u32 iq_intr_pkt;
+ void __iomem *inst_cnt_reg;
+ u64 val;
+
+ /* Config Cnt based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ case OCTEON_CN23XX_VF_VID:
+ case OCTEON_CN23XX_PF_VID: {
+ int q_no;
+
+ if (!intr_coal->tx_max_coalesced_frames)
+ iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ else
+ iq_intr_pkt = intr_coal->tx_max_coalesced_frames &
+ CN23XX_PKT_IN_DONE_WMARK_MASK;
+ for (q_no = 0; q_no < oct->num_iqs; q_no++) {
+ inst_cnt_reg = (oct->instr_queue[q_no])->inst_cnt_reg;
+ val = readq(inst_cnt_reg);
+			/* clear wmark and count; don't want to write count back */
+ val = (val & 0xFFFF000000000000ULL) |
+ ((u64)(iq_intr_pkt - 1)
+ << CN23XX_PKT_IN_DONE_WMARK_BIT_POS);
+ writeq(val, inst_cnt_reg);
+ /*consider setting resend bit*/
+ }
+ intrmod->tx_frames = iq_intr_pkt;
+ oct->tx_max_coalesced_frames = iq_intr_pkt;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
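For TX, the watermark is a field inside the instruction-count register: the code keeps the top 16 bits, drops the running count, and writes (threshold - 1) at the watermark bit position. A hedged sketch of that field update; KEEP_MASK matches the mask used above, while WMARK_BIT_POS = 32 is only an assumption standing in for CN23XX_PKT_IN_DONE_WMARK_BIT_POS.

#include <stdint.h>
#include <stdio.h>

#define KEEP_MASK	0xFFFF000000000000ULL	/* bits preserved, as above  */
#define WMARK_BIT_POS	32			/* assumed watermark offset  */

static uint64_t set_tx_wmark(uint64_t inst_cnt, uint32_t tx_frames)
{
	/* preserve the top bits, discard the count, install the new mark */
	return (inst_cnt & KEEP_MASK) |
	       ((uint64_t)(tx_frames - 1) << WMARK_BIT_POS);
}

int main(void)
{
	uint64_t reg = 0xABCD000000001234ULL;	/* example register snapshot */

	printf("new value: 0x%016llx\n",
	       (unsigned long long)set_tx_wmark(reg, 64));
	return 0;
}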
+
+static int lio_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *intr_coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int ret;
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_intrmod_cfg intrmod = {0};
+ u32 j, q_no;
+ int db_max, db_min;
+
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ db_min = CN6XXX_DB_MIN;
+ db_max = CN6XXX_DB_MAX;
+ if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
+ (intr_coal->tx_max_coalesced_frames <= db_max)) {
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ q_no = lio->linfo.txpciq[j].s.q_no;
+ oct->instr_queue[q_no]->fill_threshold =
+ intr_coal->tx_max_coalesced_frames;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+ intr_coal->tx_max_coalesced_frames,
+ db_min, db_max);
+ return -EINVAL;
+ }
+ break;
+ case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
+ intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
+ intrmod.rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ intrmod.rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ intrmod.tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
+
+ ret = oct_cfg_adaptive_intr(lio, &intrmod, intr_coal);
+
+ if (!intr_coal->use_adaptive_rx_coalesce) {
+ ret = oct_cfg_rx_intrtime(lio, &intrmod, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+
+ ret = oct_cfg_rx_intrcnt(lio, &intrmod, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ } else {
+ oct->rx_coalesce_usecs =
+ CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
+ oct->rx_max_coalesced_frames =
+ CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ }
+
+ if (!intr_coal->use_adaptive_tx_coalesce) {
+ ret = oct_cfg_tx_intrcnt(lio, &intrmod, intr_coal);
+ if (ret)
+ goto ret_intrmod;
+ } else {
+ oct->tx_max_coalesced_frames =
+ CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
+ }
+
+ return 0;
+ret_intrmod:
+ return ret;
+}
+
+static int lio_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ info->so_timestamping =
+#ifdef PTP_HARDWARE_TIMESTAMPING
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+#endif
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+
+ if (lio->ptp_clock)
+ info->phc_index = ptp_clock_index(lio->ptp_clock);
+ else
+ info->phc_index = -1;
+
+#ifdef PTP_HARDWARE_TIMESTAMPING
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+#endif
+
+ return 0;
+}
+
+/* Return register dump len. */
+static int lio_get_regs_len(struct net_device *dev)
+{
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ return OCT_ETHTOOL_REGDUMP_LEN_23XX;
+ case OCTEON_CN23XX_VF_VID:
+ return OCT_ETHTOOL_REGDUMP_LEN_23XX_VF;
+ default:
+ return OCT_ETHTOOL_REGDUMP_LEN;
+ }
+}
+
+static int cn23xx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ u8 pf_num = oct->pf_num;
+ int len = 0;
+ int i;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+
+ /*0x29030 or 0x29040*/
+ reg = CN23XX_SLI_PKT_MAC_RINFO64(oct->pcie_port, oct->pf_num);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_MAC%d_PF%d_RINFO): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27080 or 0x27090*/
+ reg = CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_ENB): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27000 or 0x27010*/
+ reg = CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_MAC%d_PF%d_INT_SUM): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29120*/
+ reg = 0x29120;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_MEM_CTL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27300*/
+ reg = 0x27300 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_MAC%d_PF%d_PKT_VF_INT): %016llx\n", reg,
+ oct->pcie_port, oct->pf_num, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x27200*/
+ reg = 0x27200 + oct->pcie_port * CN23XX_MAC_INT_OFFSET +
+ (oct->pf_num) * CN23XX_PF_INT_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_MAC%d_PF%d_PP_VF_INT): %016llx\n",
+ reg, oct->pcie_port, oct->pf_num,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*29130*/
+ reg = CN23XX_SLI_PKT_CNT_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_CNT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29140*/
+ reg = CN23XX_SLI_PKT_TIME_INT;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_TIME_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29160*/
+ reg = 0x29160;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_INT): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29180*/
+ reg = CN23XX_SLI_OQ_WMARK;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_OUTPUT_WMARK): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x291E0*/
+ reg = CN23XX_SLI_PKT_IOQ_RING_RST;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_RING_RST): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29210*/
+ reg = CN23XX_SLI_GBL_CONTROL;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_GBL_CONTROL): %016llx\n", reg,
+ (u64)octeon_read_csr64(oct, reg));
+
+ /*0x29220*/
+ reg = 0x29220;
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT_BIST_STATUS): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+
+ /*PF only*/
+ if (pf_num == 0) {
+ /*0x29260*/
+ reg = CN23XX_SLI_OUT_BP_EN_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ } else if (pf_num == 1) {
+ /*0x29270*/
+ reg = CN23XX_SLI_OUT_BP_EN2_W1S;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_OUT_BP_EN2_W1S): %016llx\n",
+ reg, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BUFF_INFO_SIZE(i);
+ len +=
+ sprintf(s + len, "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10040*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10080*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10090*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_SIZE(i);
+ len += sprintf(
+ s + len, "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10050*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_CONTROL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10070*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100a0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKT_INT_LEVELS(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100b0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x100c0*/
+ for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
+ reg = 0x100c0 + i * CN23XX_OQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+
+ /*0x10000*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_PKT_CONTROL64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10010*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_BASE_ADDR64(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n", reg,
+ i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10020*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_DOORBELL(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10030*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
+ reg = CN23XX_SLI_IQ_SIZE(i);
+ len += sprintf(
+ s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ /*0x10040*/
+ for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++)
+ reg = CN23XX_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ return len;
+}
+
+static int cn23xx_vf_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ int len = 0;
+ u32 reg;
+ int i;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_OUT_SIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_SIZE(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKT_CONTROL(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d__OUTPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_SLIST_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INT_LEVELS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%08x] (SLI_PKT%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = 0x100c0 + i * CN23XX_VF_OQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_ERROR_INFO): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = 0x100d0 + i * CN23XX_VF_IQ_OFFSET;
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_VF_INT_SUM): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_PKT_CONTROL64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INPUT_CONTROL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_BASE_ADDR64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BADDR): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_DOORBELL(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_BAOFF_DBELL): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_SIZE(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT%d_INSTR_FIFO_RSIZE): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ for (i = 0; i < (oct->sriov_info.rings_per_vf); i++) {
+ reg = CN23XX_VF_SLI_IQ_INSTR_COUNT64(i);
+ len += sprintf(s + len,
+ "\n[%08x] (SLI_PKT_IN_DONE%d_CNTS): %016llx\n",
+ reg, i, (u64)octeon_read_csr64(oct, reg));
+ }
+
+ return len;
+}
+
+static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct)
+{
+ u32 reg;
+ int i, len = 0;
+
+ /* PCI Window Registers */
+
+ len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n");
+ reg = CN6XXX_WIN_WR_ADDR_LO;
+ len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n",
+ CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n",
+ CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_LO;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n",
+ CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_RD_ADDR_HI;
+ len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n",
+ CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_LO;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n",
+ CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg));
+ reg = CN6XXX_WIN_WR_DATA_HI;
+ len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n",
+ CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg));
+ len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n",
+ CN6XXX_WIN_WR_MASK_REG,
+ octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG));
+
+ /* PCI Interrupt Register */
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct,
+ CN6XXX_SLI_INT_ENB64_PORT0));
+ len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n",
+ CN6XXX_SLI_INT_ENB64_PORT1,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1));
+ len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64,
+ octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64));
+
+ /* PCI Output queue registers */
+ for (i = 0; i < oct->num_oqs; i++) {
+ reg = CN6XXX_SLI_OQ_PKTS_SENT(i);
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i);
+ len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS;
+ len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME;
+ len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n",
+ reg, octeon_read_csr(oct, reg));
+
+ /* PCI Input queue registers */
+ for (i = 0; i <= 3; i++) {
+ u32 reg;
+
+ reg = CN6XXX_SLI_IQ_DOORBELL(i);
+ len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ reg = CN6XXX_SLI_IQ_INSTR_COUNT(i);
+ len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n",
+ reg, i, octeon_read_csr(oct, reg));
+ }
+
+ /* PCI DMA registers */
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
+ CN6XXX_DMA_CNT(0),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
+ reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
+ len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(0),
+ octeon_read_csr(oct, reg));
+
+ len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
+ CN6XXX_DMA_CNT(1),
+ octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
+ CN6XXX_DMA_PKT_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+ reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+ len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
+ CN6XXX_DMA_TIME_INT_LEVEL(1),
+ octeon_read_csr(oct, reg));
+
+ /* PCI Index registers */
+
+ len += sprintf(s + len, "\n");
+
+ for (i = 0; i < 16; i++) {
+ reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
+ len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
+ CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
+ }
+
+ return len;
+}
+
+static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
+{
+ u32 val;
+ int i, len = 0;
+
+ /* PCI CONFIG Registers */
+
+ len += sprintf(s + len,
+ "\n\t Octeon Config space Registers\n\n");
+
+ for (i = 0; i <= 13; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ for (i = 30; i <= 34; i++) {
+ pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+ len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+ (i * 4), i, val);
+ }
+
+ return len;
+}
+
+/* Return register dump to user app. */
+static void lio_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *regbuf)
+{
+ struct lio *lio = GET_LIO(dev);
+ int len = 0;
+ struct octeon_device *oct = lio->oct_dev;
+
+ regs->version = OCT_ETHTOOL_REGSVER;
+
+ switch (oct->chip_id) {
+ case OCTEON_CN23XX_PF_VID:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX);
+ len += cn23xx_read_csr_reg(regbuf + len, oct);
+ break;
+ case OCTEON_CN23XX_VF_VID:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN_23XX_VF);
+ len += cn23xx_vf_read_csr_reg(regbuf + len, oct);
+ break;
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
+ len += cn6xxx_read_csr_reg(regbuf + len, oct);
+ len += cn6xxx_read_config_reg(regbuf + len, oct);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
+ __func__, oct->chip_id);
+ }
+}
+
+static u32 lio_get_priv_flags(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->oct_dev->priv_flags;
+}
+
+static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct lio *lio = GET_LIO(netdev);
+ bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
+
+ lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
+ intr_by_tx_bytes);
+ return 0;
+}
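The private-flags interface is just a bitmask from ethtool; the handler tests the one bit it supports and forwards the result. A trivial standalone sketch of the same test (FLAG_TX_BYTES_BIT is a placeholder for OCT_PRIV_FLAG_TX_BYTES):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_TX_BYTES_BIT	0	/* stand-in for OCT_PRIV_FLAG_TX_BYTES */

static bool priv_flag_set(unsigned int flags, unsigned int bit)
{
	return !!(flags & (1u << bit));
}

int main(void)
{
	unsigned int flags = 0x1;	/* as passed in by ethtool */

	printf("intr_by_tx_bytes: %s\n",
	       priv_flag_set(flags, FLAG_TX_BYTES_BIT) ? "on" : "off");
	return 0;
}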
+
+static int lio_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ fec->active_fec = ETHTOOL_FEC_NONE;
+ fec->fec = ETHTOOL_FEC_NONE;
+
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ if (oct->no_speed_setting == 1)
+ return 0;
+
+ liquidio_get_fec(lio);
+ fec->fec = (ETHTOOL_FEC_RS | ETHTOOL_FEC_OFF);
+ if (oct->props[lio->ifidx].fec == 1)
+ fec->active_fec = ETHTOOL_FEC_RS;
+ else
+ fec->active_fec = ETHTOOL_FEC_OFF;
+ }
+
+ return 0;
+}
+
+static int lio_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID ||
+ oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) {
+ if (oct->no_speed_setting == 1)
+ return -EOPNOTSUPP;
+
+ if (fec->fec & ETHTOOL_FEC_OFF)
+ liquidio_set_fec(lio, 0);
+ else if (fec->fec & ETHTOOL_FEC_RS)
+ liquidio_set_fec(lio, 1);
+ else
+ return -EOPNOTSUPP;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#define LIO_ETHTOOL_COALESCE (ETHTOOL_COALESCE_RX_USECS | \
+ ETHTOOL_COALESCE_MAX_FRAMES | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_LOW | \
+ ETHTOOL_COALESCE_RX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH | \
+ ETHTOOL_COALESCE_PKT_RATE_RX_USECS)
+
+static const struct ethtool_ops lio_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
+ .get_link_ksettings = lio_get_link_ksettings,
+ .set_link_ksettings = lio_set_link_ksettings,
+ .get_fecparam = lio_get_fecparam,
+ .set_fecparam = lio_set_fecparam,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = lio_get_drvinfo,
+ .get_ringparam = lio_ethtool_get_ringparam,
+ .set_ringparam = lio_ethtool_set_ringparam,
+ .get_channels = lio_ethtool_get_channels,
+ .set_channels = lio_ethtool_set_channels,
+ .set_phys_id = lio_set_phys_id,
+ .get_eeprom_len = lio_get_eeprom_len,
+ .get_eeprom = lio_get_eeprom,
+ .get_strings = lio_get_strings,
+ .get_ethtool_stats = lio_get_ethtool_stats,
+ .get_pauseparam = lio_get_pauseparam,
+ .set_pauseparam = lio_set_pauseparam,
+ .get_regs_len = lio_get_regs_len,
+ .get_regs = lio_get_regs,
+ .get_msglevel = lio_get_msglevel,
+ .set_msglevel = lio_set_msglevel,
+ .get_sset_count = lio_get_sset_count,
+ .get_coalesce = lio_get_intr_coalesce,
+ .set_coalesce = lio_set_intr_coalesce,
+ .get_priv_flags = lio_get_priv_flags,
+ .set_priv_flags = lio_set_priv_flags,
+ .get_ts_info = lio_get_ts_info,
+};
+
+static const struct ethtool_ops lio_vf_ethtool_ops = {
+ .supported_coalesce_params = LIO_ETHTOOL_COALESCE,
+ .get_link_ksettings = lio_get_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = lio_get_vf_drvinfo,
+ .get_ringparam = lio_ethtool_get_ringparam,
+ .set_ringparam = lio_ethtool_set_ringparam,
+ .get_channels = lio_ethtool_get_channels,
+ .set_channels = lio_ethtool_set_channels,
+ .get_strings = lio_vf_get_strings,
+ .get_ethtool_stats = lio_vf_get_ethtool_stats,
+ .get_regs_len = lio_get_regs_len,
+ .get_regs = lio_get_regs,
+ .get_msglevel = lio_get_msglevel,
+ .set_msglevel = lio_vf_set_msglevel,
+ .get_sset_count = lio_vf_get_sset_count,
+ .get_coalesce = lio_get_intr_coalesce,
+ .set_coalesce = lio_set_intr_coalesce,
+ .get_priv_flags = lio_get_priv_flags,
+ .set_priv_flags = lio_set_priv_flags,
+ .get_ts_info = lio_get_ts_info,
+};
+
+void liquidio_set_ethtool_ops(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if (OCTEON_CN23XX_VF(oct))
+ netdev->ethtool_ops = &lio_vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &lio_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
new file mode 100644
index 000000000..98793b2ac
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -0,0 +1,4366 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <net/vxlan.h>
+#include <linux/kthread.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_device.h"
+#include "cn23xx_pf_device.h"
+#include "liquidio_image.h"
+#include "lio_vf_rep.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
+MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
+ "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
+
+static int ddr_timeout = 10000;
+module_param(ddr_timeout, int, 0644);
+MODULE_PARM_DESC(ddr_timeout,
+ "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
+
+static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
+module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
+MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");
+
+static u32 console_bitmask;
+module_param(console_bitmask, int, 0644);
+MODULE_PARM_DESC(console_bitmask,
+ "Bitmask indicating which consoles have debug output redirected to syslog.");
+
+/**
+ * octeon_console_debug_enabled - determines if a given console has debug enabled.
+ * @console: console to check
+ * Return: 1 = enabled. 0 otherwise
+ */
+static int octeon_console_debug_enabled(u32 console)
+{
+ return (console_bitmask >> (console)) & 0x1;
+}
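console_bitmask is interpreted one bit per console: bit N redirects console N's debug output to syslog, so for example console_bitmask=0x5 enables consoles 0 and 2. A small standalone illustration:

#include <stdio.h>

static unsigned int console_bitmask = 0x5;	/* example module parameter */

static int console_debug_enabled(unsigned int console)
{
	return (console_bitmask >> console) & 0x1;
}

int main(void)
{
	unsigned int c;

	for (c = 0; c < 4; c++)
		printf("console %u: %s\n", c,
		       console_debug_enabled(c) ? "debug" : "quiet");
	return 0;
}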
+
+/* Polling interval for determining when NIC application is alive */
+#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100
+
+/* runtime link query interval */
+#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
+/* Push the local time to the Octeon firmware every 60 seconds so the
+ * firmware uses the same time reference, making it easy to correlate
+ * firmware-logged events/errors with host events when debugging.
+ */
+#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
+
+/* time to wait for possible in-flight requests in milliseconds */
+#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
+
+struct oct_link_status_resp {
+ u64 rh;
+ struct oct_link_info link_info;
+ u64 status;
+};
+
+struct oct_timestamp_resp {
+ u64 rh;
+ u64 timestamp;
+ u64 status;
+};
+
+#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
+
+union tx_info {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gso_size;
+ u16 gso_segs;
+ u32 reserved;
+#else
+ u32 reserved;
+ u16 gso_segs;
+ u16 gso_size;
+#endif
+ } s;
+};
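tx_info overlays GSO metadata on the single u64 handed to the firmware; the #ifdef keeps the field order consistent between big- and little-endian hosts. One way to picture the little-endian case is the explicit packing below (illustrative only, not a driver helper):

#include <stdint.h>
#include <stdio.h>

/* Little-endian layout from the union above: reserved in the low 32 bits,
 * gso_segs in bits 32-47, gso_size in bits 48-63.
 */
static uint64_t pack_tx_info(uint16_t gso_size, uint16_t gso_segs)
{
	return ((uint64_t)gso_size << 48) | ((uint64_t)gso_segs << 32);
}

int main(void)
{
	/* e.g. a TSO frame split into twelve 1448-byte segments */
	printf("tx_info = 0x%016llx\n",
	       (unsigned long long)pack_tx_info(1448, 12));
	return 0;
}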
+
+/* Octeon device properties to be used by the NIC module.
+ * Each octeon device in the system will be represented
+ * by this structure in the NIC module.
+ */
+
+#define OCTNIC_GSO_MAX_HEADER_SIZE 128
+#define OCTNIC_GSO_MAX_SIZE \
+ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
+
+struct handshake {
+ struct completion init;
+ struct completion started;
+ struct pci_dev *pci_dev;
+ int init_ok;
+ int started_ok;
+};
+
+#ifdef CONFIG_PCI_IOV
+static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
+#endif
+
+static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
+ char *prefix, char *suffix);
+
+static int octeon_device_init(struct octeon_device *);
+static int liquidio_stop(struct net_device *netdev);
+static void liquidio_remove(struct pci_dev *pdev);
+static int liquidio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
+ int linkstate);
+
+static struct handshake handshake[MAX_OCTEON_DEVICES];
+static struct completion first_stage;
+
+static void octeon_droq_bh(struct tasklet_struct *t)
+{
+ int q_no;
+ int reschedule = 0;
+ struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
+ droq_tasklet);
+ struct octeon_device *oct = oct_priv->dev;
+
+ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
+ continue;
+ reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
+ MAX_PACKET_BUDGET);
+ lio_enable_irq(oct->droq[q_no], NULL);
+
+ if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
+ /* set time and cnt interrupt thresholds for this DROQ
+ * for NAPI
+ */
+ int adjusted_q_no = q_no + oct->sriov_info.pf_srn;
+
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
+ 0x5700000040ULL);
+ octeon_write_csr64(
+ oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
+ }
+ }
+
+ if (reschedule)
+ tasklet_schedule(&oct_priv->droq_tasklet);
+}
+
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ int retry = 100, pkt_cnt = 0, pending_pkts = 0;
+ int i;
+
+ do {
+ pending_pkts = 0;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
+ }
+ if (pkt_cnt > 0) {
+ pending_pkts += pkt_cnt;
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ pkt_cnt = 0;
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && pending_pkts);
+
+ return pkt_cnt;
+}
+
+/**
+ * force_io_queues_off - Forces all IO queues off on a given device
+ * @oct: Pointer to Octeon device
+ */
+static void force_io_queues_off(struct octeon_device *oct)
+{
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX)) {
+ /* Reset the Enable bits for Input Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+
+ /* Reset the Enable bits for Output Queues. */
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ }
+}
+
+/**
+ * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
+ * @oct: Pointer to Octeon device
+ */
+static inline void pcierror_quiesce_device(struct octeon_device *oct)
+{
+ int i;
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet processing
+ * to finish.
+ */
+ force_io_queues_off(oct);
+
+ /* To allow for in-flight requests */
+ schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to complete. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq, 0);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ /* Force all pending ordered list requests to time out. */
+ lio_process_ordered_list(oct, 1);
+
+ /* We do not need to wait for output queue packets to be processed. */
+}
+
+/**
+ * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
+ * @dev: Pointer to PCI device
+ */
+static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+ int pos = 0x100;
+ u32 status, mask;
+
+ pr_info("%s :\n", __func__);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+ if (dev->error_state == pci_channel_io_normal)
+ status &= ~mask; /* Clear corresponding nonfatal bits */
+ else
+ status &= mask; /* Clear corresponding fatal bits */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
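The AER helper clears either the non-fatal or the fatal uncorrectable-error bits depending on the channel state, using the severity register to tell the two classes apart (the status bits are write-one-to-clear). A standalone sketch of that masking decision with made-up register values:

#include <stdint.h>
#include <stdio.h>

/* Given the uncorrectable status and the severity mask (1 = fatal),
 * compute which bits to write back in order to clear them.
 */
static uint32_t bits_to_clear(uint32_t status, uint32_t severity, int io_normal)
{
	return io_normal ? (status & ~severity)	/* clear non-fatal bits */
			 : (status & severity);	/* clear fatal bits     */
}

int main(void)
{
	uint32_t status = 0x00000011;	/* example: two error bits latched */
	uint32_t sever  = 0x00000010;	/* example: bit 4 is marked fatal  */

	printf("normal channel: clear 0x%08x\n", bits_to_clear(status, sever, 1));
	printf("frozen channel: clear 0x%08x\n", bits_to_clear(status, sever, 0));
	return 0;
}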
+
+/**
+ * stop_pci_io - Stop all PCI IO to a given device
+ * @oct: Pointer to Octeon device
+ */
+static void stop_pci_io(struct octeon_device *oct)
+{
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ pci_disable_device(oct->pci_dev);
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ pcierror_quiesce_device(oct);
+
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ /* making it a common function for all OCTEON models */
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+}
+
+/**
+ * liquidio_pcie_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct octeon_device *oct = pci_get_drvdata(pdev);
+
+ /* Non-correctable Non-fatal errors */
+ if (state == pci_channel_io_normal) {
+ dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
+ /* Non-correctable Fatal errors */
+ dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+ stop_pci_io(oct);
+
+	/* Always return a DISCONNECT. There is no support for recovery, only
+ * for a clean shutdown.
+ */
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * liquidio_pcie_mmio_enabled - mmio handler
+ * @pdev: Pointer to PCI device
+ */
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in io_error above.
+ * But play safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * liquidio_pcie_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the octeon_resume routine.
+ */
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
+{
+ /* We should never hit this since we never ask for a reset for a Fatal
+ * Error. We always return DISCONNECT in io_error above.
+ * But play safe and return RECOVERED for now.
+ */
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * liquidio_pcie_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the octeon_resume routine.
+ */
+static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
+{
+ /* Nothing to be done here. */
+}
+
+#define liquidio_suspend NULL
+#define liquidio_resume NULL
+
+/* For PCI-E Advanced Error Recovery (AER) Interface */
+static const struct pci_error_handlers liquidio_err_handler = {
+ .error_detected = liquidio_pcie_error_detected,
+ .mmio_enabled = liquidio_pcie_mmio_enabled,
+ .slot_reset = liquidio_pcie_slot_reset,
+ .resume = liquidio_pcie_resume,
+};
+
+static const struct pci_device_id liquidio_pci_tbl[] = {
+ { /* 68xx */
+ PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ { /* 66xx */
+ PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ { /* 23xx pf */
+ PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0
+ }
+};
+MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
+
+static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);
+
+static struct pci_driver liquidio_pci_driver = {
+ .name = "LiquidIO",
+ .id_table = liquidio_pci_tbl,
+ .probe = liquidio_probe,
+ .remove = liquidio_remove,
+ .err_handler = &liquidio_err_handler, /* For AER */
+ .driver.pm = &liquidio_pm_ops,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = liquidio_enable_sriov,
+#endif
+};
+
+/**
+ * liquidio_init_pci - register PCI driver
+ */
+static int liquidio_init_pci(void)
+{
+ return pci_register_driver(&liquidio_pci_driver);
+}
+
+/**
+ * liquidio_deinit_pci - unregister PCI driver
+ */
+static void liquidio_deinit_pci(void)
+{
+ pci_unregister_driver(&liquidio_pci_driver);
+}
+
+/**
+ * check_txq_status - Check Tx queue status, and take appropriate action
+ * @lio: per-network private data
+ * Return: 0 if full, number of queues woken up otherwise
+ */
+static inline int check_txq_status(struct lio *lio)
+{
+ int numqs = lio->netdev->real_num_tx_queues;
+ int ret_val = 0;
+ int q, iq;
+
+ /* check each sub-queue state */
+ for (q = 0; q < numqs; q++) {
+ iq = lio->linfo.txpciq[q %
+ lio->oct_dev->num_iqs].s.q_no;
+ if (octnet_iq_is_full(lio->oct_dev, iq))
+ continue;
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ netif_wake_subqueue(lio->netdev, q);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+ tx_restart, 1);
+ ret_val++;
+ }
+ }
+
+ return ret_val;
+}
+
+/**
+ * print_link_info - Print link information
+ * @netdev: network device
+ */
+static void print_link_info(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
+ ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
+ struct oct_link_info *linfo = &lio->linfo;
+
+ if (linfo->link.s.link_up) {
+ netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
+ linfo->link.s.speed,
+ (linfo->link.s.duplex) ? "Full" : "Half");
+ } else {
+ netif_info(lio, link, lio->netdev, "Link Down\n");
+ }
+ }
+}
+
+/**
+ * octnet_link_status_change - Routine to notify MTU change
+ * @work: work_struct data structure
+ */
+static void octnet_link_status_change(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+	/* lio->linfo.link.s.mtu always contains the max MTU of the lio
+	 * interface. This handler is invoked only when the new max MTU of
+	 * the interface is less than the current MTU.
+ */
+ rtnl_lock();
+ dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
+ rtnl_unlock();
+}
+
+/**
+ * setup_link_status_change_wq - Sets up the mtu status change work
+ * @netdev: network device
+ */
+static inline int setup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->link_status_wq.wq = alloc_workqueue("link-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->link_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
+ octnet_link_status_change);
+ lio->link_status_wq.wk.ctxptr = lio;
+
+ return 0;
+}
+
+static inline void cleanup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->link_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
+ destroy_workqueue(lio->link_status_wq.wq);
+ }
+}
+
+/**
+ * update_link_status - Update link status
+ * @netdev: network device
+ * @ls: link status structure
+ *
+ * Called on receipt of a link status response from the core application to
+ * update each interface's link status.
+ */
+static inline void update_link_status(struct net_device *netdev,
+ union oct_link_status *ls)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int changed = (lio->linfo.link.u64 != ls->u64);
+ int current_max_mtu = lio->linfo.link.s.mtu;
+ struct octeon_device *oct = lio->oct_dev;
+
+ dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
+ __func__, lio->linfo.link.u64, ls->u64);
+ lio->linfo.link.u64 = ls->u64;
+
+ if ((lio->intf_open) && (changed)) {
+ print_link_info(netdev);
+ lio->link_changes++;
+
+ if (lio->linfo.link.s.link_up) {
+ dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
+ netif_carrier_on(netdev);
+ wake_txqs(netdev);
+ } else {
+ dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
+ netif_carrier_off(netdev);
+ stop_txqs(netdev);
+ }
+ if (lio->linfo.link.s.mtu != current_max_mtu) {
+ netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
+ current_max_mtu, lio->linfo.link.s.mtu);
+ netdev->max_mtu = lio->linfo.link.s.mtu;
+ }
+ if (lio->linfo.link.s.mtu < netdev->mtu) {
+ dev_warn(&oct->pci_dev->dev,
+ "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
+ netdev->mtu, lio->linfo.link.s.mtu);
+ queue_delayed_work(lio->link_status_wq.wq,
+ &lio->link_status_wq.wk.work, 0);
+ }
+ }
+}
+
+/**
+ * lio_sync_octeon_time - send latest localtime to octeon firmware so that
+ * firmware will correct its time, in case there is a time skew
+ *
+ * @work: work scheduled to send time update to octeon firmware
+ **/
+static void lio_sync_octeon_time(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ struct timespec64 ts;
+ struct lio_time *lt;
+ int ret;
+
+ sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to sync time to octeon: soft command allocation failed\n");
+ return;
+ }
+
+ lt = (struct lio_time *)sc->virtdptr;
+
+ /* Get time of the day */
+ ktime_get_real_ts64(&ts);
+ lt->sec = ts.tv_sec;
+ lt->nsec = ts.tv_nsec;
+ octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ ret = octeon_send_soft_command(oct, sc);
+ if (ret == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev,
+ "Failed to sync time to octeon: failed to send soft command\n");
+ octeon_free_soft_command(oct, sc);
+ } else {
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ queue_delayed_work(lio->sync_octeon_time_wq.wq,
+ &lio->sync_octeon_time_wq.wk.work,
+ msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
+}
+
+/**
+ * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
+ *
+ * @netdev: network device which should send time update to firmware
+ **/
+static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->sync_octeon_time_wq.wq =
+ alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
+ if (!lio->sync_octeon_time_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
+ lio_sync_octeon_time);
+ lio->sync_octeon_time_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->sync_octeon_time_wq.wq,
+ &lio->sync_octeon_time_wq.wk.work,
+ msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
+
+ return 0;
+}
+
+/**
+ * cleanup_sync_octeon_time_wq - destroy wq
+ *
+ * @netdev: network device which should send time update to firmware
+ *
+ * Stop scheduling and destroy the work created to periodically update local
+ * time to octeon firmware.
+ **/
+static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;
+
+ if (time_wq->wq) {
+ cancel_delayed_work_sync(&time_wq->wk.work);
+ destroy_workqueue(time_wq->wq);
+ }
+}
+
+static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
+{
+ struct octeon_device *other_oct;
+
+ other_oct = lio_get_device(oct->octeon_id + 1);
+
+ if (other_oct && other_oct->pci_dev) {
+ int oct_busnum, other_oct_busnum;
+
+ oct_busnum = oct->pci_dev->bus->number;
+ other_oct_busnum = other_oct->pci_dev->bus->number;
+
+ if (oct_busnum == other_oct_busnum) {
+ int oct_slot, other_oct_slot;
+
+ oct_slot = PCI_SLOT(oct->pci_dev->devfn);
+ other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);
+
+ if (oct_slot == other_oct_slot)
+ return other_oct;
+ }
+ }
+
+ return NULL;
+}
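The two PFs of one adapter are paired by matching bus number and PCI slot; PCI_SLOT() and PCI_FUNC() simply split devfn into its 5-bit device and 3-bit function parts. A tiny standalone illustration of that split:

#include <stdio.h>

/* devfn packs the device (slot) in bits 7:3 and the function in bits 2:0,
 * which is what the kernel's PCI_SLOT()/PCI_FUNC() macros extract.
 */
#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	unsigned int devfn_pf0 = (0x03 << 3) | 0;	/* e.g. 00:03.0 */
	unsigned int devfn_pf1 = (0x03 << 3) | 1;	/* e.g. 00:03.1 */

	/* Same slot, different function => same physical adapter. */
	printf("same slot: %s\n",
	       SLOT(devfn_pf0) == SLOT(devfn_pf1) ? "yes" : "no");
	printf("functions: %u and %u\n", FUNC(devfn_pf0), FUNC(devfn_pf1));
	return 0;
}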
+
+static void disable_all_vf_links(struct octeon_device *oct)
+{
+ struct net_device *netdev;
+ int max_vfs, vf, i;
+
+ if (!oct)
+ return;
+
+ max_vfs = oct->sriov_info.max_vfs;
+
+ for (i = 0; i < oct->ifcount; i++) {
+ netdev = oct->props[i].netdev;
+ if (!netdev)
+ continue;
+
+ for (vf = 0; vf < max_vfs; vf++)
+ liquidio_set_vf_link_state(netdev, vf,
+ IFLA_VF_LINK_STATE_DISABLE);
+ }
+}
+
+static int liquidio_watchdog(void *param)
+{
+ bool err_msg_was_printed[LIO_MAX_CORES];
+ u16 mask_of_crashed_or_stuck_cores = 0;
+ bool all_vf_links_are_disabled = false;
+ struct octeon_device *oct = param;
+ struct octeon_device *other_oct;
+#ifdef CONFIG_MODULE_UNLOAD
+ long refcount, vfs_referencing_pf;
+ u64 vfs_mask1, vfs_mask2;
+#endif
+ int core;
+
+ memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));
+
+ while (!kthread_should_stop()) {
+ /* sleep for a couple of seconds so that we don't hog the CPU */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(2000));
+
+ mask_of_crashed_or_stuck_cores =
+ (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
+
+ if (!mask_of_crashed_or_stuck_cores)
+ continue;
+
+ WRITE_ONCE(oct->cores_crashed, true);
+ other_oct = get_other_octeon_device(oct);
+ if (other_oct)
+ WRITE_ONCE(other_oct->cores_crashed, true);
+
+ for (core = 0; core < LIO_MAX_CORES; core++) {
+ bool core_crashed_or_got_stuck;
+
+ core_crashed_or_got_stuck =
+ (mask_of_crashed_or_stuck_cores
+ >> core) & 1;
+
+ if (core_crashed_or_got_stuck &&
+ !err_msg_was_printed[core]) {
+ dev_err(&oct->pci_dev->dev,
+ "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
+ core);
+ err_msg_was_printed[core] = true;
+ }
+ }
+
+ if (all_vf_links_are_disabled)
+ continue;
+
+ disable_all_vf_links(oct);
+ disable_all_vf_links(other_oct);
+ all_vf_links_are_disabled = true;
+
+#ifdef CONFIG_MODULE_UNLOAD
+ vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
+ vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);
+
+ vfs_referencing_pf = hweight64(vfs_mask1);
+ vfs_referencing_pf += hweight64(vfs_mask2);
+
+ refcount = module_refcount(THIS_MODULE);
+ if (refcount >= vfs_referencing_pf) {
+ while (vfs_referencing_pf) {
+ module_put(THIS_MODULE);
+ vfs_referencing_pf--;
+ }
+ }
+#endif
+ }
+
+ return 0;
+}
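The watchdog treats the scratch register as a bitmask with one bit per firmware core: a set bit means that core crashed or got stuck, and the error is logged only the first time it is seen. A minimal standalone sketch of walking such a mask (MAX_CORES stands in for LIO_MAX_CORES):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_CORES	12	/* stand-in for LIO_MAX_CORES */

int main(void)
{
	uint16_t crashed_mask = 0x0005;		/* example: cores 0 and 2 */
	bool already_reported[MAX_CORES] = { false };
	int core;

	for (core = 0; core < MAX_CORES; core++) {
		bool bad = (crashed_mask >> core) & 1;

		if (bad && !already_reported[core]) {
			printf("core %d crashed or got stuck\n", core);
			already_reported[core] = true;
		}
	}
	return 0;
}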
+
+/**
+ * liquidio_probe - PCI probe handler
+ * @pdev: PCI device structure
+ * @ent: unused
+ */
+static int
+liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
+{
+ struct octeon_device *oct_dev = NULL;
+ struct handshake *hs;
+
+ oct_dev = octeon_allocate_device(pdev->device,
+ sizeof(struct octeon_device_priv));
+ if (!oct_dev) {
+ dev_err(&pdev->dev, "Unable to allocate device\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->device == OCTEON_CN23XX_PF_VID)
+ oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
+ /* Enable PTP for 6XXX Device */
+ if (((pdev->device == OCTEON_CN66XX) ||
+ (pdev->device == OCTEON_CN68XX)))
+ oct_dev->ptp_enable = true;
+ else
+ oct_dev->ptp_enable = false;
+
+ dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+ (u32)pdev->vendor, (u32)pdev->device);
+
+ /* Assign octeon_device for this device to the private data area. */
+ pci_set_drvdata(pdev, oct_dev);
+
+ /* set linux specific device pointer */
+ oct_dev->pci_dev = (void *)pdev;
+
+ oct_dev->subsystem_id = pdev->subsystem_vendor |
+ (pdev->subsystem_device << 16);
+
+ hs = &handshake[oct_dev->octeon_id];
+ init_completion(&hs->init);
+ init_completion(&hs->started);
+ hs->pci_dev = pdev;
+
+ if (oct_dev->octeon_id == 0)
+ /* first LiquidIO NIC is detected */
+ complete(&first_stage);
+
+ if (octeon_device_init(oct_dev)) {
+ complete(&hs->init);
+ liquidio_remove(pdev);
+ return -ENOMEM;
+ }
+
+ if (OCTEON_CN23XX_PF(oct_dev)) {
+ u8 bus, device, function;
+
+ if (atomic_read(oct_dev->adapter_refcount) == 1) {
+ /* Each NIC gets one watchdog kernel thread. The first
+ * PF (of each NIC) that gets pci_driver->probe()'d
+ * creates that thread.
+ */
+ bus = pdev->bus->number;
+ device = PCI_SLOT(pdev->devfn);
+ function = PCI_FUNC(pdev->devfn);
+ oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
+ oct_dev,
+ "liowd/%02hhx:%02hhx.%hhx",
+ bus, device, function);
+ if (IS_ERR(oct_dev->watchdog_task)) {
+ oct_dev->watchdog_task = NULL;
+ dev_err(&oct_dev->pci_dev->dev,
+ "failed to create kernel_thread\n");
+ liquidio_remove(pdev);
+ return -1;
+ }
+ }
+ }
+
+ oct_dev->rx_pause = 1;
+ oct_dev->tx_pause = 1;
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+ return 0;
+}
+
+static bool fw_type_is_auto(void)
+{
+ return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
+ sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
+}
+
+/**
+ * octeon_pci_flr - Perform a PCI function-level reset on an Octeon device.
+ * @oct: octeon device
+ */
+static void octeon_pci_flr(struct octeon_device *oct)
+{
+ int rc;
+
+ pci_save_state(oct->pci_dev);
+
+ pci_cfg_access_lock(oct->pci_dev);
+
+ /* Quiesce the device completely */
+ pci_write_config_word(oct->pci_dev, PCI_COMMAND,
+ PCI_COMMAND_INTX_DISABLE);
+
+ rc = __pci_reset_function_locked(oct->pci_dev);
+
+ if (rc != 0)
+ dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
+ rc, oct->pf_num);
+
+ pci_cfg_access_unlock(oct->pci_dev);
+
+ pci_restore_state(oct->pci_dev);
+}
+
+/**
+ * octeon_destroy_resources - Destroy resources associated with octeon device
+ * @oct: octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+ int i, refcount;
+ struct msix_entry *msix_entries;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+
+ struct handshake *hs;
+
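+	/* Tear down from the device's current state downward; each case
+	 * falls through so every earlier init stage is also undone.
+	 */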
+ switch (atomic_read(&oct->status)) {
+ case OCT_DEV_RUNNING:
+ case OCT_DEV_CORE_OK:
+
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ oct->app_mode = CVM_DRV_INVALID_APP;
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ fallthrough;
+ case OCT_DEV_HOST_OK:
+
+ case OCT_DEV_CONSOLE_INIT_DONE:
+ /* Remove any consoles */
+ octeon_remove_consoles(oct);
+
+ fallthrough;
+ case OCT_DEV_IO_QUEUES_DONE:
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet
+ * processing to finish.
+ */
+ oct->fn_list.disable_io_queues(oct);
+
+ if (lio_wait_for_oq_pkts(oct))
+ dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to
+ * complete.
+ */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq, 0);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ lio_process_ordered_list(oct, 1);
+ octeon_free_sc_done_list(oct);
+ octeon_free_sc_zombie_list(oct);
+
+ fallthrough;
+ case OCT_DEV_INTR_SET_DONE:
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs - 1; i++) {
+ if (oct->ioq_vector[i].vector) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ oct->ioq_vector[i].vector = 0;
+ }
+ }
+ /* non-iov vector's argument is oct struct */
+ free_irq(msix_entries[i].vector, oct);
+
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ } else {
+ /* Release the interrupt line */
+ free_irq(oct->pci_dev->irq, oct);
+
+ if (oct->flags & LIO_FLAG_MSI_ENABLED)
+ pci_disable_msi(oct->pci_dev);
+ }
+
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+
+ fallthrough;
+ case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
+ if (OCTEON_CN23XX_PF(oct))
+ octeon_free_ioq_vector(oct);
+
+ fallthrough;
+ case OCT_DEV_MBOX_SETUP_DONE:
+ if (OCTEON_CN23XX_PF(oct))
+ oct->fn_list.free_mbox(oct);
+
+ fallthrough;
+ case OCT_DEV_IN_RESET:
+ case OCT_DEV_DROQ_INIT_DONE:
+ /* Wait for any pending operations */
+ mdelay(100);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ /* Force any pending handshakes to complete */
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+
+ if (hs->pci_dev) {
+ handshake[oct->octeon_id].init_ok = 0;
+ complete(&handshake[oct->octeon_id].init);
+ handshake[oct->octeon_id].started_ok = 0;
+ complete(&handshake[oct->octeon_id].started);
+ }
+ }
+
+ fallthrough;
+ case OCT_DEV_RESP_LIST_INIT_DONE:
+ octeon_delete_response_list(oct);
+
+ fallthrough;
+ case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+#ifdef CONFIG_PCI_IOV
+ if (oct->sriov_info.sriov_enabled)
+ pci_disable_sriov(oct->pci_dev);
+#endif
+ fallthrough;
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
+
+ fallthrough;
+ case OCT_DEV_DISPATCH_INIT_DONE:
+ octeon_delete_dispatch_list(oct);
+ cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+ fallthrough;
+ case OCT_DEV_PCI_MAP_DONE:
+ refcount = octeon_deregister_device(oct);
+
+ /* Soft reset the octeon device before exiting.
+ * However, if fw was loaded from card (i.e. autoboot),
+ * perform an FLR instead.
+ * Implementation note: only soft-reset the device
+ * if it is a CN6XXX OR the LAST CN23XX device.
+ */
+ if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
+ octeon_pci_flr(oct);
+ else if (OCTEON_CN6XXX(oct) || !refcount)
+ oct->fn_list.soft_reset(oct);
+
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+
+ fallthrough;
+ case OCT_DEV_PCI_ENABLE_DONE:
+ pci_clear_master(oct->pci_dev);
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
+ fallthrough;
+ case OCT_DEV_BEGIN_STATE:
+ /* Nothing to be done here either */
+ break;
+ } /* end switch (oct->status) */
+
+ tasklet_kill(&oct_priv->droq_tasklet);
+}
+
+/**
+ * send_rx_ctrl_cmd - Send Rx control command
+ * @lio: per-network private data
+ * @start_stop: whether to start or stop
+ */
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+{
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ int retval;
+
+ if (oct->props[lio->ifidx].rx_on == start_stop)
+ return 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
+ }
+
+ ncmd = (union octnet_cmd *)sc->virtdptr;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = OCTNET_CMD_RX_CTL;
+ ncmd->s.param1 = start_stop;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_CMD, 0, 0, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ octeon_free_soft_command(oct, sc);
+ } else {
+		/* Sleep on a wait queue until the condition flag indicates
+		 * that the response arrived or timed out.
+		 */
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
+
+ oct->props[lio->ifidx].rx_on = start_stop;
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ return retval;
+}
+
+/**
+ * liquidio_destroy_nic_device - Destroy NIC device interface
+ * @oct: octeon device
+ * @ifidx: which interface to destroy
+ *
+ * Cleanup associated with each interface for an Octeon device when NIC
+ * module is being unloaded or if initialization fails during load.
+ */
+static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
+{
+ struct net_device *netdev = oct->props[ifidx].netdev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct napi_struct *napi, *n;
+ struct lio *lio;
+
+ if (!netdev) {
+ dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
+ __func__, ifidx);
+ return;
+ }
+
+ lio = GET_LIO(netdev);
+
+ dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
+ liquidio_stop(netdev);
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
+ /* Delete NAPI */
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ netif_napi_del(napi);
+
+ tasklet_enable(&oct_priv->droq_tasklet);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
+ unregister_netdev(netdev);
+
+ cleanup_sync_octeon_time_wq(netdev);
+ cleanup_link_status_change_wq(netdev);
+
+ cleanup_rx_oom_poll_fn(netdev);
+
+ lio_delete_glists(lio);
+
+ free_netdev(netdev);
+
+ oct->props[ifidx].gmxport = -1;
+
+ oct->props[ifidx].netdev = NULL;
+}
+
+/**
+ * liquidio_stop_nic_module - Stop complete NIC functionality
+ * @oct: octeon device
+ */
+static int liquidio_stop_nic_module(struct octeon_device *oct)
+{
+ int i, j;
+ struct lio *lio;
+
+ dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ device_lock(&oct->pci_dev->dev);
+ if (oct->devlink) {
+ devlink_unregister(oct->devlink);
+ devlink_free(oct->devlink);
+ oct->devlink = NULL;
+ }
+ device_unlock(&oct->pci_dev->dev);
+
+ if (!oct->ifcount) {
+ dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
+ return 1;
+ }
+
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ oct->cmd_resp_state = OCT_DRV_OFFLINE;
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+
+ lio_vf_rep_destroy(oct);
+
+ for (i = 0; i < oct->ifcount; i++) {
+ lio = GET_LIO(oct->props[i].netdev);
+ for (j = 0; j < oct->num_oqs; j++)
+ octeon_unregister_droq_ops(oct,
+ lio->linfo.rxpciq[j].s.q_no);
+ }
+
+ for (i = 0; i < oct->ifcount; i++)
+ liquidio_destroy_nic_device(oct, i);
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
+ return 0;
+}
+
+/**
+ * liquidio_remove - Cleans up resources at unload time
+ * @pdev: PCI device structure
+ */
+static void liquidio_remove(struct pci_dev *pdev)
+{
+ struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+ if (oct_dev->watchdog_task)
+ kthread_stop(oct_dev->watchdog_task);
+
+ if (!oct_dev->octeon_id &&
+ oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
+ lio_vf_rep_modexit();
+
+ if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
+ liquidio_stop_nic_module(oct_dev);
+
+ /* Reset the octeon device and cleanup all memory allocated for
+ * the octeon device by driver.
+ */
+ octeon_destroy_resources(oct_dev);
+
+ dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+ /* This octeon device has been removed. Update the global
+ * data structure to reflect this. Free the device structure.
+ */
+ octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * octeon_chip_specific_setup - Identify the Octeon device and map the BAR address space
+ * @oct: octeon device
+ */
+static int octeon_chip_specific_setup(struct octeon_device *oct)
+{
+ u32 dev_id, rev_id;
+ int ret = 1;
+
+ pci_read_config_dword(oct->pci_dev, 0, &dev_id);
+ pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+ oct->rev_id = rev_id & 0xff;
+
+ switch (dev_id) {
+ case OCTEON_CN68XX_PCIID:
+ oct->chip_id = OCTEON_CN68XX;
+ ret = lio_setup_cn68xx_octeon_device(oct);
+ break;
+
+ case OCTEON_CN66XX_PCIID:
+ oct->chip_id = OCTEON_CN66XX;
+ ret = lio_setup_cn66xx_octeon_device(oct);
+ break;
+
+ case OCTEON_CN23XX_PCIID_PF:
+ oct->chip_id = OCTEON_CN23XX_PF_VID;
+ ret = setup_cn23xx_octeon_pf_device(oct);
+ if (ret)
+ break;
+#ifdef CONFIG_PCI_IOV
+ if (!ret)
+ pci_sriov_set_totalvfs(oct->pci_dev,
+ oct->sriov_info.max_vfs);
+#endif
+ break;
+
+ default:
+ dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
+ dev_id);
+ }
+
+ return ret;
+}
+
+/**
+ * octeon_pci_os_setup - PCI initialization for an Octeon device.
+ * @oct: octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+ /* setup PCI stuff first */
+ if (pci_enable_device(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+ return 1;
+ }
+
+ if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+ dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+ pci_disable_device(oct->pci_dev);
+ return 1;
+ }
+
+ /* Enable PCI DMA Master. */
+ pci_set_master(oct->pci_dev);
+
+ return 0;
+}
+
+/**
+ * free_netbuf - Unmap and free network buffer
+ * @buf: buffer
+ */
+static void free_netbuf(void *buf)
+{
+ struct sk_buff *skb;
+ struct octnet_buf_free_info *finfo;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+ DMA_TO_DEVICE);
+
+ tx_buffer_free(skb);
+}
+
+/**
+ * free_netsgbuf - Unmap and free gather buffer
+ * @buf: buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags, iq;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
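+	/* sg entry 0 maps the linear skb data; page fragments start at
+	 * index 1, with four DMA pointers packed per scatter element.
+	 */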
+ i = 1;
+ while (frags--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ i++;
+ }
+
+ iq = skb_iq(lio->oct_dev, skb);
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
+
+ tx_buffer_free(skb);
+}
+
+/**
+ * free_netsgbuf_with_resp - Unmap and free gather buffer with response
+ * @buf: buffer
+ */
+static void free_netsgbuf_with_resp(void *buf)
+{
+ struct octeon_soft_command *sc;
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+ struct octnic_gather *g;
+ int i, frags, iq;
+
+ sc = (struct octeon_soft_command *)buf;
+ skb = (struct sk_buff *)sc->callback_arg;
+ finfo = (struct octnet_buf_free_info *)&skb->cb;
+
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ i++;
+ }
+
+ iq = skb_iq(lio->oct_dev, skb);
+
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
+
+ /* Don't free the skb yet */
+}
+
+/**
+ * liquidio_ptp_adjfreq - Adjust ptp frequency
+ * @ptp: PTP clock info
+ * @ppb: how much to adjust by, in parts-per-billion
+ */
+static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ u64 comp, delta;
+ unsigned long flags;
+ bool neg_adj = false;
+
+ if (ppb < 0) {
+ neg_adj = true;
+ ppb = -ppb;
+ }
+
+ /* The hardware adds the clock compensation value to the
+ * PTP clock on every coprocessor clock cycle, so we
+ * compute the delta in terms of coprocessor clocks.
+ */
+ delta = (u64)ppb << 32;
+ do_div(delta, oct->coproc_clock_rate);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
+ if (neg_adj)
+ comp -= delta;
+ else
+ comp += delta;
+ lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * liquidio_ptp_adjtime - Adjust ptp time
+ * @ptp: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
+ */
+static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio->ptp_adjust += delta;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
+ * @ptp: PTP clock info
+ * @ts: timespec
+ */
+static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
+ ns += lio->ptp_adjust;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/**
+ * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
+ * @ptp: PTP clock info
+ * @ts: timespec
+ */
+static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ u64 ns;
+ unsigned long flags;
+ struct lio *lio = container_of(ptp, struct lio, ptp_info);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ ns = timespec64_to_ns(ts);
+
+ spin_lock_irqsave(&lio->ptp_lock, flags);
+ lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
+ lio->ptp_adjust = 0;
+ spin_unlock_irqrestore(&lio->ptp_lock, flags);
+
+ return 0;
+}
+
+/**
+ * liquidio_ptp_enable - Enable/disable an ancillary PTP feature (not supported)
+ * @ptp: PTP clock info
+ * @rq: request
+ * @on: whether to enable or disable the feature
+ */
+static int
+liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
+ struct ptp_clock_request __maybe_unused *rq,
+ int __maybe_unused on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * oct_ptp_open - Open PTP clock source
+ * @netdev: network device
+ */
+static void oct_ptp_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+
+ spin_lock_init(&lio->ptp_lock);
+
+ snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
+ lio->ptp_info.owner = THIS_MODULE;
+ lio->ptp_info.max_adj = 250000000;
+ lio->ptp_info.n_alarm = 0;
+ lio->ptp_info.n_ext_ts = 0;
+ lio->ptp_info.n_per_out = 0;
+ lio->ptp_info.pps = 0;
+ lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
+ lio->ptp_info.adjtime = liquidio_ptp_adjtime;
+ lio->ptp_info.gettime64 = liquidio_ptp_gettime;
+ lio->ptp_info.settime64 = liquidio_ptp_settime;
+ lio->ptp_info.enable = liquidio_ptp_enable;
+
+ lio->ptp_adjust = 0;
+
+ lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
+ &oct->pci_dev->dev);
+
+ if (IS_ERR(lio->ptp_clock))
+ lio->ptp_clock = NULL;
+}
+
+/**
+ * liquidio_ptp_init - Init PTP clock
+ * @oct: octeon device
+ */
+static void liquidio_ptp_init(struct octeon_device *oct)
+{
+ u64 clock_comp, cfg;
+
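+	/* Nominal compensation: nanoseconds advanced per coprocessor clock
+	 * cycle, expressed as a 32.32 fixed-point value.
+	 */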
+ clock_comp = (u64)NSEC_PER_SEC << 32;
+ do_div(clock_comp, oct->coproc_clock_rate);
+ lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
+
+ /* Enable */
+ cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
+ lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
+}
+
+/**
+ * load_firmware - Load firmware to device
+ * @oct: octeon device
+ *
+ * Maps device to firmware filename, requests firmware, and downloads it
+ */
+static int load_firmware(struct octeon_device *oct)
+{
+ int ret = 0;
+ const struct firmware *fw;
+ char fw_name[LIO_MAX_FW_FILENAME_LEN];
+ char *tmp_fw_type;
+
+ if (fw_type_is_auto()) {
+ tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
+ strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
+ } else {
+ tmp_fw_type = fw_type;
+ }
+
+ sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
+ octeon_get_conf(oct)->card_name, tmp_fw_type,
+ LIO_FW_NAME_SUFFIX);
+
+ ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
+ fw_name);
+ release_firmware(fw);
+ return ret;
+ }
+
+ ret = octeon_download_firmware(oct, fw->data, fw->size);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+/**
+ * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
+ * @work: work_struct data structure
+ */
+static void octnet_poll_check_txq_status(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+ if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
+ return;
+
+ check_txq_status(lio);
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+}
+
+/**
+ * setup_tx_poll_fn - Sets up the txq poll check
+ * @netdev: network device
+ */
+static inline int setup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->txq_status_wq.wq = alloc_workqueue("txq-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->txq_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
+ octnet_poll_check_txq_status);
+ lio->txq_status_wq.wk.ctxptr = lio;
+ queue_delayed_work(lio->txq_status_wq.wq,
+ &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
+ return 0;
+}
+
+static inline void cleanup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->txq_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ destroy_workqueue(lio->txq_status_wq.wq);
+ }
+}
+
+/**
+ * liquidio_open - Net device open for LiquidIO
+ * @netdev: network device
+ */
+static int liquidio_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct napi_struct *napi, *n;
+ int ret = 0;
+
+ if (oct->props[lio->ifidx].napi_enabled == 0) {
+ tasklet_disable(&oct_priv->droq_tasklet);
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 1;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 1;
+ }
+
+ if (oct->ptp_enable)
+ oct_ptp_open(netdev);
+
+ ifstate_set(lio, LIO_IFSTATE_RUNNING);
+
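+	/* Without per-queue MSI-X interrupts, fall back to periodically
+	 * polling the Tx queue status.
+	 */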
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
+ ret = setup_tx_poll_fn(netdev);
+ if (ret)
+ goto err_poll;
+ }
+
+ netif_tx_start_all_queues(netdev);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+
+ /* tell Octeon to start forwarding packets to host */
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ goto err_rx_ctrl;
+
+	/* start periodic statistics fetch */
+ INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
+ lio->stats_wk.ctxptr = lio;
+ schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
+ (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
+
+ dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
+ netdev->name);
+
+ return 0;
+
+err_rx_ctrl:
+ if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+err_poll:
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * liquidio_stop - Net device stop for LiquidIO
+ * @netdev: network device
+ */
+static int liquidio_stop(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct napi_struct *napi, *n;
+ int ret = 0;
+
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ /* Stop any link updates */
+ lio->intf_open = 0;
+
+ stop_txqs(netdev);
+
+ /* Inform that netif carrier is down */
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ lio->linfo.link.s.link_up = 0;
+ lio->link_changes++;
+
+ /* Tell Octeon that nic interface is down. */
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
+
+ if (OCTEON_CN23XX_PF(oct)) {
+ if (!oct->msix_on)
+ cleanup_tx_poll_fn(netdev);
+ } else {
+ cleanup_tx_poll_fn(netdev);
+ }
+
+ cancel_delayed_work_sync(&lio->stats_wk.work);
+
+ if (lio->ptp_clock) {
+ ptp_clock_unregister(lio->ptp_clock);
+ lio->ptp_clock = NULL;
+ }
+
+ /* Wait for any pending Rx descriptors */
+ if (lio_wait_for_clean_oq(oct))
+ netif_info(lio, rx_err, lio->netdev,
+ "Proceeding with stop interface after partial RX desc processing\n");
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ if (OCTEON_CN23XX_PF(oct))
+ oct->droq[0]->ops.poll_mode = 0;
+
+ tasklet_enable(&oct_priv->droq_tasklet);
+ }
+
+ dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+
+ return ret;
+}
+
+/**
+ * get_new_flags - Convert net device flags to an octnet_ifflags mask
+ * @netdev: network device
+ *
+ * This routine generates an octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+ if (netdev->flags & IFF_PROMISC)
+ f |= OCTNET_IFFLAG_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+
+ if (netdev->flags & IFF_MULTICAST) {
+ f |= OCTNET_IFFLAG_MULTICAST;
+
+ /* Accept all multicast addresses if there are more than we
+ * can handle
+ */
+ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+ }
+
+ if (netdev->flags & IFF_BROADCAST)
+ f |= OCTNET_IFFLAG_BROADCAST;
+
+ return f;
+}
+
+/**
+ * liquidio_set_mcast_list - Net device set_multicast_list
+ * @netdev: network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct netdev_hw_addr *ha;
+ u64 *mc;
+ int ret;
+ int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+ nctrl.ncmd.s.param1 = get_new_flags(netdev);
+ nctrl.ncmd.s.param2 = mc_count;
+ nctrl.ncmd.s.more = mc_count;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ mc = &nctrl.udd[0];
+ netdev_for_each_mc_addr(ha, netdev) {
+ *mc = 0;
+ memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
+ /* no need to swap bytes */
+
+ if (++mc > &nctrl.udd[mc_count])
+ break;
+ }
+
+	/* This callback is invoked from atomic context in the kernel, so
+	 * we do not wait for the response.
+	 */
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+}
+
+/**
+ * liquidio_set_mac - Net device set_mac_address
+ * @netdev: network device
+ * @p: pointer to sockaddr
+ */
+static int liquidio_set_mac(struct net_device *netdev, void *p)
+{
+ int ret = 0;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct octnic_ctrl_pkt nctrl;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ nctrl.ncmd.s.param1 = 0;
+ nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
+ return -ENOMEM;
+ }
+
+ if (nctrl.sc_status) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: MAC Address change failed. sc return=%x\n",
+ __func__, nctrl.sc_status);
+ return -EIO;
+ }
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+ memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
+
+ return 0;
+}
+
+static void
+liquidio_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *lstats)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct;
+ u64 pkts = 0, drop = 0, bytes = 0;
+ struct oct_droq_stats *oq_stats;
+ struct oct_iq_stats *iq_stats;
+ int i, iq_no, oq_no;
+
+ oct = lio->oct_dev;
+
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ iq_no = lio->linfo.txpciq[i].s.q_no;
+ iq_stats = &oct->instr_queue[iq_no]->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+
+ lstats->tx_packets = pkts;
+ lstats->tx_bytes = bytes;
+ lstats->tx_dropped = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oq_no = lio->linfo.rxpciq[i].s.q_no;
+ oq_stats = &oct->droq[oq_no]->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_nodispatch +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+
+ lstats->rx_bytes = bytes;
+ lstats->rx_packets = pkts;
+ lstats->rx_dropped = drop;
+
+ lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
+ lstats->collisions = oct->link_stats.fromhost.total_collisions;
+
+ /* detailed rx_errors: */
+ lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
+	/* received packet with CRC error */
+	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
+	/* received frame alignment error */
+	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
+	/* receiver FIFO overrun */
+ lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;
+
+ lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
+ lstats->rx_frame_errors + lstats->rx_fifo_errors;
+
+ /* detailed tx_errors */
+ lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
+ lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
+ lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;
+
+ lstats->tx_errors = lstats->tx_aborted_errors +
+ lstats->tx_carrier_errors +
+ lstats->tx_fifo_errors;
+}
+
+/**
+ * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
+ * @netdev: network device
+ * @ifr: interface request
+ */
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config conf;
+ struct lio *lio = GET_LIO(netdev);
+
+ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
+ return -EFAULT;
+
+ switch (conf.tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (conf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ else
+ ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+}
+
+/**
+ * liquidio_ioctl - ioctl handler
+ * @netdev: network device
+ * @ifr: interface request
+ * @cmd: command
+ */
+static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (lio->oct_dev->ptp_enable)
+ return hwtstamp_ioctl(netdev, ifr);
+ fallthrough;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * handle_timestamp - handle a Tx timestamp response
+ * @oct: octeon device
+ * @status: response status
+ * @buf: pointer to skb
+ */
+static void handle_timestamp(struct octeon_device *oct,
+ u32 status,
+ void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octeon_soft_command *sc;
+ struct oct_timestamp_resp *resp;
+ struct lio *lio;
+ struct sk_buff *skb = (struct sk_buff *)buf;
+
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ lio = finfo->lio;
+ sc = finfo->sc;
+ oct = lio->oct_dev;
+ resp = (struct oct_timestamp_resp *)sc->virtrptr;
+
+ if (status != OCTEON_REQUEST_DONE) {
+ dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ resp->timestamp = 0;
+ }
+
+ octeon_swap_8B_data(&resp->timestamp, 1);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns = resp->timestamp;
+
+ netif_info(lio, tx_done, lio->netdev,
+ "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+ skb, (unsigned long long)ns);
+ ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ octeon_free_soft_command(oct, sc);
+ tx_buffer_free(skb);
+}
+
+/**
+ * send_nic_timestamp_pkt - Send a data packet that will be timestamped
+ * @oct: octeon device
+ * @ndata: pointer to network data
+ * @finfo: pointer to private network data
+ * @xmit_more: more packets are queued behind this one; defer ringing the doorbell
+ */
+static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ struct octnet_buf_free_info *finfo,
+ int xmit_more)
+{
+ int retval;
+ struct octeon_soft_command *sc;
+ struct lio *lio;
+ int ring_doorbell;
+ u32 len;
+
+ lio = finfo->lio;
+
+ sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+ sizeof(struct oct_timestamp_resp));
+ finfo->sc = sc;
+
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+ return IQ_SEND_FAILED;
+ }
+
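+	/* A timestamped packet expects a response from the firmware, so
+	 * switch the request type to its response-carrying variant.
+	 */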
+ if (ndata->reqtype == REQTYPE_NORESP_NET)
+ ndata->reqtype = REQTYPE_RESP_NET;
+ else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+ ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+ sc->callback = handle_timestamp;
+ sc->callback_arg = finfo->skb;
+ sc->iq_no = ndata->q_no;
+
+ if (OCTEON_CN23XX_PF(oct))
+ len = (u32)((struct octeon_instr_ih3 *)
+ (&sc->cmd.cmd3.ih3))->dlengsz;
+ else
+ len = (u32)((struct octeon_instr_ih2 *)
+ (&sc->cmd.cmd2.ih2))->dlengsz;
+
+ ring_doorbell = !xmit_more;
+
+ retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+ sc, len, ndata->reqtype);
+
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct, sc);
+ } else {
+ netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+ }
+
+ return retval;
+}
+
+/**
+ * liquidio_xmit - Transmit network packets to the Octeon interface
+ * @skb: skbuff struct to be passed to network layer.
+ * @netdev: pointer to network device
+ *
+ * Return: whether the packet was transmitted to the device okay or not
+ * (NETDEV_TX_OK or NETDEV_TX_BUSY)
+ */
+static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct lio *lio;
+ struct octnet_buf_free_info *finfo;
+ union octnic_cmd_setup cmdsetup;
+ struct octnic_data_pkt ndata;
+ struct octeon_device *oct;
+ struct oct_iq_stats *stats;
+ struct octeon_instr_irh *irh;
+ union tx_info *tx_info;
+ int status = 0;
+ int q_idx = 0, iq_no = 0;
+ int j, xmit_more = 0;
+ u64 dptr = 0;
+ u32 tag = 0;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
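+	/* Select the Tx queue for this skb; the queue index also serves as
+	 * the packet tag.
+	 */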
+ q_idx = skb_iq(oct, skb);
+ tag = q_idx;
+ iq_no = lio->linfo.txpciq[q_idx].s.q_no;
+
+ stats = &oct->instr_queue[iq_no]->stats;
+
+ /* Check for all conditions in which the current packet cannot be
+ * transmitted.
+ */
+ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+ (!lio->linfo.link.s.link_up) ||
+ (skb->len <= 0)) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit failed link_status : %d\n",
+ lio->linfo.link.s.link_up);
+ goto lio_xmit_failed;
+ }
+
+ /* Use space in skb->cb to store info used to unmap and
+ * free the buffers.
+ */
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ finfo->lio = lio;
+ finfo->skb = skb;
+ finfo->sc = NULL;
+
+ /* Prepare the attributes for the data to be passed to OSI. */
+ memset(&ndata, 0, sizeof(struct octnic_data_pkt));
+
+ ndata.buf = (void *)finfo;
+
+ ndata.q_no = iq_no;
+
+ if (octnet_iq_is_full(oct, ndata.q_no)) {
+ /* defer sending if queue is full */
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ stats->tx_iq_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
+ * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
+ */
+
+ ndata.datasize = skb->len;
+
+ cmdsetup.u64 = 0;
+ cmdsetup.s.iq_no = iq_no;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->encapsulation) {
+ cmdsetup.s.tnl_csum = 1;
+ stats->tx_vxlan++;
+ } else {
+ cmdsetup.s.transport_csum = 1;
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cmdsetup.s.timestamp = 1;
+ }
+
+ if (skb_shinfo(skb)->nr_frags == 0) {
+ cmdsetup.s.u.datasize = skb->len;
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+
+		/* Map the linear skb data for DMA to the device */
+ dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
+ __func__);
+ stats->tx_dmamap_fail++;
+ return NETDEV_TX_BUSY;
+ }
+
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
+ ndata.reqtype = REQTYPE_NORESP_NET;
+
+ } else {
+ int i, frags;
+ skb_frag_t *frag;
+ struct octnic_gather *g;
+
+ spin_lock(&lio->glist_lock[q_idx]);
+ g = (struct octnic_gather *)
+ lio_list_delete_head(&lio->glist[q_idx]);
+ spin_unlock(&lio->glist_lock[q_idx]);
+
+ if (!g) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit scatter gather: glist null!\n");
+ goto lio_xmit_failed;
+ }
+
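+		/* Scatter/gather send: one gather pointer for the linear
+		 * data plus one per page fragment.
+		 */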
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+
+ g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
+ __func__);
+ stats->tx_dmamap_fail++;
+ return NETDEV_TX_BUSY;
+ }
+ add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
+
+ frags = skb_shinfo(skb)->nr_frags;
+ i = 1;
+ while (frags--) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg[i >> 2].ptr[i & 3])) {
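+				/* Unwind the linear mapping and any
+				 * fragments mapped so far
+				 */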
+ dma_unmap_single(&oct->pci_dev->dev,
+ g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ for (j = 1; j < i; j++) {
+ frag = &skb_shinfo(skb)->frags[j - 1];
+ dma_unmap_page(&oct->pci_dev->dev,
+ g->sg[j >> 2].ptr[j & 3],
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
+ i++;
+ }
+
+ dptr = g->sg_dma_ptr;
+
+ if (OCTEON_CN23XX_PF(oct))
+ ndata.cmd.cmd3.dptr = dptr;
+ else
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
+ finfo->g = g;
+
+ ndata.reqtype = REQTYPE_NORESP_NET_SG;
+ }
+
+ if (OCTEON_CN23XX_PF(oct)) {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
+ } else {
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+ tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ stats->tx_gso++;
+ }
+
+ /* HW insert VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ irh->priority = skb_vlan_tag_get(skb) >> 13;
+ irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
+ }
+
+ xmit_more = netdev_xmit_more();
+
+ if (unlikely(cmdsetup.s.timestamp))
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ else
+ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ if (status == IQ_SEND_FAILED)
+ goto lio_xmit_failed;
+
+ netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+ if (status == IQ_SEND_STOP)
+ netif_stop_subqueue(netdev, q_idx);
+
+ netif_trans_update(netdev);
+
+ if (tx_info->s.gso_segs)
+ stats->tx_done += tx_info->s.gso_segs;
+ else
+ stats->tx_done++;
+ stats->tx_tot_bytes += ndata.datasize;
+
+ return NETDEV_TX_OK;
+
+lio_xmit_failed:
+ stats->tx_dropped++;
+ netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+ iq_no, stats->tx_dropped);
+ if (dptr)
+ dma_unmap_single(&oct->pci_dev->dev, dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+
+ octeon_ring_doorbell_locked(oct, iq_no);
+
+ tx_buffer_free(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * liquidio_tx_timeout - Network device Tx timeout
+ * @netdev: pointer to network device
+ * @txqueue: index of the hung transmit queue
+ */
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
+ netdev->stats.tx_dropped);
+ netif_trans_update(netdev);
+ wake_txqs(netdev);
+}
+
+static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+/**
+ * liquidio_set_rxcsum_command - Send command to enable/disable RX checksum offload
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
+ * Return: 0 on success, negative error code on failure
+ */
+static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.param1 = rx_cmd;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+/**
+ * liquidio_vxlan_port_command - Send command to add/delete a VxLAN UDP port to the firmware
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @vxlan_port: VxLAN port to be added or deleted
+ * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
+ * OCTNET_CMD_VXLAN_PORT_DEL
+ * Return: 0 on success, negative error code on failure
+ */
+static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
+ u16 vxlan_port, u8 vxlan_cmd_bit)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.more = vxlan_cmd_bit;
+ nctrl.ncmd.s.param1 = vxlan_port;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "VxLAN port add/delete failed in core (ret:0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti)
+{
+ return liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table,
+ unsigned int entry,
+ struct udp_tunnel_info *ti)
+{
+ return liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
+static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
+ .set_port = liquidio_udp_tunnel_set_port,
+ .unset_port = liquidio_udp_tunnel_unset_port,
+ .tables = {
+ { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
+
+/**
+ * liquidio_fix_features - Net device fix features
+ * @netdev: pointer to network device
+ * @request: features requested
+ * Return: updated features list
+ */
+static netdev_features_t liquidio_fix_features(struct net_device *netdev,
+ netdev_features_t request)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if ((request & NETIF_F_RXCSUM) &&
+ !(lio->dev_capability & NETIF_F_RXCSUM))
+ request &= ~NETIF_F_RXCSUM;
+
+ if ((request & NETIF_F_HW_CSUM) &&
+ !(lio->dev_capability & NETIF_F_HW_CSUM))
+ request &= ~NETIF_F_HW_CSUM;
+
+ if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+ request &= ~NETIF_F_TSO;
+
+ if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+ request &= ~NETIF_F_TSO6;
+
+ if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+	/* Disable LRO if RXCSUM is off */
+ if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
+ request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return request;
+}
+
+/**
+ * liquidio_set_features - Net device set features
+ * @netdev: pointer to network device
+ * @features: features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if ((features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO) &&
+ !(netdev->features & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+ else if (!(features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO) &&
+ (netdev->features & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+	/* Send a command to the firmware to enable/disable RX checksum
+	 * offload as requested via ethtool
+	 */
+ if (!(netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ (features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev,
+ OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ else if ((netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ !(features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_DISABLE);
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_DISABLE);
+
+ return 0;
+}
+
+static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
+ u8 *mac, bool is_admin_assigned)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
+ return -EINVAL;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ /* vfidx is 0 based, but vf_num (param1) is 1 based */
+ nctrl.ncmd.s.param1 = vfidx + 1;
+ nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ if (is_admin_assigned) {
+ nctrl.ncmd.s.param2 = true;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ }
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
+
+ oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
+
+ ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ if (ret > 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ int retval;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
+ if (!retval)
+ cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
+
+ return retval;
+}
+
+static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
+ bool enable)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int retval;
+
+ if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
+ netif_info(lio, drv, lio->netdev,
+ "firmware does not support spoofchk\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
+ netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ if (oct->sriov_info.vf_spoofchk[vfidx])
+ return 0;
+ } else {
+ /* Clear */
+ if (!oct->sriov_info.vf_spoofchk[vfidx])
+ return 0;
+ }
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
+ nctrl.ncmd.s.param1 =
+ vfidx + 1; /* vfidx is 0 based,
+ * but vf_num (param1) is 1 based
+ */
+ nctrl.ncmd.s.param2 = enable;
+ nctrl.ncmd.s.more = 0;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.cb_fn = NULL;
+
+ retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);
+
+ if (retval) {
+ netif_info(lio, drv, lio->netdev,
+ "Failed to set VF %d spoofchk %s\n", vfidx,
+ enable ? "on" : "off");
+ return -1;
+ }
+
+ oct->sriov_info.vf_spoofchk[vfidx] = enable;
+ netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
+ enable ? "on" : "off");
+
+ return 0;
+}
+
+static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ u16 vlantci;
+ int ret = 0;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ if (vlan >= VLAN_N_VID || qos > 7)
+ return -EINVAL;
+
+ if (vlan)
+ vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
+ else
+ vlantci = 0;
+
+ if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
+ return 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ if (vlan)
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ else
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+
+ nctrl.ncmd.s.param1 = vlantci;
+ nctrl.ncmd.s.param2 =
+ vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
+ nctrl.ncmd.s.more = 0;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.cb_fn = NULL;
+
+ ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ return ret;
+ }
+
+ oct->sriov_info.vf_vlantci[vfidx] = vlantci;
+
+ return ret;
+}
+
+static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ u8 *macaddr;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ memset(ivi, 0, sizeof(struct ifla_vf_info));
+
+ ivi->vf = vfidx;
+ macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
+ ether_addr_copy(&ivi->mac[0], macaddr);
+ ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
+ ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
+ if (oct->sriov_info.trusted_vf.active &&
+ oct->sriov_info.trusted_vf.id == vfidx)
+ ivi->trusted = true;
+ else
+ ivi->trusted = false;
+ ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
+ ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
+ ivi->max_tx_rate = lio->linfo.link.s.speed;
+ ivi->min_tx_rate = 0;
+
+ return 0;
+}
+
+static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
+{
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_soft_command *sc;
+ int retval;
+
+ sc = octeon_alloc_soft_command(oct, 0, 16, 0);
+ if (!sc)
+ return -ENOMEM;
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ /* vfidx is 0 based, but vf_num (param1) is 1 based */
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
+ trusted);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct, sc);
+ retval = -1;
+ } else {
+ /* Wait for response or timeout */
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+			return retval;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ return retval;
+}
+
+static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
+ bool setting)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
+ /* trusted vf is not supported by firmware older than 1.7.1 */
+ return -EOPNOTSUPP;
+ }
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
+ netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
+ return -EINVAL;
+ }
+
+ if (setting) {
+ /* Set */
+
+ if (oct->sriov_info.trusted_vf.active &&
+ oct->sriov_info.trusted_vf.id == vfidx)
+ return 0;
+
+ if (oct->sriov_info.trusted_vf.active) {
+ netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
+ return -EPERM;
+ }
+ } else {
+ /* Clear */
+
+ if (!oct->sriov_info.trusted_vf.active)
+ return 0;
+ }
+
+ if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
+ if (setting) {
+ oct->sriov_info.trusted_vf.id = vfidx;
+ oct->sriov_info.trusted_vf.active = true;
+ } else {
+ oct->sriov_info.trusted_vf.active = false;
+ }
+
+ netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
+ setting ? "" : "not ");
+ } else {
+ netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
+ int linkstate)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
+ return 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
+ nctrl.ncmd.s.param1 =
+ vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
+ nctrl.ncmd.s.param2 = linkstate;
+ nctrl.ncmd.s.more = 0;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.cb_fn = NULL;
+
+ ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
+
+ if (!ret)
+ oct->sriov_info.vf_linkstate[vfidx] = linkstate;
+ else if (ret > 0)
+ ret = -EIO;
+
+ return ret;
+}
+
+static int
+liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct lio_devlink_priv *priv;
+ struct octeon_device *oct;
+
+ priv = devlink_priv(devlink);
+ oct = priv->oct;
+
+ *mode = oct->eswitch_mode;
+
+ return 0;
+}
+
+static int
+liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct lio_devlink_priv *priv;
+ struct octeon_device *oct;
+ int ret = 0;
+
+ priv = devlink_priv(devlink);
+ oct = priv->oct;
+
+ if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
+ return -EINVAL;
+
+ if (oct->eswitch_mode == mode)
+ return 0;
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ oct->eswitch_mode = mode;
+ ret = lio_vf_rep_create(oct);
+ break;
+
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ lio_vf_rep_destroy(oct);
+ oct->eswitch_mode = mode;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct devlink_ops liquidio_devlink_ops = {
+ .eswitch_mode_get = liquidio_eswitch_mode_get,
+ .eswitch_mode_set = liquidio_eswitch_mode_set,
+};
+
+static int
+liquidio_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct lio *lio = GET_LIO(dev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
+
+ return 0;
+}
+
+static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
+ struct ifla_vf_stats *vf_stats)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct oct_vf_stats stats;
+ int ret;
+
+ if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
+ return -EINVAL;
+
+ memset(&stats, 0, sizeof(struct oct_vf_stats));
+ ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
+ if (!ret) {
+ vf_stats->rx_packets = stats.rx_packets;
+ vf_stats->tx_packets = stats.tx_packets;
+ vf_stats->rx_bytes = stats.rx_bytes;
+ vf_stats->tx_bytes = stats.tx_bytes;
+ vf_stats->broadcast = stats.broadcast;
+ vf_stats->multicast = stats.multicast;
+ }
+
+ return ret;
+}
+
+static const struct net_device_ops lionetdevops = {
+ .ndo_open = liquidio_open,
+ .ndo_stop = liquidio_stop,
+ .ndo_start_xmit = liquidio_xmit,
+ .ndo_get_stats64 = liquidio_get_stats64,
+ .ndo_set_mac_address = liquidio_set_mac,
+ .ndo_set_rx_mode = liquidio_set_mcast_list,
+ .ndo_tx_timeout = liquidio_tx_timeout,
+
+ .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
+ .ndo_change_mtu = liquidio_change_mtu,
+ .ndo_eth_ioctl = liquidio_ioctl,
+ .ndo_fix_features = liquidio_fix_features,
+ .ndo_set_features = liquidio_set_features,
+ .ndo_set_vf_mac = liquidio_set_vf_mac,
+ .ndo_set_vf_vlan = liquidio_set_vf_vlan,
+ .ndo_get_vf_config = liquidio_get_vf_config,
+ .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk,
+ .ndo_set_vf_trust = liquidio_set_vf_trust,
+ .ndo_set_vf_link_state = liquidio_set_vf_link_state,
+ .ndo_get_vf_stats = liquidio_get_vf_stats,
+ .ndo_get_port_parent_id = liquidio_get_port_parent_id,
+};
+
+/**
+ * liquidio_init - Entry point for the liquidio module
+ */
+static int __init liquidio_init(void)
+{
+ int i;
+ struct handshake *hs;
+
+ init_completion(&first_stage);
+
+ octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
+
+ if (liquidio_init_pci())
+ return -EINVAL;
+
+ wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion(&hs->init);
+ if (!hs->init_ok) {
+ /* init handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Failed to init device\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+ hs = &handshake[i];
+ if (hs->pci_dev) {
+ wait_for_completion_timeout(&hs->started,
+ msecs_to_jiffies(30000));
+ if (!hs->started_ok) {
+ /* starter handshake failed */
+ dev_err(&hs->pci_dev->dev,
+ "Firmware failed to start\n");
+ liquidio_deinit_pci();
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ int gmxport = 0;
+ union oct_link_status *ls;
+ int i;
+
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
+ recv_pkt->buffer_size[0],
+ recv_pkt->rh.r_nic_info.gmxport);
+ goto nic_info_err;
+ }
+
+ gmxport = recv_pkt->rh.r_nic_info.gmxport;
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
+
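+	/* 64-bit swap of the link status words (required on LE machines) */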
+ octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+ for (i = 0; i < oct->ifcount; i++) {
+ if (oct->props[i].gmxport == gmxport) {
+ update_link_status(oct->props[i].netdev, ls);
+ break;
+ }
+ }
+
+nic_info_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+/**
+ * setup_nic_devices - Setup network interfaces
+ * @octeon_dev: octeon device
+ *
+ * Called during init time for each device. It assumes the NIC
+ * is already up and running. The link information for each
+ * interface is passed in link_info.
+ */
+static int setup_nic_devices(struct octeon_device *octeon_dev)
+{
+ struct lio *lio = NULL;
+ struct net_device *netdev;
+ u8 mac[6], i, j, *fw_ver, *micro_ver;
+ unsigned long micro;
+ u32 cur_ver;
+ struct octeon_soft_command *sc;
+ struct liquidio_if_cfg_resp *resp;
+ struct octdev_props *props;
+ int retval, num_iqueues, num_oqueues;
+ int max_num_queues = 0;
+ union oct_nic_if_cfg if_cfg;
+ unsigned int base_queue;
+ unsigned int gmx_port_id;
+ u32 resp_size, data_size;
+ u32 ifidx_or_pfnum;
+ struct lio_version *vdata;
+ struct devlink *devlink;
+ struct lio_devlink_priv *lio_devlink;
+
+ /* This is to handle link status changes */
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_INFO,
+ lio_nic_info, octeon_dev);
+
+ /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
+ * They are handled directly.
+ */
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
+ free_netbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
+ free_netsgbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
+ free_netsgbuf_with_resp);
+
+ for (i = 0; i < octeon_dev->ifcount; i++) {
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ data_size = sizeof(struct lio_version);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(octeon_dev, data_size,
+ resp_size, 0);
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ *((u64 *)vdata) = 0;
+ vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ num_iqueues = octeon_dev->sriov_info.num_pf_rings;
+ num_oqueues = octeon_dev->sriov_info.num_pf_rings;
+ base_queue = octeon_dev->sriov_info.pf_srn;
+
+ gmx_port_id = octeon_dev->pf_num;
+ ifidx_or_pfnum = octeon_dev->pf_num;
+ } else {
+ num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ base_queue = CFG_GET_BASE_QUE_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ gmx_port_id = CFG_GET_GMXID_NIC_IF(
+ octeon_get_conf(octeon_dev), i);
+ ifidx_or_pfnum = i;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "requesting config for interface %d, iqs %d, oqs %d\n",
+ ifidx_or_pfnum, num_iqueues, num_oqueues);
+
+ if_cfg.u64 = 0;
+ if_cfg.s.num_iqueues = num_iqueues;
+ if_cfg.s.num_oqueues = num_oqueues;
+ if_cfg.s.base_queue = base_queue;
+ if_cfg.s.gmx_port_id = gmx_port_id;
+
+ sc->iq_no = 0;
+
+ octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_IF_CFG, 0,
+ if_cfg.u64, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(octeon_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed status: %x\n",
+ retval);
+ /* Soft instr is freed by driver in case of failure. */
+ octeon_free_soft_command(octeon_dev, sc);
+			return -EIO;
+ }
+
+ /* Sleep on a wait queue till the cond flag indicates that the
+		 * response arrived or timed out.
+ */
+ retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = resp->status;
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
+ }
+ snprintf(octeon_dev->fw_info.liquidio_firmware_version,
+ 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
+
+ /* Verify f/w version (in case of 'auto' loading from flash) */
+ fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
+ if (memcmp(LIQUIDIO_BASE_VERSION,
+ fw_ver,
+ strlen(LIQUIDIO_BASE_VERSION))) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION, fw_ver);
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
+ } else if (atomic_read(octeon_dev->adapter_fw_state) ==
+ FW_IS_PRELOADED) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "Using auto-loaded firmware version %s.\n",
+ fw_ver);
+ }
+
+ /* extract micro version field; point past '<maj>.<min>.' */
+ micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
+ if (kstrtoul(micro_ver, 10, &micro) != 0)
+ micro = 0;
+ octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
+ octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
+ octeon_dev->fw_info.ver.rev = micro;
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ num_iqueues = hweight64(resp->cfg_info.iqmask);
+ num_oqueues = hweight64(resp->cfg_info.oqmask);
+
+ if (!(num_iqueues) || !(num_oqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
+ resp->cfg_info.iqmask,
+ resp->cfg_info.oqmask);
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
+ }
+
+ if (OCTEON_CN6XXX(octeon_dev)) {
+ max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+ cn6xxx));
+ } else if (OCTEON_CN23XX_PF(octeon_dev)) {
+ max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
+ cn23xx_pf));
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
+ i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues, max_num_queues);
+ netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
+
+ if (!netdev) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
+ }
+
+ SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
+
+ /* Associate the routines that will handle different
+ * netdev tasks.
+ */
+ netdev->netdev_ops = &lionetdevops;
+
+ retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "setting real number rx failed\n");
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_free;
+ }
+
+ retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "setting real number tx failed\n");
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_free;
+ }
+
+ lio = GET_LIO(netdev);
+
+ memset(lio, 0, sizeof(struct lio));
+
+ lio->ifidx = ifidx_or_pfnum;
+
+ props = &octeon_dev->props[i];
+ props->gmxport = resp->cfg_info.linfo.gmxport;
+ props->netdev = netdev;
+
+ lio->linfo.num_rxpciq = num_oqueues;
+ lio->linfo.num_txpciq = num_iqueues;
+ for (j = 0; j < num_oqueues; j++) {
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
+ }
+ for (j = 0; j < num_iqueues; j++) {
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
+ }
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ if (OCTEON_CN23XX_PF(octeon_dev) ||
+ OCTEON_CN6XXX(octeon_dev)) {
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_GRO
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+ }
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6, Checksums for this device
+ */
+ lio->enc_dev_capability = NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_GSO_UDP_TUNNEL
+ | NETIF_F_HW_CSUM | NETIF_F_SG
+ | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+
+ netdev->hw_enc_features = (lio->enc_dev_capability &
+ ~NETIF_F_LRO);
+
+ netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
+
+ lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
+
+ netdev->vlan_features = lio->dev_capability;
+ /* Add any unchangeable hw features */
+ lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
+
+ netdev->hw_features = lio->dev_capability;
+		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
+ netdev->hw_features = netdev->hw_features &
+ ~NETIF_F_HW_VLAN_CTAG_RX;
+
+ /* MTU range: 68 - 16000 */
+ netdev->min_mtu = LIO_MIN_MTU_SIZE;
+ netdev->max_mtu = LIO_MAX_MTU_SIZE;
+
+ /* Point to the properties for octeon device to which this
+ * interface belongs.
+ */
+ lio->oct_dev = octeon_dev;
+ lio->octprops = props;
+ lio->netdev = netdev;
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "if%d gmx: %d hw_addr: 0x%llx\n", i,
+ lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+ for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
+ u8 vfmac[ETH_ALEN];
+
+ eth_random_addr(vfmac);
+ if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Error setting VF%d MAC address\n",
+ j);
+ goto setup_nic_dev_free;
+ }
+ }
+
+ /* 64-bit swap required on LE machines */
+ octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
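+		/* The 6-byte MAC occupies bytes 2..7 of the 8-byte hw_addr field */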
+ for (j = 0; j < 6; j++)
+ mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
+
+ /* Copy MAC Address to OS network device structure */
+
+ eth_hw_addr_set(netdev, mac);
+
+		/* By default all interfaces on a single Octeon use the same
+		 * tx and rx queues.
+		 */
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+ if (liquidio_setup_io_queues(octeon_dev, i,
+ lio->linfo.num_txpciq,
+ lio->linfo.num_rxpciq)) {
+ dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+ goto setup_nic_dev_free;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+ lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+ lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+ if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Gather list allocation failed\n");
+ goto setup_nic_dev_free;
+ }
+
+ /* Register ethtool support */
+ liquidio_set_ethtool_ops(netdev);
+ if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
+ octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
+ else
+ octeon_dev->priv_flags = 0x0;
+
+ if (netdev->features & NETIF_F_LRO)
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
+ OCTNET_CMD_VLAN_FILTER_ENABLE);
+
+ if ((debug != -1) && (debug & NETIF_MSG_HW))
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
+
+ if (setup_link_status_change_wq(netdev))
+ goto setup_nic_dev_free;
+
+ if ((octeon_dev->fw_info.app_cap_flags &
+ LIQUIDIO_TIME_SYNC_CAP) &&
+ setup_sync_octeon_time_wq(netdev))
+ goto setup_nic_dev_free;
+
+ if (setup_rx_oom_poll_fn(netdev))
+ goto setup_nic_dev_free;
+
+ /* Register the network device with the OS */
+ if (register_netdev(netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+ goto setup_nic_dev_free;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+ i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ netif_carrier_off(netdev);
+ lio->link_changes++;
+
+ ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+		/* Send a command to the firmware to enable Rx checksum offload
+		 * by default when the LiquidIO driver sets up this device.
+		 */
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
+ OCTNET_CMD_TXCSUM_ENABLE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup successful\n", i);
+
+ if (octeon_dev->subsystem_id ==
+ OCTEON_CN2350_25GB_SUBSYS_ID ||
+ octeon_dev->subsystem_id ==
+ OCTEON_CN2360_25GB_SUBSYS_ID) {
+ cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
+ octeon_dev->fw_info.ver.min,
+ octeon_dev->fw_info.ver.rev);
+
+ /* speed control unsupported in f/w older than 1.7.2 */
+ if (cur_ver < OCT_FW_VER(1, 7, 2)) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "speed setting not supported by f/w.");
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ } else {
+ liquidio_get_speed(lio);
+ }
+
+ if (octeon_dev->speed_setting == 0) {
+ octeon_dev->speed_setting = 25;
+ octeon_dev->no_speed_setting = 1;
+ }
+ } else {
+ octeon_dev->no_speed_setting = 1;
+ octeon_dev->speed_setting = 10;
+ }
+ octeon_dev->speed_boot = octeon_dev->speed_setting;
+
+ /* don't read FEC setting if unsupported by f/w (see above) */
+ if (octeon_dev->speed_boot == 25 &&
+ !octeon_dev->no_speed_setting) {
+ liquidio_get_fec(lio);
+ octeon_dev->props[lio->ifidx].fec_boot =
+ octeon_dev->props[lio->ifidx].fec;
+ }
+ }
+
+ device_lock(&octeon_dev->pci_dev->dev);
+ devlink = devlink_alloc(&liquidio_devlink_ops,
+ sizeof(struct lio_devlink_priv),
+ &octeon_dev->pci_dev->dev);
+ if (!devlink) {
+ device_unlock(&octeon_dev->pci_dev->dev);
+ dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
+ goto setup_nic_dev_free;
+ }
+
+ lio_devlink = devlink_priv(devlink);
+ lio_devlink->oct = octeon_dev;
+
+ octeon_dev->devlink = devlink;
+ octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_register(devlink);
+ device_unlock(&octeon_dev->pci_dev->dev);
+
+ return 0;
+
+setup_nic_dev_free:
+
+ while (i--) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup failed\n", i);
+ liquidio_destroy_nic_device(octeon_dev, i);
+ }
+
+setup_nic_dev_done:
+
+ return -ENODEV;
+}
+
+#ifdef CONFIG_PCI_IOV
+static int octeon_enable_sriov(struct octeon_device *oct)
+{
+ unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
+ struct pci_dev *vfdev;
+ int err;
+ u32 u;
+
+ if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
+ err = pci_enable_sriov(oct->pci_dev,
+ oct->sriov_info.num_vfs_alloced);
+ if (err) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Failed to enable PCI sriov: %d\n",
+ err);
+ oct->sriov_info.num_vfs_alloced = 0;
+ return err;
+ }
+ oct->sriov_info.sriov_enabled = 1;
+
+ /* init lookup table that maps DPI ring number to VF pci_dev
+ * struct pointer
+ */
+ u = 0;
+ vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ OCTEON_CN23XX_VF_VID, NULL);
+ while (vfdev) {
+ if (vfdev->is_virtfn &&
+ (vfdev->physfn == oct->pci_dev)) {
+ oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
+ vfdev;
+ u += oct->sriov_info.rings_per_vf;
+ }
+ vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ OCTEON_CN23XX_VF_VID, vfdev);
+ }
+ }
+
+ return num_vfs_alloced;
+}
+
+static int lio_pci_sriov_disable(struct octeon_device *oct)
+{
+ int u;
+
+ if (pci_vfs_assigned(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
+ return -EPERM;
+ }
+
+ pci_disable_sriov(oct->pci_dev);
+
+ u = 0;
+ while (u < MAX_POSSIBLE_VFS) {
+ oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
+ u += oct->sriov_info.rings_per_vf;
+ }
+
+ oct->sriov_info.num_vfs_alloced = 0;
+ dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
+ oct->pf_num);
+
+ return 0;
+}
+
+static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
+{
+ struct octeon_device *oct = pci_get_drvdata(dev);
+ int ret = 0;
+
+ if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
+ (oct->sriov_info.sriov_enabled)) {
+ dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
+ oct->pf_num, num_vfs);
+ return 0;
+ }
+
+ if (!num_vfs) {
+ lio_vf_rep_destroy(oct);
+ ret = lio_pci_sriov_disable(oct);
+ } else if (num_vfs > oct->sriov_info.max_vfs) {
+ dev_err(&oct->pci_dev->dev,
+ "OCTEON: Max allowed VFs:%d user requested:%d",
+ oct->sriov_info.max_vfs, num_vfs);
+ ret = -EPERM;
+ } else {
+ oct->sriov_info.num_vfs_alloced = num_vfs;
+ ret = octeon_enable_sriov(oct);
+ dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
+ oct->pf_num, num_vfs);
+ ret = lio_vf_rep_create(oct);
+ if (ret)
+ dev_info(&oct->pci_dev->dev,
+ "vf representor create failed");
+ }
+
+ return ret;
+}
+#endif
+
+/**
+ * liquidio_init_nic_module - initialize the NIC
+ * @oct: octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+ int i, retval = 0;
+ int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
+
+ dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+	/* Only the default IQ and OQ were initialized earlier;
+	 * initialize the rest as well.
+	 */
+	/* Run the port_config command for each port. */
+ oct->ifcount = num_nic_ports;
+
+ memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
+
+ for (i = 0; i < MAX_OCTEON_LINKS; i++)
+ oct->props[i].gmxport = -1;
+
+ retval = setup_nic_devices(oct);
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
+ goto octnet_init_failure;
+ }
+
+ /* Call vf_rep_modinit if the firmware is switchdev capable
+ * and do it from the first liquidio function probed.
+ */
+ if (!oct->octeon_id &&
+ oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
+ retval = lio_vf_rep_modinit();
+ if (retval) {
+ liquidio_stop_nic_module(oct);
+ goto octnet_init_failure;
+ }
+ }
+
+ liquidio_ptp_init(oct);
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+ return retval;
+
+octnet_init_failure:
+
+ oct->ifcount = 0;
+
+ return retval;
+}
+
+/**
+ * nic_starter - finish init
+ * @work: the work_struct from which this callback was scheduled
+ *
+ * Starter callback that invokes the remaining initialization work
+ * after the NIC is up and running.
+ */
+static void nic_starter(struct work_struct *work)
+{
+ struct octeon_device *oct;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+
+ oct = (struct octeon_device *)wk->ctxptr;
+
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
+ return;
+
+ /* If the status of the device is CORE_OK, the core
+ * application has reported its application type. Call
+ * any registered handlers now and move to the RUNNING
+ * state.
+ */
+ if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
+ schedule_delayed_work(&oct->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+ return;
+ }
+
+ atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+ if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
+ dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
+
+ if (liquidio_init_nic_module(oct))
+ dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
+ else
+ handshake[oct->octeon_id].started_ok = 1;
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "Unexpected application running on NIC (%d). Check firmware.\n",
+ oct->app_mode);
+ }
+
+ complete(&handshake[oct->octeon_id].started);
+}
+
+static int
+octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ int i, notice, vf_idx;
+ bool cores_crashed;
+ u64 *data, vf_num;
+
+ notice = recv_pkt->rh.r.ossp;
+ data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
+
+ /* the first 64-bit word of data is the vf_num */
+ vf_num = data[0];
+ octeon_swap_8B_data(&vf_num, 1);
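+	/* vf_num reported by the firmware is 1 based; convert to a 0 based index */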
+ vf_idx = (int)vf_num - 1;
+
+ cores_crashed = READ_ONCE(oct->cores_crashed);
+
+ if (notice == VF_DRV_LOADED) {
+ if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
+ oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
+ dev_info(&oct->pci_dev->dev,
+ "driver for VF%d was loaded\n", vf_idx);
+ if (!cores_crashed)
+ try_module_get(THIS_MODULE);
+ }
+ } else if (notice == VF_DRV_REMOVED) {
+ if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
+ oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
+ dev_info(&oct->pci_dev->dev,
+ "driver for VF%d was removed\n", vf_idx);
+ if (!cores_crashed)
+ module_put(THIS_MODULE);
+ }
+ } else if (notice == VF_DRV_MACADDR_CHANGED) {
+ u8 *b = (u8 *)&data[1];
+
+ oct->sriov_info.vf_macaddr[vf_idx] = data[1];
+ dev_info(&oct->pci_dev->dev,
+ "VF driver changed VF%d's MAC address to %pM\n",
+ vf_idx, b + 2);
+ }
+
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+
+ return 0;
+}
+
+/**
+ * octeon_device_init - Device initialization for each Octeon device that is probed
+ * @octeon_dev: octeon device
+ */
+static int octeon_device_init(struct octeon_device *octeon_dev)
+{
+ int j, ret;
+ char bootcmd[] = "\n";
+ char *dbg_enb = NULL;
+ enum lio_fw_state fw_state;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)octeon_dev->priv;
+ atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
+
+ /* Enable access to the octeon device and make its DMA capability
+ * known to the OS.
+ */
+ if (octeon_pci_os_setup(octeon_dev))
+ return 1;
+
+ atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
+
+ /* Identify the Octeon type and map the BAR address space. */
+ if (octeon_chip_specific_setup(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+
+ /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
+ * since that is what is required for the reference to be removed
+ * during de-initialization (see 'octeon_destroy_resources').
+ */
+ octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
+ PCI_SLOT(octeon_dev->pci_dev->devfn),
+ PCI_FUNC(octeon_dev->pci_dev->devfn),
+ true);
+
+ octeon_dev->app_mode = CVM_DRV_INVALID_APP;
+
+ /* CN23XX supports preloaded firmware if the following is true:
+ *
+ * The adapter indicates that firmware is currently running AND
+ * 'fw_type' is 'auto'.
+ *
+ * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
+ */
+ if (OCTEON_CN23XX_PF(octeon_dev) &&
+ cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
+ atomic_cmpxchg(octeon_dev->adapter_fw_state,
+ FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
+ }
+
+ /* If loading firmware, only first device of adapter needs to do so. */
+ fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
+ FW_NEEDS_TO_BE_LOADED,
+ FW_IS_BEING_LOADED);
+
+ /* Here, [local variable] 'fw_state' is set to one of:
+ *
+ * FW_IS_PRELOADED: No firmware is to be loaded (see above)
+ * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
+ * firmware to the adapter.
+ * FW_IS_BEING_LOADED: The driver's second instance will not load
+ * firmware to the adapter.
+ */
+
+ /* Prior to f/w load, perform a soft reset of the Octeon device;
+ * if error resetting, return w/error.
+ */
+ if (fw_state == FW_NEEDS_TO_BE_LOADED)
+ if (octeon_dev->fn_list.soft_reset(octeon_dev))
+ return 1;
+
+ /* Initialize the dispatch mechanism used to push packets arriving on
+ * Octeon Output queues.
+ */
+ if (octeon_init_dispatch_list(octeon_dev))
+ return 1;
+
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_CORE_DRV_ACTIVE,
+ octeon_core_drv_init,
+ octeon_dev);
+
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+ OPCODE_NIC_VF_DRV_NOTICE,
+ octeon_recv_vf_drv_notice, octeon_dev);
+ INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
+ octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
+ schedule_delayed_work(&octeon_dev->nic_poll_work.work,
+ LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+ if (octeon_set_io_queues_off(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
+ return 1;
+ }
+
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
+ return ret;
+ }
+ }
+
+ /* Initialize soft command buffer pool
+ */
+ if (octeon_setup_sc_buffer_pool(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+ /* Setup the data structures that manage this Octeon's Input queues. */
+ if (octeon_setup_instr_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "instruction queue initialization failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from user & kernel applications for this octeon device.
+ */
+ if (octeon_setup_response_list(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+ if (octeon_setup_output_queues(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
+ return 1;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
+
+ if (OCTEON_CN23XX_PF(octeon_dev)) {
+ if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
+
+ if (octeon_allocate_ioq_vector
+ (octeon_dev,
+ octeon_dev->sriov_info.num_pf_rings)) {
+ dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
+ return 1;
+ }
+ atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
+
+ } else {
+ /* The input and output queue registers were setup earlier (the
+ * queues were not enabled). Any additional registers
+ * that need to be programmed should be done now.
+ */
+ ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Failed to configure device registers\n");
+ return ret;
+ }
+ }
+
+ /* Initialize the tasklet that handles output queue packet processing.*/
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
+ tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
+
+ /* Setup the interrupt handler and record the INT SUM register address
+ */
+ if (octeon_setup_interrupt(octeon_dev,
+ octeon_dev->sriov_info.num_pf_rings))
+ return 1;
+
+ /* Enable Octeon device interrupts */
+ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
+
+ /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
+ * the output queue is enabled.
+ * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
+ * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
+ * Otherwise, it is possible that the DRV_ACTIVE message will be sent
+ * before any credits have been issued, causing the ring to be reset
+ * (and the f/w appear to never have started).
+ */
+ for (j = 0; j < octeon_dev->num_oqs; j++)
+ writel(octeon_dev->droq[j]->max_count,
+ octeon_dev->droq[j]->pkts_credit_reg);
+
+ /* Enable the input and output queues for this Octeon device */
+ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
+ return ret;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
+
+ if (fw_state == FW_NEEDS_TO_BE_LOADED) {
+ dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
+ if (!ddr_timeout) {
+ dev_info(&octeon_dev->pci_dev->dev,
+ "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
+ }
+
+ schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
+
+ /* Wait for the octeon to initialize DDR after the soft-reset.*/
+ while (!ddr_timeout) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 10)) {
+ /* user probably pressed Control-C */
+ return 1;
+ }
+ }
+ ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
+ ret);
+ return 1;
+ }
+
+ if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
+ return 1;
+ }
+
+ /* Divert uboot to take commands from host instead. */
+ ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
+ ret = octeon_init_consoles(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
+ return 1;
+ }
+		/* If console debug is enabled, pass an empty string to use the
+		 * default enablement; otherwise pass NULL to leave it disabled.
+		 */
+ dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
+ ret = octeon_add_console(octeon_dev, 0, dbg_enb);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
+ return 1;
+ } else if (octeon_console_debug_enabled(0)) {
+ /* If console was added AND we're logging console output
+ * then set our console print function.
+ */
+ octeon_dev->console[0].print = octeon_dbg_console_print;
+ }
+
+ atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
+ ret = load_firmware(octeon_dev);
+ if (ret) {
+ dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
+ return 1;
+ }
+
+ atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
+ }
+
+ handshake[octeon_dev->octeon_id].init_ok = 1;
+ complete(&handshake[octeon_dev->octeon_id].init);
+
+ atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+ oct_priv->dev = octeon_dev;
+
+ return 0;
+}
+
+/**
+ * octeon_dbg_console_print - Debug console print function
+ * @oct: octeon device
+ * @console_num: console number
+ * @prefix: first portion of line to display
+ * @suffix: second portion of line to display
+ *
+ * The OCTEON debug console outputs entire lines (excluding '\n').
+ * Normally, the line will be passed in the 'prefix' parameter.
+ * However, due to buffering, it is possible for a line to be split into two
+ * parts, in which case they will be passed as the 'prefix' parameter and
+ * 'suffix' parameter.
+ */
+static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
+ char *prefix, char *suffix)
+{
+ if (prefix && suffix)
+ dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
+ suffix);
+ else if (prefix)
+ dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
+ else if (suffix)
+ dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
+
+ return 0;
+}
+
+/**
+ * liquidio_exit - Exits the module
+ */
+static void __exit liquidio_exit(void)
+{
+ liquidio_deinit_pci();
+
+ pr_info("LiquidIO network module is now unloaded\n");
+}
+
+module_init(liquidio_init);
+module_exit(liquidio_exit);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
new file mode 100644
index 000000000..ac196883f
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -0,0 +1,2442 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <net/vxlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn23xx_vf_device.h"
+
+MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
+MODULE_LICENSE("GPL");
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+
+struct oct_timestamp_resp {
+ u64 rh;
+ u64 timestamp;
+ u64 status;
+};
+
+union tx_info {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u16 gso_size;
+ u16 gso_segs;
+ u32 reserved;
+#else
+ u32 reserved;
+ u16 gso_segs;
+ u16 gso_size;
+#endif
+ } s;
+};
+
+#define OCTNIC_GSO_MAX_HEADER_SIZE 128
+#define OCTNIC_GSO_MAX_SIZE \
+ (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
+
+static int
+liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void liquidio_vf_remove(struct pci_dev *pdev);
+static int octeon_device_init(struct octeon_device *oct);
+static int liquidio_stop(struct net_device *netdev);
+
+static int lio_wait_for_oq_pkts(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ int retry = MAX_IO_PENDING_PKT_COUNT;
+ int pkt_cnt = 0, pending_pkts;
+ int i;
+
+ do {
+ pending_pkts = 0;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
+ }
+ if (pkt_cnt > 0) {
+ pending_pkts += pkt_cnt;
+ tasklet_schedule(&oct_priv->droq_tasklet);
+ }
+ pkt_cnt = 0;
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && pending_pkts);
+
+ return pkt_cnt;
+}
+
+/**
+ * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
+ * @oct: Pointer to Octeon device
+ */
+static void pcierror_quiesce_device(struct octeon_device *oct)
+{
+ int i;
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet processing
+ * to finish.
+ */
+
+ /* To allow for in-flight requests */
+ schedule_timeout_uninterruptible(100);
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to complete. */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq, 0);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ /* Force all pending ordered list requests to time out. */
+ lio_process_ordered_list(oct, 1);
+
+ /* We do not need to wait for output queue packets to be processed. */
+}
+
+/**
+ * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
+ * @dev: Pointer to PCI device
+ */
+static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
+{
+ u32 status, mask;
+ int pos = 0x100;
+
+ pr_info("%s :\n", __func__);
+
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
+ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
+ if (dev->error_state == pci_channel_io_normal)
+ status &= ~mask; /* Clear corresponding nonfatal bits */
+ else
+ status &= mask; /* Clear corresponding fatal bits */
+ pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
+}
+
+/**
+ * stop_pci_io - Stop all PCI IO to a given device
+ * @oct: Pointer to Octeon device
+ */
+static void stop_pci_io(struct octeon_device *oct)
+{
+ struct msix_entry *msix_entries;
+ int i;
+
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ for (i = 0; i < oct->ifcount; i++)
+ netif_device_detach(oct->props[i].netdev);
+
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ pcierror_quiesce_device(oct);
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs; i++) {
+ /* clear the affinity_cpumask */
+ irq_set_affinity_hint(msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ octeon_free_ioq_vector(oct);
+ }
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ /* making it a common function for all OCTEON models */
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+
+ pci_disable_device(oct->pci_dev);
+}
+
+/**
+ * liquidio_pcie_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct octeon_device *oct = pci_get_drvdata(pdev);
+
+ /* Non-correctable Non-fatal errors */
+ if (state == pci_channel_io_normal) {
+ dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+ cleanup_aer_uncorrect_error_status(oct->pci_dev);
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ }
+
+ /* Non-correctable Fatal errors */
+ dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+ stop_pci_io(oct);
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/* For PCI-E Advanced Error Recovery (AER) Interface */
+static const struct pci_error_handlers liquidio_vf_err_handler = {
+ .error_detected = liquidio_pcie_error_detected,
+};
+
+static const struct pci_device_id liquidio_vf_pci_tbl[] = {
+ {
+ PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
+ },
+ {
+ 0, 0, 0, 0, 0, 0, 0
+ }
+};
+MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
+
+static struct pci_driver liquidio_vf_pci_driver = {
+ .name = "LiquidIO_VF",
+ .id_table = liquidio_vf_pci_tbl,
+ .probe = liquidio_vf_probe,
+ .remove = liquidio_vf_remove,
+ .err_handler = &liquidio_vf_err_handler, /* For AER */
+};
+
+/**
+ * print_link_info - Print link information
+ * @netdev: network device
+ */
+static void print_link_info(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
+ ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
+ struct oct_link_info *linfo = &lio->linfo;
+
+ if (linfo->link.s.link_up) {
+ netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
+ linfo->link.s.speed,
+ (linfo->link.s.duplex) ? "Full" : "Half");
+ } else {
+ netif_info(lio, link, lio->netdev, "Link Down\n");
+ }
+ }
+}
+
+/**
+ * octnet_link_status_change - Routine to notify MTU change
+ * @work: work_struct data structure
+ */
+static void octnet_link_status_change(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio *lio = (struct lio *)wk->ctxptr;
+
+	/* lio->linfo.link.s.mtu always contains the max MTU of the lio
+	 * interface. This handler is invoked only when the new max MTU of
+	 * the interface is less than the current MTU.
+	 */
+ rtnl_lock();
+ dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
+ rtnl_unlock();
+}
+
+/**
+ * setup_link_status_change_wq - Sets up the mtu status change work
+ * @netdev: network device
+ */
+static int setup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ lio->link_status_wq.wq = alloc_workqueue("link-status",
+ WQ_MEM_RECLAIM, 0);
+ if (!lio->link_status_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
+ return -1;
+ }
+ INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
+ octnet_link_status_change);
+ lio->link_status_wq.wk.ctxptr = lio;
+
+ return 0;
+}
+
+static void cleanup_link_status_change_wq(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ if (lio->link_status_wq.wq) {
+ cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
+ destroy_workqueue(lio->link_status_wq.wq);
+ }
+}
+
+/**
+ * update_link_status - Update link status
+ * @netdev: network device
+ * @ls: link status structure
+ *
+ * Called on receipt of a link status response from the core application to
+ * update each interface's link status.
+ */
+static void update_link_status(struct net_device *netdev,
+ union oct_link_status *ls)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int current_max_mtu = lio->linfo.link.s.mtu;
+ struct octeon_device *oct = lio->oct_dev;
+
+ if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
+ lio->linfo.link.u64 = ls->u64;
+
+ print_link_info(netdev);
+ lio->link_changes++;
+
+ if (lio->linfo.link.s.link_up) {
+ netif_carrier_on(netdev);
+ wake_txqs(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ stop_txqs(netdev);
+ }
+
+ if (lio->linfo.link.s.mtu != current_max_mtu) {
+ dev_info(&oct->pci_dev->dev,
+ "Max MTU Changed from %d to %d\n",
+ current_max_mtu, lio->linfo.link.s.mtu);
+ netdev->max_mtu = lio->linfo.link.s.mtu;
+ }
+
+ if (lio->linfo.link.s.mtu < netdev->mtu) {
+ dev_warn(&oct->pci_dev->dev,
+ "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
+ netdev->mtu, lio->linfo.link.s.mtu);
+ queue_delayed_work(lio->link_status_wq.wq,
+ &lio->link_status_wq.wk.work, 0);
+ }
+ }
+}
+
+/**
+ * liquidio_vf_probe - PCI probe handler
+ * @pdev: PCI device structure
+ * @ent: unused
+ */
+static int
+liquidio_vf_probe(struct pci_dev *pdev,
+ const struct pci_device_id __maybe_unused *ent)
+{
+ struct octeon_device *oct_dev = NULL;
+
+ oct_dev = octeon_allocate_device(pdev->device,
+ sizeof(struct octeon_device_priv));
+
+ if (!oct_dev) {
+ dev_err(&pdev->dev, "Unable to allocate device\n");
+ return -ENOMEM;
+ }
+ oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
+
+ dev_info(&pdev->dev, "Initializing device %x:%x.\n",
+ (u32)pdev->vendor, (u32)pdev->device);
+
+ /* Assign octeon_device for this device to the private data area. */
+ pci_set_drvdata(pdev, oct_dev);
+
+ /* set linux specific device pointer */
+ oct_dev->pci_dev = pdev;
+
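+	/* Pack the subsystem vendor ID (low 16 bits) and subsystem device
+	 * ID (high 16 bits) into a single identifier.
+	 */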
+ oct_dev->subsystem_id = pdev->subsystem_vendor |
+ (pdev->subsystem_device << 16);
+
+ if (octeon_device_init(oct_dev)) {
+ liquidio_vf_remove(pdev);
+ return -ENOMEM;
+ }
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+ return 0;
+}
+
+/**
+ * octeon_pci_flr - PCI FLR for each Octeon device.
+ * @oct: octeon device
+ */
+static void octeon_pci_flr(struct octeon_device *oct)
+{
+ pci_save_state(oct->pci_dev);
+
+ pci_cfg_access_lock(oct->pci_dev);
+
+ /* Quiesce the device completely */
+ pci_write_config_word(oct->pci_dev, PCI_COMMAND,
+ PCI_COMMAND_INTX_DISABLE);
+
+ pcie_flr(oct->pci_dev);
+
+ pci_cfg_access_unlock(oct->pci_dev);
+
+ pci_restore_state(oct->pci_dev);
+}
+
+/**
+ * octeon_destroy_resources - Destroy resources associated with octeon device
+ * @oct: octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct msix_entry *msix_entries;
+ int i;
+
+ switch (atomic_read(&oct->status)) {
+ case OCT_DEV_RUNNING:
+ case OCT_DEV_CORE_OK:
+ /* No more instructions will be forwarded. */
+ atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+ oct->app_mode = CVM_DRV_INVALID_APP;
+ dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+ lio_get_state_string(&oct->status));
+
+ schedule_timeout_uninterruptible(HZ / 10);
+
+ fallthrough;
+ case OCT_DEV_HOST_OK:
+ case OCT_DEV_IO_QUEUES_DONE:
+ if (lio_wait_for_instr_fetch(oct))
+ dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+ if (wait_for_pending_requests(oct))
+ dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+ /* Disable the input and output queues now. No more packets will
+ * arrive from Octeon, but we should wait for all packet
+ * processing to finish.
+ */
+ oct->fn_list.disable_io_queues(oct);
+
+ if (lio_wait_for_oq_pkts(oct))
+ dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+ /* Force all requests waiting to be fetched by OCTEON to
+ * complete.
+ */
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ struct octeon_instr_queue *iq;
+
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ iq = oct->instr_queue[i];
+
+ if (atomic_read(&iq->instr_pending)) {
+ spin_lock_bh(&iq->lock);
+ iq->fill_cnt = 0;
+ iq->octeon_read_index = iq->host_write_index;
+ iq->stats.instr_processed +=
+ atomic_read(&iq->instr_pending);
+ lio_process_iq_request_list(oct, iq, 0);
+ spin_unlock_bh(&iq->lock);
+ }
+ }
+
+ lio_process_ordered_list(oct, 1);
+ octeon_free_sc_done_list(oct);
+ octeon_free_sc_zombie_list(oct);
+
+ fallthrough;
+ case OCT_DEV_INTR_SET_DONE:
+ /* Disable interrupts */
+ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (oct->msix_on) {
+ msix_entries = (struct msix_entry *)oct->msix_entries;
+ for (i = 0; i < oct->num_msix_irqs; i++) {
+ if (oct->ioq_vector[i].vector) {
+ irq_set_affinity_hint(
+ msix_entries[i].vector,
+ NULL);
+ free_irq(msix_entries[i].vector,
+ &oct->ioq_vector[i]);
+ oct->ioq_vector[i].vector = 0;
+ }
+ }
+ pci_disable_msix(oct->pci_dev);
+ kfree(oct->msix_entries);
+ oct->msix_entries = NULL;
+ kfree(oct->irq_name_storage);
+ oct->irq_name_storage = NULL;
+ }
+ /* Soft reset the octeon device before exiting */
+ if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
+ octeon_pci_flr(oct);
+ else
+ cn23xx_vf_ask_pf_to_do_flr(oct);
+
+ fallthrough;
+ case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
+ octeon_free_ioq_vector(oct);
+
+ fallthrough;
+ case OCT_DEV_MBOX_SETUP_DONE:
+ oct->fn_list.free_mbox(oct);
+
+ fallthrough;
+ case OCT_DEV_IN_RESET:
+ case OCT_DEV_DROQ_INIT_DONE:
+ mdelay(100);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & BIT_ULL(i)))
+ continue;
+ octeon_delete_droq(oct, i);
+ }
+
+ fallthrough;
+ case OCT_DEV_RESP_LIST_INIT_DONE:
+ octeon_delete_response_list(oct);
+
+ fallthrough;
+ case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ octeon_delete_instr_queue(oct, i);
+ }
+
+ fallthrough;
+ case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+ octeon_free_sc_buffer_pool(oct);
+
+ fallthrough;
+ case OCT_DEV_DISPATCH_INIT_DONE:
+ octeon_delete_dispatch_list(oct);
+ cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+ fallthrough;
+ case OCT_DEV_PCI_MAP_DONE:
+ octeon_unmap_pci_barx(oct, 0);
+ octeon_unmap_pci_barx(oct, 1);
+
+ fallthrough;
+ case OCT_DEV_PCI_ENABLE_DONE:
+ pci_clear_master(oct->pci_dev);
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
+ fallthrough;
+ case OCT_DEV_BEGIN_STATE:
+ /* Nothing to be done here either */
+ break;
+ }
+
+ tasklet_kill(&oct_priv->droq_tasklet);
+}
+
+/**
+ * send_rx_ctrl_cmd - Send Rx control command
+ * @lio: per-network private data
+ * @start_stop: whether to start or stop
+ */
+static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
+{
+ struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
+ struct octeon_soft_command *sc;
+ union octnet_cmd *ncmd;
+ int retval;
+
+ if (oct->props[lio->ifidx].rx_on == start_stop)
+ return 0;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
+ 16, 0);
+ if (!sc) {
+ netif_info(lio, rx_err, lio->netdev,
+ "Failed to allocate octeon_soft_command struct\n");
+ return -ENOMEM;
+ }
+
+ ncmd = (union octnet_cmd *)sc->virtdptr;
+
+ ncmd->u64 = 0;
+ ncmd->s.cmd = OCTNET_CMD_RX_CTL;
+ ncmd->s.param1 = start_stop;
+
+ octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_CMD, 0, 0, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
+ octeon_free_soft_command(oct, sc);
+ } else {
+ /* Sleep on a wait queue till the cond flag indicates that the
+		 * response arrived or timed out.
+ */
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return retval;
+
+ oct->props[lio->ifidx].rx_on = start_stop;
+ WRITE_ONCE(sc->caller_is_done, true);
+ }
+
+ return retval;
+}
+
+/**
+ * liquidio_destroy_nic_device - Destroy NIC device interface
+ * @oct: octeon device
+ * @ifidx: which interface to destroy
+ *
+ * Cleanup associated with each interface for an Octeon device when NIC
+ * module is being unloaded or if initialization fails during load.
+ */
+static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
+{
+ struct net_device *netdev = oct->props[ifidx].netdev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct napi_struct *napi, *n;
+ struct lio *lio;
+
+ if (!netdev) {
+ dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
+ __func__, ifidx);
+ return;
+ }
+
+ lio = GET_LIO(netdev);
+
+ dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
+ liquidio_stop(netdev);
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ oct->droq[0]->ops.poll_mode = 0;
+ }
+
+ /* Delete NAPI */
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ netif_napi_del(napi);
+
+ tasklet_enable(&oct_priv->droq_tasklet);
+
+ if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
+ unregister_netdev(netdev);
+
+ cleanup_rx_oom_poll_fn(netdev);
+
+ cleanup_link_status_change_wq(netdev);
+
+ lio_delete_glists(lio);
+
+ free_netdev(netdev);
+
+ oct->props[ifidx].gmxport = -1;
+
+ oct->props[ifidx].netdev = NULL;
+}
+
+/**
+ * liquidio_stop_nic_module - Stop complete NIC functionality
+ * @oct: octeon device
+ */
+static int liquidio_stop_nic_module(struct octeon_device *oct)
+{
+ struct lio *lio;
+ int i, j;
+
+ dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ if (!oct->ifcount) {
+ dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
+ return 1;
+ }
+
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ oct->cmd_resp_state = OCT_DRV_OFFLINE;
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+
+ for (i = 0; i < oct->ifcount; i++) {
+ lio = GET_LIO(oct->props[i].netdev);
+ for (j = 0; j < oct->num_oqs; j++)
+ octeon_unregister_droq_ops(oct,
+ lio->linfo.rxpciq[j].s.q_no);
+ }
+
+ for (i = 0; i < oct->ifcount; i++)
+ liquidio_destroy_nic_device(oct, i);
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
+ return 0;
+}
+
+/**
+ * liquidio_vf_remove - Cleans up resources at unload time
+ * @pdev: PCI device structure
+ */
+static void liquidio_vf_remove(struct pci_dev *pdev)
+{
+ struct octeon_device *oct_dev = pci_get_drvdata(pdev);
+
+ dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
+
+ if (oct_dev->app_mode == CVM_DRV_NIC_APP)
+ liquidio_stop_nic_module(oct_dev);
+
+ /* Reset the octeon device and clean up all memory allocated for
+ * the octeon device by the driver.
+ */
+ octeon_destroy_resources(oct_dev);
+
+ dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
+
+ /* This octeon device has been removed. Update the global
+ * data structure to reflect this. Free the device structure.
+ */
+ octeon_free_device_mem(oct_dev);
+}
+
+/**
+ * octeon_pci_os_setup - PCI initialization for each Octeon device.
+ * @oct: octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+#ifdef CONFIG_PCI_IOV
+ /* Reset the VF with an FLR when no parent PF is visible on this
+ * host (e.g. the VF is passed through to a guest).
+ */
+ if (!oct->pci_dev->physfn)
+ octeon_pci_flr(oct);
+#endif
+
+ if (pci_enable_device(oct->pci_dev)) {
+ dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+ return 1;
+ }
+
+ if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+ dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+ pci_disable_device(oct->pci_dev);
+ return 1;
+ }
+
+ /* Enable PCI DMA Master. */
+ pci_set_master(oct->pci_dev);
+
+ return 0;
+}
+
+/**
+ * free_netbuf - Unmap and free network buffer
+ * @buf: buffer
+ */
+static void free_netbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct sk_buff *skb;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+ DMA_TO_DEVICE);
+
+ tx_buffer_free(skb);
+}
+
+/**
+ * free_netsgbuf - Unmap and free gather buffer
+ * @buf: buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octnic_gather *g;
+ struct sk_buff *skb;
+ int i, frags, iq;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)buf;
+ skb = finfo->skb;
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
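+ /* Each entry of g->sg[] packs four DMA addresses: i >> 2 selects the
+ * entry and i & 3 the slot. Slot 0 of entry 0 held the linear part
+ * unmapped above; the frags start at i = 1.
+ */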
+ i = 1;
+ while (frags--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ i++;
+ }
+
+ iq = skb_iq(lio->oct_dev, skb);
+
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
+
+ tx_buffer_free(skb);
+}
+
+/**
+ * free_netsgbuf_with_resp - Unmap and free gather buffer with response
+ * @buf: buffer
+ */
+static void free_netsgbuf_with_resp(void *buf)
+{
+ struct octnet_buf_free_info *finfo;
+ struct octeon_soft_command *sc;
+ struct octnic_gather *g;
+ struct sk_buff *skb;
+ int i, frags, iq;
+ struct lio *lio;
+
+ sc = (struct octeon_soft_command *)buf;
+ skb = (struct sk_buff *)sc->callback_arg;
+ finfo = (struct octnet_buf_free_info *)&skb->cb;
+
+ lio = finfo->lio;
+ g = finfo->g;
+ frags = skb_shinfo(skb)->nr_frags;
+
+ dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+ g->sg[0].ptr[0], (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+
+ i = 1;
+ while (frags--) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+
+ dma_unmap_page(&lio->oct_dev->pci_dev->dev,
+ g->sg[(i >> 2)].ptr[(i & 3)],
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ i++;
+ }
+
+ iq = skb_iq(lio->oct_dev, skb);
+
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
+
+ /* Don't free the skb yet */
+}
+
+/**
+ * liquidio_open - Net device open for LiquidIO
+ * @netdev: network device
+ */
+static int liquidio_open(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct napi_struct *napi, *n;
+ int ret = 0;
+
+ if (!oct->props[lio->ifidx].napi_enabled) {
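+ /* NAPI takes over Rx processing, so park the droq tasklet. */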
+ tasklet_disable(&oct_priv->droq_tasklet);
+
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 1;
+
+ oct->droq[0]->ops.poll_mode = 1;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_RUNNING);
+
+ /* Ready for link status updates */
+ lio->intf_open = 1;
+
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
+ start_txqs(netdev);
+
+ INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
+ lio->stats_wk.ctxptr = lio;
+ schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
+ (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
+
+ /* tell Octeon to start forwarding packets to host */
+ ret = send_rx_ctrl_cmd(lio, 1);
+ if (ret)
+ return ret;
+
+ dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
+
+ return ret;
+}
+
+/**
+ * liquidio_stop - Net device stop for LiquidIO
+ * @netdev: network device
+ */
+static int liquidio_stop(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octeon_device_priv *oct_priv =
+ (struct octeon_device_priv *)oct->priv;
+ struct napi_struct *napi, *n;
+ int ret = 0;
+
+ /* tell Octeon to stop forwarding packets to host */
+ ret = send_rx_ctrl_cmd(lio, 0);
+ if (ret)
+ return ret;
+
+ netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ /* Inform that netif carrier is down */
+ lio->intf_open = 0;
+ lio->linfo.link.s.link_up = 0;
+
+ netif_carrier_off(netdev);
+ lio->link_changes++;
+
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ stop_txqs(netdev);
+
+ /* Wait for any pending Rx descriptors */
+ if (lio_wait_for_clean_oq(oct))
+ netif_info(lio, rx_err, lio->netdev,
+ "Proceeding with stop interface after partial RX desc processing\n");
+
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+
+ oct->droq[0]->ops.poll_mode = 0;
+
+ tasklet_enable(&oct_priv->droq_tasklet);
+ }
+
+ cancel_delayed_work_sync(&lio->stats_wk.work);
+
+ dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
+
+ return ret;
+}
+
+/**
+ * get_new_flags - Converts a mask based on net device flags
+ * @netdev: network device
+ *
+ * This routine generates an octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+ enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+ if (netdev->flags & IFF_PROMISC)
+ f |= OCTNET_IFFLAG_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+
+ if (netdev->flags & IFF_MULTICAST) {
+ f |= OCTNET_IFFLAG_MULTICAST;
+
+ /* Accept all multicast addresses if there are more than we
+ * can handle
+ */
+ if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+ f |= OCTNET_IFFLAG_ALLMULTI;
+ }
+
+ if (netdev->flags & IFF_BROADCAST)
+ f |= OCTNET_IFFLAG_BROADCAST;
+
+ return f;
+}
+
+static void liquidio_set_uc_list(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct netdev_hw_addr *ha;
+ u64 *mac;
+
+ if (lio->netdev_uc_count == netdev_uc_count(netdev))
+ return;
+
+ if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
+ dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
+ return;
+ }
+
+ lio->netdev_uc_count = netdev_uc_count(netdev);
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
+ nctrl.ncmd.s.more = lio->netdev_uc_count;
+ nctrl.ncmd.s.param1 = oct->vf_num;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ mac = &nctrl.udd[0];
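+ /* Each MAC address occupies bytes 2-7 of a zeroed 64-bit udd word. */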
+ netdev_for_each_uc_addr(ha, netdev) {
+ ether_addr_copy(((u8 *)mac) + 2, ha->addr);
+ mac++;
+ }
+
+ octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+}
+
+/**
+ * liquidio_set_mcast_list - Net device set_multicast_list
+ * @netdev: network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+ int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ struct netdev_hw_addr *ha;
+ u64 *mc;
+ int ret;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ /* Create a ctrl pkt command to be sent to core app. */
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+ nctrl.ncmd.s.param1 = get_new_flags(netdev);
+ nctrl.ncmd.s.param2 = mc_count;
+ nctrl.ncmd.s.more = mc_count;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ /* copy all the addresses into the udd */
+ mc = &nctrl.udd[0];
+ netdev_for_each_mc_addr(ha, netdev) {
+ *mc = 0;
+ ether_addr_copy(((u8 *)mc) + 2, ha->addr);
+ /* no need to swap bytes */
+ if (++mc > &nctrl.udd[mc_count])
+ break;
+ }
+
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ /* This runs in atomic context, so we do not wait for a response. */
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
+ ret);
+ }
+
+ liquidio_set_uc_list(netdev);
+}
+
+/**
+ * liquidio_set_mac - Net device set_mac_address
+ * @netdev: network device
+ * @p: opaque pointer to sockaddr
+ */
+static int liquidio_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
+ return 0;
+
+ if (lio->linfo.macaddr_is_admin_asgnd)
+ return -EPERM;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
+ nctrl.ncmd.s.param1 = 0;
+ nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+
+ nctrl.udd[0] = 0;
+ /* The MAC Address is presented in network byte order. */
+ ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
+ return -ENOMEM;
+ }
+
+ if (nctrl.sc_status ==
+ FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
+ dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
+ return -EPERM;
+ }
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+ ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
+
+ return 0;
+}
+
+static void
+liquidio_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *lstats)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct;
+ u64 pkts = 0, drop = 0, bytes = 0;
+ struct oct_droq_stats *oq_stats;
+ struct oct_iq_stats *iq_stats;
+ int i, iq_no, oq_no;
+
+ oct = lio->oct_dev;
+
+ if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
+ return;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ iq_no = lio->linfo.txpciq[i].s.q_no;
+ iq_stats = &oct->instr_queue[iq_no]->stats;
+ pkts += iq_stats->tx_done;
+ drop += iq_stats->tx_dropped;
+ bytes += iq_stats->tx_tot_bytes;
+ }
+
+ lstats->tx_packets = pkts;
+ lstats->tx_bytes = bytes;
+ lstats->tx_dropped = drop;
+
+ pkts = 0;
+ drop = 0;
+ bytes = 0;
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ oq_no = lio->linfo.rxpciq[i].s.q_no;
+ oq_stats = &oct->droq[oq_no]->stats;
+ pkts += oq_stats->rx_pkts_received;
+ drop += (oq_stats->rx_dropped +
+ oq_stats->dropped_nodispatch +
+ oq_stats->dropped_toomany +
+ oq_stats->dropped_nomem);
+ bytes += oq_stats->rx_bytes_received;
+ }
+
+ lstats->rx_bytes = bytes;
+ lstats->rx_packets = pkts;
+ lstats->rx_dropped = drop;
+
+ lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
+
+ /* detailed rx_errors: */
+ lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
+ /* received packets with CRC errors */
+ lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
+ /* received frames with alignment errors */
+ lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
+
+ lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
+ lstats->rx_frame_errors;
+
+ /* detailed tx_errors */
+ lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
+ lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
+
+ lstats->tx_errors = lstats->tx_aborted_errors +
+ lstats->tx_carrier_errors;
+}
+
+/**
+ * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
+ * @netdev: network device
+ * @ifr: interface request
+ */
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct hwtstamp_config conf;
+
+ if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
+ return -EFAULT;
+
+ switch (conf.tx_type) {
+ case HWTSTAMP_TX_ON:
+ case HWTSTAMP_TX_OFF:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (conf.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ conf.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
+ ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+ else
+ ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
+
+ return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
+}
+
+/**
+ * liquidio_ioctl - ioctl handler
+ * @netdev: network device
+ * @ifr: interface request
+ * @cmd: command
+ */
+static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return hwtstamp_ioctl(netdev, ifr);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
+{
+ struct sk_buff *skb = (struct sk_buff *)buf;
+ struct octnet_buf_free_info *finfo;
+ struct oct_timestamp_resp *resp;
+ struct octeon_soft_command *sc;
+ struct lio *lio;
+
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ lio = finfo->lio;
+ sc = finfo->sc;
+ oct = lio->oct_dev;
+ resp = (struct oct_timestamp_resp *)sc->virtrptr;
+
+ if (status != OCTEON_REQUEST_DONE) {
+ dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
+ CVM_CAST64(status));
+ resp->timestamp = 0;
+ }
+
+ octeon_swap_8B_data(&resp->timestamp, 1);
+
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns = resp->timestamp;
+
+ netif_info(lio, tx_done, lio->netdev,
+ "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+ skb, (unsigned long long)ns);
+ ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ octeon_free_soft_command(oct, sc);
+ tx_buffer_free(skb);
+}
+
+/**
+ * send_nic_timestamp_pkt - Send a data packet that will be timestamped
+ * @oct: octeon device
+ * @ndata: pointer to network data
+ * @finfo: pointer to private network data
+ * @xmit_more: more packets are queued behind this one
+ */
+static int send_nic_timestamp_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ struct octnet_buf_free_info *finfo,
+ int xmit_more)
+{
+ struct octeon_soft_command *sc;
+ int ring_doorbell;
+ struct lio *lio;
+ int retval;
+ u32 len;
+
+ lio = finfo->lio;
+
+ sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+ sizeof(struct oct_timestamp_resp));
+ finfo->sc = sc;
+
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+ return IQ_SEND_FAILED;
+ }
+
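+ /* Promote the request to a response type so the firmware's reply is
+ * delivered through the soft-command callback (handle_timestamp).
+ */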
+ if (ndata->reqtype == REQTYPE_NORESP_NET)
+ ndata->reqtype = REQTYPE_RESP_NET;
+ else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+ ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+ sc->callback = handle_timestamp;
+ sc->callback_arg = finfo->skb;
+ sc->iq_no = ndata->q_no;
+
+ len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
+
+ ring_doorbell = !xmit_more;
+
+ retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+ sc, len, ndata->reqtype);
+
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+ retval);
+ octeon_free_soft_command(oct, sc);
+ } else {
+ netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+ }
+
+ return retval;
+}
+
+/**
+ * liquidio_xmit - Transmit network packets to the Octeon interface
+ * @skb: skbuff struct to be passed to network layer.
+ * @netdev: pointer to network device
+ * @returns whether the packet was transmitted to the device okay or not
+ * (NETDEV_TX_OK or NETDEV_TX_BUSY)
+ */
+static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct octnet_buf_free_info *finfo;
+ union octnic_cmd_setup cmdsetup;
+ struct octnic_data_pkt ndata;
+ struct octeon_instr_irh *irh;
+ struct oct_iq_stats *stats;
+ struct octeon_device *oct;
+ int q_idx = 0, iq_no = 0;
+ union tx_info *tx_info;
+ int xmit_more = 0;
+ struct lio *lio;
+ int status = 0;
+ u64 dptr = 0;
+ u32 tag = 0;
+ int j;
+
+ lio = GET_LIO(netdev);
+ oct = lio->oct_dev;
+
+ q_idx = skb_iq(lio->oct_dev, skb);
+ tag = q_idx;
+ iq_no = lio->linfo.txpciq[q_idx].s.q_no;
+
+ stats = &oct->instr_queue[iq_no]->stats;
+
+ /* Check for all conditions in which the current packet cannot be
+ * transmitted.
+ */
+ if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+ (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
+ lio->linfo.link.s.link_up);
+ goto lio_xmit_failed;
+ }
+
+ /* Use space in skb->cb to store info used to unmap and
+ * free the buffers.
+ */
+ finfo = (struct octnet_buf_free_info *)skb->cb;
+ finfo->lio = lio;
+ finfo->skb = skb;
+ finfo->sc = NULL;
+
+ /* Prepare the attributes for the data to be passed to OSI. */
+ memset(&ndata, 0, sizeof(struct octnic_data_pkt));
+
+ ndata.buf = finfo;
+
+ ndata.q_no = iq_no;
+
+ if (octnet_iq_is_full(oct, ndata.q_no)) {
+ /* defer sending if queue is full */
+ netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
+ ndata.q_no);
+ stats->tx_iq_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ ndata.datasize = skb->len;
+
+ cmdsetup.u64 = 0;
+ cmdsetup.s.iq_no = iq_no;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->encapsulation) {
+ cmdsetup.s.tnl_csum = 1;
+ stats->tx_vxlan++;
+ } else {
+ cmdsetup.s.transport_csum = 1;
+ }
+ }
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ cmdsetup.s.timestamp = 1;
+ }
+
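+ /* Linear skbs use a single DMA mapping; fragmented skbs are sent
+ * through a gather list taken from the per-queue free list.
+ */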
+ if (!skb_shinfo(skb)->nr_frags) {
+ cmdsetup.s.u.datasize = skb->len;
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+ /* Map the linear skb data for transfer to the device */
+ dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ ndata.cmd.cmd3.dptr = dptr;
+ finfo->dptr = dptr;
+ ndata.reqtype = REQTYPE_NORESP_NET;
+
+ } else {
+ skb_frag_t *frag;
+ struct octnic_gather *g;
+ int i, frags;
+
+ spin_lock(&lio->glist_lock[q_idx]);
+ g = (struct octnic_gather *)
+ lio_list_delete_head(&lio->glist[q_idx]);
+ spin_unlock(&lio->glist_lock[q_idx]);
+
+ if (!g) {
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit scatter gather: glist null!\n");
+ goto lio_xmit_failed;
+ }
+
+ cmdsetup.s.gather = 1;
+ cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+
+ memset(g->sg, 0, g->sg_size);
+
+ g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ (skb->len - skb->data_len),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+ add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
+
+ frags = skb_shinfo(skb)->nr_frags;
+ i = 1;
+ while (frags--) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+
+ g->sg[(i >> 2)].ptr[(i & 3)] =
+ skb_frag_dma_map(&oct->pci_dev->dev,
+ frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg[i >> 2].ptr[i & 3])) {
+ dma_unmap_single(&oct->pci_dev->dev,
+ g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ for (j = 1; j < i; j++) {
+ frag = &skb_shinfo(skb)->frags[j - 1];
+ dma_unmap_page(&oct->pci_dev->dev,
+ g->sg[j >> 2].ptr[j & 3],
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ }
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
+ (i & 3));
+ i++;
+ }
+
+ dptr = g->sg_dma_ptr;
+
+ ndata.cmd.cmd3.dptr = dptr;
+ finfo->dptr = dptr;
+ finfo->g = g;
+
+ ndata.reqtype = REQTYPE_NORESP_NET_SG;
+ }
+
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
+
+ if (skb_shinfo(skb)->gso_size) {
+ tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
+ tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ }
+
+ /* HW insert VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
+ irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
+ }
+
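+ /* When more packets are queued behind this one, the send path defers
+ * ringing the doorbell (ring_doorbell = !xmit_more).
+ */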
+ xmit_more = netdev_xmit_more();
+
+ if (unlikely(cmdsetup.s.timestamp))
+ status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
+ else
+ status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
+ if (status == IQ_SEND_FAILED)
+ goto lio_xmit_failed;
+
+ netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
+
+ if (status == IQ_SEND_STOP) {
+ dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
+ iq_no);
+ netif_stop_subqueue(netdev, q_idx);
+ }
+
+ netif_trans_update(netdev);
+
+ if (tx_info->s.gso_segs)
+ stats->tx_done += tx_info->s.gso_segs;
+ else
+ stats->tx_done++;
+ stats->tx_tot_bytes += ndata.datasize;
+
+ return NETDEV_TX_OK;
+
+lio_xmit_failed:
+ stats->tx_dropped++;
+ netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
+ iq_no, stats->tx_dropped);
+ if (dptr)
+ dma_unmap_single(&oct->pci_dev->dev, dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+
+ octeon_ring_doorbell_locked(oct, iq_no);
+
+ tx_buffer_free(skb);
+ return NETDEV_TX_OK;
+}
+
+/**
+ * liquidio_tx_timeout - Network device Tx timeout
+ * @netdev: pointer to network device
+ * @txqueue: index of the hung transmit queue
+ */
+static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+{
+ struct lio *lio;
+
+ lio = GET_LIO(netdev);
+
+ netif_info(lio, tx_err, lio->netdev,
+ "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
+ netdev->stats.tx_dropped);
+ netif_trans_update(netdev);
+ wake_txqs(netdev);
+}
+
+static int
+liquidio_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)), u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int
+liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)), u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+/**
+ * liquidio_set_rxcsum_command - Enable/disable Rx checksum offload
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE or OCTNET_CMD_RXCSUM_DISABLE
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.param1 = rx_cmd;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+/**
+ * liquidio_vxlan_port_command - Add/delete a VxLAN UDP port in firmware
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @vxlan_port: VxLAN port to be added or deleted
+ * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD or OCTNET_CMD_VXLAN_PORT_DEL
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
+ u16 vxlan_port, u8 vxlan_cmd_bit)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.more = vxlan_cmd_bit;
+ nctrl.ncmd.s.param1 = vxlan_port;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
+ ret);
+ if (ret > 0)
+ ret = -EIO;
+ }
+ return ret;
+}
+
+static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int entry,
+ struct udp_tunnel_info *ti)
+{
+ return liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table,
+ unsigned int entry,
+ struct udp_tunnel_info *ti)
+{
+ return liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
+static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
+ .set_port = liquidio_udp_tunnel_set_port,
+ .unset_port = liquidio_udp_tunnel_unset_port,
+ .tables = {
+ { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
+ },
+};
+
+/**
+ * liquidio_fix_features - Net device fix features
+ * @netdev: pointer to network device
+ * @request: features requested
+ *
+ * Return: updated features list
+ */
+static netdev_features_t liquidio_fix_features(struct net_device *netdev,
+ netdev_features_t request)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if ((request & NETIF_F_RXCSUM) &&
+ !(lio->dev_capability & NETIF_F_RXCSUM))
+ request &= ~NETIF_F_RXCSUM;
+
+ if ((request & NETIF_F_HW_CSUM) &&
+ !(lio->dev_capability & NETIF_F_HW_CSUM))
+ request &= ~NETIF_F_HW_CSUM;
+
+ if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+ request &= ~NETIF_F_TSO;
+
+ if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+ request &= ~NETIF_F_TSO6;
+
+ if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ /* Disable LRO if RXCSUM is off */
+ if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ request &= ~NETIF_F_LRO;
+
+ return request;
+}
+
+/**
+ * liquidio_set_features - Net device set features
+ * @netdev: pointer to network device
+ * @features: features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct lio *lio = netdev_priv(netdev);
+
+ if (!((netdev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+ else if (!(features & NETIF_F_LRO) &&
+ (lio->dev_capability & NETIF_F_LRO))
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+ if (!(netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ (features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ else if ((netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ !(features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_DISABLE);
+
+ return 0;
+}
+
+static const struct net_device_ops lionetdevops = {
+ .ndo_open = liquidio_open,
+ .ndo_stop = liquidio_stop,
+ .ndo_start_xmit = liquidio_xmit,
+ .ndo_get_stats64 = liquidio_get_stats64,
+ .ndo_set_mac_address = liquidio_set_mac,
+ .ndo_set_rx_mode = liquidio_set_mcast_list,
+ .ndo_tx_timeout = liquidio_tx_timeout,
+ .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
+ .ndo_change_mtu = liquidio_change_mtu,
+ .ndo_eth_ioctl = liquidio_ioctl,
+ .ndo_fix_features = liquidio_fix_features,
+ .ndo_set_features = liquidio_set_features,
+};
+
+static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ union oct_link_status *ls;
+ int gmxport = 0;
+ int i;
+
+ if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
+ dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
+ recv_pkt->buffer_size[0],
+ recv_pkt->rh.r_nic_info.gmxport);
+ goto nic_info_err;
+ }
+
+ gmxport = recv_pkt->rh.r_nic_info.gmxport;
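+ /* The link status follows the DROQ info header in the first receive
+ * buffer.
+ */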
+ ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
+ OCT_DROQ_INFO_SIZE);
+
+ octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+
+ for (i = 0; i < oct->ifcount; i++) {
+ if (oct->props[i].gmxport == gmxport) {
+ update_link_status(oct->props[i].netdev, ls);
+ break;
+ }
+ }
+
+nic_info_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+/**
+ * setup_nic_devices - Setup network interfaces
+ * @octeon_dev: octeon device
+ *
+ * Called during init time for each device. It assumes the NIC
+ * is already up and running. The link information for each
+ * interface is passed in link_info.
+ */
+static int setup_nic_devices(struct octeon_device *octeon_dev)
+{
+ int retval, num_iqueues, num_oqueues;
+ u32 resp_size, data_size;
+ struct liquidio_if_cfg_resp *resp;
+ struct octeon_soft_command *sc;
+ union oct_nic_if_cfg if_cfg;
+ struct octdev_props *props;
+ struct net_device *netdev;
+ struct lio_version *vdata;
+ struct lio *lio = NULL;
+ u8 mac[ETH_ALEN], i, j;
+ u32 ifidx_or_pfnum;
+
+ ifidx_or_pfnum = octeon_dev->pf_num;
+
+ /* This is to handle link status changes */
+ octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
+ lio_nic_info, octeon_dev);
+
+ /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
+ * They are handled directly.
+ */
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
+ free_netbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
+ free_netsgbuf);
+
+ octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
+ free_netsgbuf_with_resp);
+
+ for (i = 0; i < octeon_dev->ifcount; i++) {
+ resp_size = sizeof(struct liquidio_if_cfg_resp);
+ data_size = sizeof(struct lio_version);
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(octeon_dev, data_size,
+ resp_size, 0);
+ resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+ vdata = (struct lio_version *)sc->virtdptr;
+
+ *((u64 *)vdata) = 0;
+ vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
+ vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
+ vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
+
+ if_cfg.u64 = 0;
+
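+ /* Request as many rings as the PF granted this VF. */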
+ if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
+ if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
+ if_cfg.s.base_queue = 0;
+
+ sc->iq_no = 0;
+
+ octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
+ 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ retval = octeon_send_soft_command(octeon_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed status: %x\n", retval);
+ /* Soft instr is freed by driver in case of failure. */
+ octeon_free_soft_command(octeon_dev, sc);
+ return -EIO;
+ }
+
+ /* Wait for the response to arrive or time out. */
+ retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
+ if (retval)
+ return retval;
+
+ retval = resp->status;
+ if (retval) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "iq/oq config failed, retval = %d\n", retval);
+ WRITE_ONCE(sc->caller_is_done, true);
+ return -EIO;
+ }
+
+ snprintf(octeon_dev->fw_info.liquidio_firmware_version,
+ 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
+
+ octeon_swap_8B_data((u64 *)(&resp->cfg_info),
+ (sizeof(struct liquidio_if_cfg_info)) >> 3);
+
+ num_iqueues = hweight64(resp->cfg_info.iqmask);
+ num_oqueues = hweight64(resp->cfg_info.oqmask);
+
+ if (!num_iqueues || !num_oqueues) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
+ resp->cfg_info.iqmask, resp->cfg_info.oqmask);
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
+ }
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
+ i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
+ num_iqueues, num_oqueues);
+
+ netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
+
+ if (!netdev) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
+ WRITE_ONCE(sc->caller_is_done, true);
+ goto setup_nic_dev_done;
+ }
+
+ SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
+
+ /* Associate the routines that will handle different
+ * netdev tasks.
+ */
+ netdev->netdev_ops = &lionetdevops;
+
+ lio = GET_LIO(netdev);
+
+ memset(lio, 0, sizeof(struct lio));
+
+ lio->ifidx = ifidx_or_pfnum;
+
+ props = &octeon_dev->props[i];
+ props->gmxport = resp->cfg_info.linfo.gmxport;
+ props->netdev = netdev;
+
+ lio->linfo.num_rxpciq = num_oqueues;
+ lio->linfo.num_txpciq = num_iqueues;
+
+ for (j = 0; j < num_oqueues; j++) {
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
+ }
+ for (j = 0; j < num_iqueues; j++) {
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
+ }
+
+ lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
+ lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
+ lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
+ lio->linfo.macaddr_is_admin_asgnd =
+ resp->cfg_info.linfo.macaddr_is_admin_asgnd;
+ lio->linfo.macaddr_spoofchk =
+ resp->cfg_info.linfo.macaddr_spoofchk;
+
+ lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
+ lio->dev_capability = NETIF_F_HIGHDMA
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_GRO
+ | NETIF_F_LRO;
+ netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
+
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6, Checksums for this device
+ */
+ lio->enc_dev_capability = NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_GSO_UDP_TUNNEL
+ | NETIF_F_HW_CSUM | NETIF_F_SG
+ | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+
+ netdev->hw_enc_features =
+ (lio->enc_dev_capability & ~NETIF_F_LRO);
+ netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
+
+ netdev->vlan_features = lio->dev_capability;
+ /* Add any unchangeable hw features */
+ lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
+
+ netdev->hw_features = lio->dev_capability;
+ netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+ /* MTU range: 68 - 16000 */
+ netdev->min_mtu = LIO_MIN_MTU_SIZE;
+ netdev->max_mtu = LIO_MAX_MTU_SIZE;
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ /* Point to the properties for octeon device to which this
+ * interface belongs.
+ */
+ lio->oct_dev = octeon_dev;
+ lio->octprops = props;
+ lio->netdev = netdev;
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "if%d gmx: %d hw_addr: 0x%llx\n", i,
+ lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
+
+ /* 64-bit swap required on LE machines */
+ octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
+ for (j = 0; j < ETH_ALEN; j++)
+ mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
+
+ /* Copy MAC Address to OS network device structure */
+ eth_hw_addr_set(netdev, mac);
+
+ if (liquidio_setup_io_queues(octeon_dev, i,
+ lio->linfo.num_txpciq,
+ lio->linfo.num_rxpciq)) {
+ dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
+ goto setup_nic_dev_free;
+ }
+
+ ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
+
+ /* For VFs, enable Octeon device interrupts here,
+ * as this is contingent upon IO queue setup
+ */
+ octeon_dev->fn_list.enable_interrupt(octeon_dev,
+ OCTEON_ALL_INTR);
+
+ /* By default, all interfaces on a single Octeon use the same
+ * tx and rx queues
+ */
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+
+ lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
+ lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
+
+ if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "Gather list allocation failed\n");
+ goto setup_nic_dev_free;
+ }
+
+ /* Register ethtool support */
+ liquidio_set_ethtool_ops(netdev);
+ if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
+ octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
+ else
+ octeon_dev->priv_flags = 0x0;
+
+ if (netdev->features & NETIF_F_LRO)
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ if (setup_link_status_change_wq(netdev))
+ goto setup_nic_dev_free;
+
+ if (setup_rx_oom_poll_fn(netdev))
+ goto setup_nic_dev_free;
+
+ /* Register the network device with the OS */
+ if (register_netdev(netdev)) {
+ dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
+ goto setup_nic_dev_free;
+ }
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
+ i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ netif_carrier_off(netdev);
+ lio->link_changes++;
+
+ ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+
+ /* Send a command to the firmware to enable Rx checksum offload
+ * by default when the driver sets up this device.
+ */
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
+ OCTNET_CMD_TXCSUM_ENABLE);
+
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup successful\n", i);
+
+ octeon_dev->no_speed_setting = 1;
+ }
+
+ return 0;
+
+setup_nic_dev_free:
+
+ while (i--) {
+ dev_err(&octeon_dev->pci_dev->dev,
+ "NIC ifidx:%d Setup failed\n", i);
+ liquidio_destroy_nic_device(octeon_dev, i);
+ }
+
+setup_nic_dev_done:
+
+ return -ENODEV;
+}
+
+/**
+ * liquidio_init_nic_module - initialize the NIC
+ * @oct: octeon device
+ *
+ * This initialization routine is called once the Octeon device application is
+ * up and running
+ */
+static int liquidio_init_nic_module(struct octeon_device *oct)
+{
+ int num_nic_ports = 1;
+ int i, retval = 0;
+
+ dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
+
+ /* Only the default iq and oq were initialized; initialize the rest
+ * as well and run the port_config command for each port.
+ */
+ oct->ifcount = num_nic_ports;
+ memset(oct->props, 0,
+ sizeof(struct octdev_props) * num_nic_ports);
+
+ for (i = 0; i < MAX_OCTEON_LINKS; i++)
+ oct->props[i].gmxport = -1;
+
+ retval = setup_nic_devices(oct);
+ if (retval) {
+ dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
+ goto octnet_init_failure;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+ return retval;
+
+octnet_init_failure:
+
+ oct->ifcount = 0;
+
+ return retval;
+}
+
+/**
+ * octeon_device_init - Device initialization for each Octeon device that is probed
+ * @oct: octeon device
+ */
+static int octeon_device_init(struct octeon_device *oct)
+{
+ u32 rev_id;
+ int j;
+
+ atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
+
+ /* Enable access to the octeon device and make its DMA capability
+ * known to the OS.
+ */
+ if (octeon_pci_os_setup(oct))
+ return 1;
+ atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
+
+ oct->chip_id = OCTEON_CN23XX_VF_VID;
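+ /* PCI config offset 8 holds the class code and revision; the low
+ * byte is the revision ID.
+ */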
+ pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+ oct->rev_id = rev_id & 0xff;
+
+ if (cn23xx_setup_octeon_vf_device(oct))
+ return 1;
+
+ atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
+
+ oct->app_mode = CVM_DRV_NIC_APP;
+
+ /* Initialize the dispatch mechanism used to push packets arriving on
+ * Octeon Output queues.
+ */
+ if (octeon_init_dispatch_list(oct))
+ return 1;
+
+ atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+ if (octeon_set_io_queues_off(oct)) {
+ dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
+ return 1;
+ }
+
+ if (oct->fn_list.setup_device_regs(oct)) {
+ dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
+ return 1;
+ }
+
+ /* Initialize soft command buffer pool */
+ if (octeon_setup_sc_buffer_pool(oct)) {
+ dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
+
+ /* Setup the data structures that manage this Octeon's Input queues. */
+ if (octeon_setup_instr_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
+
+ /* Initialize lists to manage the requests of different types that
+ * arrive from user & kernel applications for this octeon device.
+ */
+ if (octeon_setup_response_list(oct)) {
+ dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
+
+ if (octeon_setup_output_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
+
+ if (oct->fn_list.setup_mbox(oct)) {
+ dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
+
+ if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
+ dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
+ return 1;
+ }
+ atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
+
+ dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
+ oct->sriov_info.rings_per_vf);
+
+ /* Setup the interrupt handler and record the INT SUM register address*/
+ if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
+ return 1;
+
+ atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
+
+ /* ***************************************************************
+ * The interrupts need to be enabled for the PF<-->VF handshake.
+ * They are [re]-enabled after the PF<-->VF handshake so that the
+ * correct OQ tick value is used (i.e. the value retrieved from
+ * the PF as part of the handshake).
+ */
+
+ /* Enable Octeon device interrupts */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+ if (cn23xx_octeon_pfvf_handshake(oct))
+ return 1;
+
+ /* Here we [re]-enable the interrupts so that the correct OQ tick value
+ * is used (i.e. the value that was retrieved during the handshake)
+ */
+
+ /* Enable Octeon device interrupts */
+ oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+ /* *************************************************************** */
+
+ /* Enable the input and output queues for this Octeon device */
+ if (oct->fn_list.enable_io_queues(oct)) {
+ dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
+ return 1;
+ }
+
+ atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
+
+ atomic_set(&oct->status, OCT_DEV_HOST_OK);
+
+ /* Send Credit for Octeon Output queues. Credits are always sent after
+ * the output queue is enabled.
+ */
+ for (j = 0; j < oct->num_oqs; j++)
+ writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
+
+ /* Packets can start arriving on the output queues from this point. */
+
+ atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+ atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+ if (liquidio_init_nic_module(oct))
+ return 1;
+
+ return 0;
+}
+
+static int __init liquidio_vf_init(void)
+{
+ octeon_init_device_list(0);
+ return pci_register_driver(&liquidio_vf_pci_driver);
+}
+
+static void __exit liquidio_vf_exit(void)
+{
+ pci_unregister_driver(&liquidio_vf_pci_driver);
+
+ pr_info("LiquidIO_VF network module is now unloaded\n");
+}
+
+module_init(liquidio_vf_init);
+module_exit(liquidio_vf_exit);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
new file mode 100644
index 000000000..600de587d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -0,0 +1,672 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "lio_vf_rep.h"
+
+static int lio_vf_rep_open(struct net_device *ndev);
+static int lio_vf_rep_stop(struct net_device *ndev);
+static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
+ struct net_device *ndev);
+static void lio_vf_rep_tx_timeout(struct net_device *netdev, unsigned int txqueue);
+static int lio_vf_rep_phys_port_name(struct net_device *dev,
+ char *buf, size_t len);
+static void lio_vf_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64);
+static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
+static int lio_vf_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid);
+
+static const struct net_device_ops lio_vf_rep_ndev_ops = {
+ .ndo_open = lio_vf_rep_open,
+ .ndo_stop = lio_vf_rep_stop,
+ .ndo_start_xmit = lio_vf_rep_pkt_xmit,
+ .ndo_tx_timeout = lio_vf_rep_tx_timeout,
+ .ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
+ .ndo_get_stats64 = lio_vf_rep_get_stats64,
+ .ndo_change_mtu = lio_vf_rep_change_mtu,
+ .ndo_get_port_parent_id = lio_vf_get_port_parent_id,
+};
+
+static int
+lio_vf_rep_send_soft_command(struct octeon_device *oct,
+ void *req, int req_size,
+ void *resp, int resp_size)
+{
+ int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
+ struct octeon_soft_command *sc = NULL;
+ struct lio_vf_rep_resp *rep_resp;
+ void *sc_req;
+ int err;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, req_size,
+ tot_resp_size, 0);
+ if (!sc)
+ return -ENOMEM;
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
+ memcpy(sc_req, req, req_size);
+
+ rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
+ memset(rep_resp, 0, tot_resp_size);
+ WRITE_ONCE(rep_resp->status, 1);
+
+ sc->iq_no = 0;
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
+ OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
+
+ err = octeon_send_soft_command(oct, sc);
+ if (err == IQ_SEND_FAILED)
+ goto free_buff;
+
+ err = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (err)
+ return err;
+
+ err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
+ if (err)
+ dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
+ else if (resp)
+ memcpy(resp, (rep_resp + 1), resp_size);
+
+ WRITE_ONCE(sc->caller_is_done, true);
+ return err;
+
+free_buff:
+ octeon_free_soft_command(oct, sc);
+
+ return err;
+}
+
+static int
+lio_vf_rep_open(struct net_device *ndev)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "VF_REP open failed with err %d\n", ret);
+ return -EIO;
+ }
+
+ atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
+ LIO_IFSTATE_RUNNING));
+
+ netif_carrier_on(ndev);
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int
+lio_vf_rep_stop(struct net_device *ndev)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "VF_REP dev stop failed with err %d\n", ret);
+ return -EIO;
+ }
+
+ atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
+ ~LIO_IFSTATE_RUNNING));
+
+ netif_tx_disable(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static void
+lio_vf_rep_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ netif_trans_update(ndev);
+
+ netif_wake_queue(ndev);
+}
+
+static void
+lio_vf_rep_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats64)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
+
+ /* Swap tx and rx stats as VF rep is a switch port */
+ stats64->tx_packets = vf_rep->stats.rx_packets;
+ stats64->tx_bytes = vf_rep->stats.rx_bytes;
+ stats64->tx_dropped = vf_rep->stats.rx_dropped;
+
+ stats64->rx_packets = vf_rep->stats.tx_packets;
+ stats64->rx_bytes = vf_rep->stats.tx_bytes;
+ stats64->rx_dropped = vf_rep->stats.tx_dropped;
+}
+
+static int
+lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev,
+ "Change MTU failed with err %d\n", ret);
+ return -EIO;
+ }
+
+ ndev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int
+lio_vf_rep_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
+ struct octeon_device *oct = vf_rep->oct;
+ int ret;
+
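+ /* VF representor ifidx values start at pf_num * 64 + 1, so this
+ * recovers the zero-based VF number.
+ */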
+ ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
+ vf_rep->ifidx - oct->pf_num * 64 - 1);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static struct net_device *
+lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
+{
+ int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
+ int vfid_mask = max_vfs - 1;
+
+ if (ifidx <= oct->pf_num * max_vfs ||
+ ifidx >= oct->pf_num * max_vfs + max_vfs)
+ return NULL;
+
+ /* ifidx 1-63 for PF0 VFs
+ * ifidx 65-127 for PF1 VFs
+ */
+ vf_id = (ifidx & vfid_mask) - 1;
+
+ return oct->vf_rep_list.ndev[vf_id];
+}
+
+static void
+lio_vf_rep_copy_packet(struct octeon_device *oct,
+ struct sk_buff *skb,
+ int len)
+{
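+ /* For large packets, copy the first MIN_SKB_SIZE bytes into the skb
+ * head and attach the rest of the receive page as a frag; small
+ * packets are copied entirely and the page is released.
+ */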
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+
+ skb_copy_to_linear_data(skb, page_address(pg_info->page) +
+ pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+}
+
+static int
+lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
+{
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ struct lio_vf_rep_desc *vf_rep;
+ struct net_device *vf_ndev;
+ struct octeon_device *oct;
+ union octeon_rh *rh;
+ struct sk_buff *skb;
+ int i, ifidx;
+
+ oct = lio_get_device(recv_pkt->octeon_id);
+ if (!oct)
+ goto free_buffers;
+
+ skb = recv_pkt->buffer_ptr[0];
+ rh = &recv_pkt->rh;
+ ifidx = rh->r.ossp;
+
+ vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
+ if (!vf_ndev)
+ goto free_buffers;
+
+ vf_rep = netdev_priv(vf_ndev);
+ if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
+ recv_pkt->buffer_count > 1)
+ goto free_buffers;
+
+ skb->dev = vf_ndev;
+
+ /* Multiple buffers are not used for vf_rep packets.
+ * So just buffer_size[0] is valid.
+ */
+ lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);
+
+ skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(skb);
+
+ octeon_free_recv_info(recv_info);
+
+ return 0;
+
+free_buffers:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+
+ octeon_free_recv_info(recv_info);
+
+ return 0;
+}
+
+static void
+lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
+ u32 status, void *buf)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+ struct sk_buff *skb = sc->ctxptr;
+ struct net_device *ndev = skb->dev;
+ u32 iq_no;
+
+ dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
+ sc->datasize, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ iq_no = sc->iq_no;
+ octeon_free_soft_command(oct, sc);
+
+ if (octnet_iq_is_full(oct, iq_no))
+ return;
+
+ if (netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+}
+
+static netdev_tx_t
+lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
+ struct net_device *parent_ndev = vf_rep->parent_ndev;
+ struct octeon_device *oct = vf_rep->oct;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_soft_command *sc;
+ struct lio *parent_lio;
+ int status;
+
+ parent_lio = GET_LIO(parent_ndev);
+
+ if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
+ skb->len <= 0)
+ goto xmit_failed;
+
+ if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
+ dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, 0, 16, 0);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
+ goto xmit_failed;
+ }
+
+ /* Multiple buffers are not used for vf_rep packets. */
+ if (skb_shinfo(skb)->nr_frags != 0) {
+ dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
+ octeon_free_soft_command(oct, sc);
+ goto xmit_failed;
+ }
+
+ sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
+ dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
+ octeon_free_soft_command(oct, sc);
+ goto xmit_failed;
+ }
+
+ sc->virtdptr = skb->data;
+ sc->datasize = skb->len;
+ sc->ctxptr = skb;
+ sc->iq_no = parent_lio->txq;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
+ vf_rep->ifidx, 0, 0);
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+ pki_ih3->tagtype = ORDERED_TAG;
+
+ sc->callback = lio_vf_rep_packet_sent_callback;
+ sc->callback_arg = sc;
+
+ status = octeon_send_soft_command(oct, sc);
+ if (status == IQ_SEND_FAILED) {
+ dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
+ sc->datasize, DMA_TO_DEVICE);
+ octeon_free_soft_command(oct, sc);
+ goto xmit_failed;
+ }
+
+ if (status == IQ_SEND_STOP)
+ netif_stop_queue(ndev);
+
+ netif_trans_update(ndev);
+
+ return NETDEV_TX_OK;
+
+xmit_failed:
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static int lio_vf_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
+ struct net_device *parent_ndev = vf_rep->parent_ndev;
+ struct lio *lio = GET_LIO(parent_ndev);
+
+ ppid->id_len = ETH_ALEN;
+ ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);
+
+ return 0;
+}
+
+static void
+lio_vf_rep_fetch_stats(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
+ struct lio_vf_rep_stats stats;
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ oct = vf_rep->oct;
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
+ rep_cfg.ifidx = vf_rep->ifidx;
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
+ &stats, sizeof(stats));
+
+ if (!ret) {
+ octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
+ memcpy(&vf_rep->stats, &stats, sizeof(stats));
+ }
+
+ schedule_delayed_work(&vf_rep->stats_wk.work,
+ msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
+}
+
+int
+lio_vf_rep_create(struct octeon_device *oct)
+{
+ struct lio_vf_rep_desc *vf_rep;
+ struct net_device *ndev;
+ int i, num_vfs;
+
+ if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return 0;
+
+ if (!oct->sriov_info.sriov_enabled)
+ return 0;
+
+ num_vfs = oct->sriov_info.num_vfs_alloced;
+
+ oct->vf_rep_list.num_vfs = 0;
+ for (i = 0; i < num_vfs; i++) {
+ ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));
+
+ if (!ndev) {
+ dev_err(&oct->pci_dev->dev,
+ "VF rep device %d creation failed\n", i);
+ goto cleanup;
+ }
+
+ ndev->min_mtu = LIO_MIN_MTU_SIZE;
+ ndev->max_mtu = LIO_MAX_MTU_SIZE;
+ ndev->netdev_ops = &lio_vf_rep_ndev_ops;
+
+ vf_rep = netdev_priv(ndev);
+ memset(vf_rep, 0, sizeof(*vf_rep));
+
+ vf_rep->ndev = ndev;
+ vf_rep->oct = oct;
+ vf_rep->parent_ndev = oct->props[0].netdev;
+ vf_rep->ifidx = (oct->pf_num * 64) + i + 1;
+
+ eth_hw_addr_random(ndev);
+
+ if (register_netdev(ndev)) {
+ dev_err(&oct->pci_dev->dev, "VF rep nerdev registration failed\n");
+
+ free_netdev(ndev);
+ goto cleanup;
+ }
+
+ netif_carrier_off(ndev);
+
+ INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
+ lio_vf_rep_fetch_stats);
+ vf_rep->stats_wk.ctxptr = (void *)vf_rep;
+ schedule_delayed_work(&vf_rep->stats_wk.work,
+ msecs_to_jiffies
+ (LIO_VF_REP_STATS_POLL_TIME_MS));
+ oct->vf_rep_list.num_vfs++;
+ oct->vf_rep_list.ndev[i] = ndev;
+ }
+
+ if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
+ OPCODE_NIC_VF_REP_PKT,
+ lio_vf_rep_pkt_recv, oct)) {
+ dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");
+
+ goto cleanup;
+ }
+
+ return 0;
+
+cleanup:
+ for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
+ ndev = oct->vf_rep_list.ndev[i];
+ oct->vf_rep_list.ndev[i] = NULL;
+ if (ndev) {
+ vf_rep = netdev_priv(ndev);
+ cancel_delayed_work_sync
+ (&vf_rep->stats_wk.work);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ }
+ }
+
+ oct->vf_rep_list.num_vfs = 0;
+
+ return -1;
+}
+
+void
+lio_vf_rep_destroy(struct octeon_device *oct)
+{
+ struct lio_vf_rep_desc *vf_rep;
+ struct net_device *ndev;
+ int i;
+
+ if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return;
+
+ if (!oct->sriov_info.sriov_enabled)
+ return;
+
+ for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
+ ndev = oct->vf_rep_list.ndev[i];
+ oct->vf_rep_list.ndev[i] = NULL;
+ if (ndev) {
+ vf_rep = netdev_priv(ndev);
+ cancel_delayed_work_sync
+ (&vf_rep->stats_wk.work);
+ netif_tx_disable(ndev);
+ netif_carrier_off(ndev);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+ }
+ }
+
+ oct->vf_rep_list.num_vfs = 0;
+}
+
+static int
+lio_vf_rep_netdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct lio_vf_rep_desc *vf_rep;
+ struct lio_vf_rep_req rep_cfg;
+ struct octeon_device *oct;
+ int ret;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ case NETDEV_CHANGENAME:
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
+ return NOTIFY_DONE;
+
+ vf_rep = netdev_priv(ndev);
+ oct = vf_rep->oct;
+
+ if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
+ dev_err(&oct->pci_dev->dev,
+ "Device name change sync failed as the size is > %d\n",
+ LIO_IF_NAME_SIZE);
+ return NOTIFY_DONE;
+ }
+
+ memset(&rep_cfg, 0, sizeof(rep_cfg));
+ rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
+ rep_cfg.ifidx = vf_rep->ifidx;
+ strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);
+
+ ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
+ sizeof(rep_cfg), NULL, 0);
+ if (ret)
+ dev_err(&oct->pci_dev->dev,
+ "vf_rep netdev name change failed with err %d\n", ret);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lio_vf_rep_netdev_notifier = {
+ .notifier_call = lio_vf_rep_netdev_event,
+};
+
+int
+lio_vf_rep_modinit(void)
+{
+ if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
+ pr_err("netdev notifier registration failed\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+void
+lio_vf_rep_modexit(void)
+{
+ if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
+ pr_err("netdev notifier unregister failed\n");
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h
new file mode 100644
index 000000000..bb3cedc63
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.h
@@ -0,0 +1,49 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2017 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file lio_vf_rep.h
+ * \brief Host Driver: This file defines vf_rep related macros and structures
+ */
+#ifndef __LIO_VF_REP_H__
+#define __LIO_VF_REP_H__
+#define LIO_VF_REP_REQ_TMO_MS 5000
+#define LIO_VF_REP_STATS_POLL_TIME_MS 200
+
+struct lio_vf_rep_desc {
+ struct net_device *parent_ndev;
+ struct net_device *ndev;
+ struct octeon_device *oct;
+ struct lio_vf_rep_stats stats;
+ struct cavium_wk stats_wk;
+ atomic_t ifstate;
+ int ifidx;
+};
+
+struct lio_vf_rep_sc_ctx {
+ struct completion complete;
+};
+
+int lio_vf_rep_create(struct octeon_device *oct);
+void lio_vf_rep_destroy(struct octeon_device *oct);
+int lio_vf_rep_modinit(void);
+void lio_vf_rep_modexit(void);
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
new file mode 100644
index 000000000..4da90757c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -0,0 +1,1039 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file liquidio_common.h
+ * \brief Common: Structures and macros used in PCI-NIC package by core and
+ * host driver.
+ */
+
+#ifndef __LIQUIDIO_COMMON_H__
+#define __LIQUIDIO_COMMON_H__
+
+#include "octeon_config.h"
+
+#define LIQUIDIO_BASE_MAJOR_VERSION 1
+#define LIQUIDIO_BASE_MINOR_VERSION 7
+#define LIQUIDIO_BASE_MICRO_VERSION 2
+#define LIQUIDIO_BASE_VERSION __stringify(LIQUIDIO_BASE_MAJOR_VERSION) "." \
+ __stringify(LIQUIDIO_BASE_MINOR_VERSION)
+
+struct lio_version {
+ u16 major;
+ u16 minor;
+ u16 micro;
+ u16 reserved;
+};
+
+#define CONTROL_IQ 0
+/** Tag types used by Octeon cores in its work. */
+enum octeon_tag_type {
+ ORDERED_TAG = 0,
+ ATOMIC_TAG = 1,
+ NULL_TAG = 2,
+ NULL_NULL_TAG = 3
+};
+
+/* pre-defined host->NIC tag values */
+#define LIO_CONTROL (0x11111110)
+#define LIO_DATA(i) (0x11111111 + (i))
+
+/* Opcodes used by host driver/apps to perform operations on the core.
+ * These are used to identify the major subsystem that the operation
+ * is for.
+ */
+#define OPCODE_CORE 0 /* used for generic core operations */
+#define OPCODE_NIC 1 /* used for NIC operations */
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to be unique for a given subsystem.
+ */
+#define OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
+
+/** OPCODE_CORE subcodes. For future use. */
+
+/** OPCODE_NIC subcodes */
+
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define OPCODE_NIC_CORE_DRV_ACTIVE 0x01
+#define OPCODE_NIC_NW_DATA 0x02 /* network packet data */
+#define OPCODE_NIC_CMD 0x03
+#define OPCODE_NIC_INFO 0x04
+#define OPCODE_NIC_PORT_STATS 0x05
+#define OPCODE_NIC_MDIO45 0x06
+#define OPCODE_NIC_TIMESTAMP 0x07
+#define OPCODE_NIC_INTRMOD_CFG 0x08
+#define OPCODE_NIC_IF_CFG 0x09
+#define OPCODE_NIC_VF_DRV_NOTICE 0x0A
+#define OPCODE_NIC_INTRMOD_PARAMS 0x0B
+#define OPCODE_NIC_QCOUNT_UPDATE 0x12
+#define OPCODE_NIC_SET_TRUSTED_VF 0x13
+#define OPCODE_NIC_SYNC_OCTEON_TIME 0x14
+#define VF_DRV_LOADED 1
+#define VF_DRV_REMOVED -1
+#define VF_DRV_MACADDR_CHANGED 2
+
+#define OPCODE_NIC_VF_REP_PKT 0x15
+#define OPCODE_NIC_VF_REP_CMD 0x16
+#define OPCODE_NIC_UBOOT_CTL 0x17
+
+#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
+
+/* Application codes advertised by the core driver initialization packet. */
+#define CVM_DRV_APP_START 0x0
+#define CVM_DRV_NO_APP 0
+#define CVM_DRV_APP_COUNT 0x2
+#define CVM_DRV_BASE_APP (CVM_DRV_APP_START + 0x0)
+#define CVM_DRV_NIC_APP (CVM_DRV_APP_START + 0x1)
+#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2)
+#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
+
+#define BYTES_PER_DHLEN_UNIT 8
+#define MAX_REG_CNT 2000000U
+#define INTRNAMSIZ 32
+#define IRQ_NAME_OFF(i) ((i) * INTRNAMSIZ)
+#define MAX_IOQ_INTERRUPTS_PER_PF (64 * 2)
+#define MAX_IOQ_INTERRUPTS_PER_VF (8 * 2)
+
+#define SCR2_BIT_FW_LOADED 63
+
+/* App specific capabilities from firmware to pf driver */
+#define LIQUIDIO_TIME_SYNC_CAP 0x1
+#define LIQUIDIO_SWITCHDEV_CAP 0x2
+#define LIQUIDIO_SPOOFCHK_CAP 0x4
+
+/* error status return from firmware */
+#define OCTEON_REQUEST_NO_PERMISSION 0xc
+
+static inline u32 incr_index(u32 index, u32 count, u32 max)
+{
+ if ((index + count) >= max)
+ index = index + count - max;
+ else
+ index += count;
+
+ return index;
+}
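+
+/* Editorial usage sketch (variable names are illustrative): incr_index()
+ * advances a ring index by 'count' slots with wrap-around, e.g. when
+ * stepping a queue's read pointer past processed descriptors:
+ *
+ * read_idx = incr_index(read_idx, pkts_processed, max_descriptors);
+ */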
+
+#define OCT_BOARD_NAME 32
+#define OCT_SERIAL_LEN 64
+
+/* Structure used by core driver to send indication that the Octeon
+ * application is ready.
+ */
+struct octeon_core_setup {
+ u64 corefreq;
+
+ char boardname[OCT_BOARD_NAME];
+
+ char board_serial_number[OCT_SERIAL_LEN];
+
+ u64 board_rev_major;
+
+ u64 board_rev_minor;
+
+};
+
+/*--------------------------- SCATTER GATHER ENTRY -----------------------*/
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * an Octeon input instruction has this format.
+ */
+struct octeon_sg_entry {
+ /** The first 64 bit gives the size of data in each dptr.*/
+ union {
+ u16 size[4];
+ u64 size64;
+ } u;
+
+ /** The 4 dptr pointers for this entry. */
+ u64 ptr[4];
+
+};
+
+#define OCT_SG_ENTRY_SIZE (sizeof(struct octeon_sg_entry))
+
+/* \brief Add size to gather list
+ * @param sg_entry scatter/gather entry
+ * @param size size to add
+ * @param pos position to add it.
+ */
+static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
+ u16 size,
+ u32 pos)
+{
+#ifdef __BIG_ENDIAN_BITFIELD
+ sg_entry->u.size[pos] = size;
+#else
+ sg_entry->u.size[3 - pos] = size;
+#endif
+}
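+
+/* Editorial sketch, not part of the driver (helper name and parameters
+ * are illustrative): filling one gather entry with two DMA-mapped
+ * buffers. add_sg_size() hides the endian-dependent placement of the
+ * per-pointer lengths; unused slots are simply left at zero.
+ */
+static inline void example_fill_sg_entry(struct octeon_sg_entry *sg,
+ u64 dma0, u16 len0,
+ u64 dma1, u16 len1)
+{
+ sg->u.size64 = 0; /* clear all four length slots */
+ sg->ptr[0] = dma0;
+ add_sg_size(sg, len0, 0);
+ sg->ptr[1] = dma1;
+ add_sg_size(sg, len1, 1);
+}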
+
+/*------------------------- End Scatter/Gather ---------------------------*/
+
+#define OCTNET_FRM_LENGTH_SIZE 8
+
+#define OCTNET_FRM_PTP_HEADER_SIZE 8
+
+#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE 64
+
+#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
+
+#define OCTNET_DEFAULT_MTU (1500)
+#define OCTNET_DEFAULT_FRM_SIZE (OCTNET_DEFAULT_MTU + OCTNET_FRM_HEADER_SIZE)
+
+/** NIC Commands are sent using this Octeon Input Queue */
+#define OCTNET_CMD_Q 0
+
+/* NIC Command types */
+#define OCTNET_CMD_CHANGE_MTU 0x1
+#define OCTNET_CMD_CHANGE_MACADDR 0x2
+#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3
+#define OCTNET_CMD_RX_CTL 0x4
+
+#define OCTNET_CMD_SET_MULTI_LIST 0x5
+#define OCTNET_CMD_CLEAR_STATS 0x6
+
+/* command for setting the speed, duplex & autoneg */
+#define OCTNET_CMD_SET_SETTINGS 0x7
+#define OCTNET_CMD_SET_FLOW_CTL 0x8
+
+#define OCTNET_CMD_MDIO_READ_WRITE 0x9
+#define OCTNET_CMD_GPIO_ACCESS 0xA
+#define OCTNET_CMD_LRO_ENABLE 0xB
+#define OCTNET_CMD_LRO_DISABLE 0xC
+#define OCTNET_CMD_SET_RSS 0xD
+#define OCTNET_CMD_WRITE_SA 0xE
+#define OCTNET_CMD_DELETE_SA 0xF
+#define OCTNET_CMD_UPDATE_SA 0x12
+
+#define OCTNET_CMD_TNL_RX_CSUM_CTL 0x10
+#define OCTNET_CMD_TNL_TX_CSUM_CTL 0x11
+#define OCTNET_CMD_IPSECV2_AH_ESP_CTL 0x13
+#define OCTNET_CMD_VERBOSE_ENABLE 0x14
+#define OCTNET_CMD_VERBOSE_DISABLE 0x15
+
+#define OCTNET_CMD_VLAN_FILTER_CTL 0x16
+#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
+#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
+#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
+
+#define OCTNET_CMD_ID_ACTIVE 0x1a
+
+#define OCTNET_CMD_SET_UC_LIST 0x1b
+#define OCTNET_CMD_SET_VF_LINKSTATE 0x1c
+
+#define OCTNET_CMD_QUEUE_COUNT_CTL 0x1f
+
+#define OCTNET_CMD_GROUP1 1
+#define OCTNET_CMD_SET_VF_SPOOFCHK 0x1
+#define OCTNET_GROUP1_LAST_CMD OCTNET_CMD_SET_VF_SPOOFCHK
+
+#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
+#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
+#define OCTNET_CMD_RXCSUM_ENABLE 0x0
+#define OCTNET_CMD_RXCSUM_DISABLE 0x1
+#define OCTNET_CMD_TXCSUM_ENABLE 0x0
+#define OCTNET_CMD_TXCSUM_DISABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_ENABLE 0x1
+#define OCTNET_CMD_VLAN_FILTER_DISABLE 0x0
+
+#define OCTNET_CMD_FAIL 0x1
+
+#define SEAPI_CMD_FEC_SET 0x0
+#define SEAPI_CMD_FEC_SET_DISABLE 0x0
+#define SEAPI_CMD_FEC_SET_RS 0x1
+#define SEAPI_CMD_FEC_GET 0x1
+
+#define SEAPI_CMD_SPEED_SET 0x2
+#define SEAPI_CMD_SPEED_GET 0x3
+
+#define OPCODE_NIC_VF_PORT_STATS 0x22
+
+#define LIO_CMD_WAIT_TM 100
+
+/* RX(packets coming from wire) Checksum verification flags */
+/* TCP/UDP csum */
+#define CNNIC_L4SUM_VERIFIED 0x1
+#define CNNIC_IPSUM_VERIFIED 0x2
+#define CNNIC_TUN_CSUM_VERIFIED 0x4
+#define CNNIC_CSUM_VERIFIED (CNNIC_IPSUM_VERIFIED | CNNIC_L4SUM_VERIFIED)
+
+/*LROIPV4 and LROIPV6 Flags*/
+#define OCTNIC_LROIPV4 0x1
+#define OCTNIC_LROIPV6 0x2
+
+/* Interface flags communicated between host driver and core app. */
+enum octnet_ifflags {
+ OCTNET_IFFLAG_PROMISC = 0x01,
+ OCTNET_IFFLAG_ALLMULTI = 0x02,
+ OCTNET_IFFLAG_MULTICAST = 0x04,
+ OCTNET_IFFLAG_BROADCAST = 0x08,
+ OCTNET_IFFLAG_UNICAST = 0x10
+};
+
+/* wqe
+ * --------------- 0
+ * | wqe word0-3 |
+ * --------------- 32
+ * | PCI IH |
+ * --------------- 40
+ * | RPTR |
+ * --------------- 48
+ * | PCI IRH |
+ * --------------- 56
+ * | OCT_NET_CMD |
+ * --------------- 64
+ * | Addtl 8-BData |
+ * | |
+ * ---------------
+ */
+
+union octnet_cmd {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 cmd:5;
+
+ u64 more:6; /* How many udd words follow the command */
+
+ u64 cmdgroup:8;
+ u64 reserved:21;
+
+ u64 param1:16;
+
+ u64 param2:8;
+
+#else
+
+ u64 param2:8;
+
+ u64 param1:16;
+
+ u64 reserved:21;
+ u64 cmdgroup:8;
+
+ u64 more:6;
+
+ u64 cmd:5;
+
+#endif
+ } s;
+
+};
+
+#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
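+
+/* Editorial sketch, not part of the driver (helper name is illustrative):
+ * composing one NIC command word with the bitfields above. In the driver
+ * such a word typically travels inside a larger control request that is
+ * queued to the firmware.
+ */
+static inline u64 example_build_change_mtu_cmd(u32 new_mtu)
+{
+ union octnet_cmd nctrl;
+
+ nctrl.u64 = 0;
+ nctrl.s.cmd = OCTNET_CMD_CHANGE_MTU;
+ nctrl.s.param1 = new_mtu; /* new MTU in bytes; fits in 16 bits */
+ nctrl.s.more = 0; /* no extra 64-bit words follow */
+
+ return nctrl.u64;
+}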
+
+/* pkiih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+#define LIO_SOFTCMDRESP_IH2 40
+#define LIO_SOFTCMDRESP_IH3 (40 + 8)
+
+#define LIO_PCICMD_O2 24
+#define LIO_PCICMD_O3 (24 + 8)
+
+/* Instruction Header(DPI) - for OCTEON-III models */
+struct octeon_instr_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved1 */
+ u64 reserved1:32;
+
+#else
+ /** Reserved1 */
+ u64 reserved1:32;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+#endif
+};
+
+/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */
+/** BIG ENDIAN format. */
+struct octeon_instr_pki_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Wider bit */
+ u64 w:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Tag Value */
+ u64 tag:32;
+
+#else
+
+ /** Tag Value */
+ u64 tag:32;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Wider bit */
+ u64 w:1;
+#endif
+
+};
+
+/** Instruction Header */
+struct octeon_instr_ih2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Packet Order / Work Unit selection (1 of 8)*/
+ u64 qos:3;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Tag Value */
+ u64 tag:32;
+#else
+ /** Tag Value */
+ u64 tag:32;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Short Raw Packet Indicator 1=short raw pkt */
+ u64 rs:1;
+
+ /** Core group selection (1 of 16) */
+ u64 grp:4;
+
+ /** Packet Order / Work Unit selection (1 of 8)*/
+ u64 qos:3;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+#endif
+};
+
+/** Input Request Header */
+struct octeon_instr_irh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 opcode:4;
+ u64 rflag:1;
+ u64 subcode:7;
+ u64 vlan:12;
+ u64 priority:3;
+ u64 reserved:5;
+ u64 ossp:32; /* opcode/subcode specific parameters */
+#else
+ u64 ossp:32; /* opcode/subcode specific parameters */
+ u64 reserved:5;
+ u64 priority:3;
+ u64 vlan:12;
+ u64 subcode:7;
+ u64 rflag:1;
+ u64 opcode:4;
+#endif
+};
+
+/** Return Data Parameters */
+struct octeon_instr_rdp {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:49;
+ u64 pcie_port:3;
+ u64 rlen:12;
+#else
+ u64 rlen:12;
+ u64 pcie_port:3;
+ u64 reserved:49;
+#endif
+};
+
+/** Receive Header */
+union octeon_rh {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 u64;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:17;
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ } r;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 extra:28;
+ u64 vlan:12;
+ u64 priority:3;
+ u64 csum_verified:3; /** checksum verified. */
+ u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
+ u64 encap_on:1;
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
+ } r_dh;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:11;
+ u64 num_gmx_ports:8;
+ u64 max_nic_ports:10;
+ u64 app_cap_flags:4;
+ u64 app_mode:8;
+ u64 pkind:8;
+ } r_core_drv_init;
+ struct {
+ u64 opcode:4;
+ u64 subcode:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:8;
+ u64 extra:25;
+ u64 gmxport:16;
+ } r_nic_info;
+#else
+ u64 u64;
+ struct {
+ u64 ossp:32; /** opcode/subcode specific parameters */
+ u64 reserved:17;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r;
+ struct {
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
+ u64 encap_on:1;
+ u64 has_hwtstamp:1; /** 1 = has hwtstamp */
+ u64 csum_verified:3; /** checksum verified. */
+ u64 priority:3;
+ u64 vlan:12;
+ u64 extra:28;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_dh;
+ struct {
+ u64 pkind:8;
+ u64 app_mode:8;
+ u64 app_cap_flags:4;
+ u64 max_nic_ports:10;
+ u64 num_gmx_ports:8;
+ u64 reserved:11;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_core_drv_init;
+ struct {
+ u64 gmxport:16;
+ u64 extra:25;
+ u64 reserved:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 subcode:8;
+ u64 opcode:4;
+ } r_nic_info;
+#endif
+};
+
+#define OCT_RH_SIZE (sizeof(union octeon_rh))
+
+union octnic_packet_params {
+ u32 u32;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved:24;
+ u32 ip_csum:1; /* Perform IP header checksum(s) */
+ /* Perform Outer transport header checksum */
+ u32 transport_csum:1;
+ /* Find tunnel, and perform transport csum. */
+ u32 tnl_csum:1;
+ u32 tsflag:1; /* Timestamp this packet */
+ u32 ipsec_ops:4; /* IPsec operation */
+#else
+ u32 ipsec_ops:4;
+ u32 tsflag:1;
+ u32 tnl_csum:1;
+ u32 transport_csum:1;
+ u32 ip_csum:1;
+ u32 reserved:24;
+#endif
+ } s;
+};
+
+/** Status of a RGMII Link on Octeon as seen by core driver. */
+union oct_link_status {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 duplex:8;
+ u64 mtu:16;
+ u64 speed:16;
+ u64 link_up:1;
+ u64 autoneg:1;
+ u64 if_mode:5;
+ u64 pause:1;
+ u64 flashing:1;
+ u64 phy_type:5;
+ u64 reserved:10;
+#else
+ u64 reserved:10;
+ u64 phy_type:5;
+ u64 flashing:1;
+ u64 pause:1;
+ u64 if_mode:5;
+ u64 autoneg:1;
+ u64 link_up:1;
+ u64 speed:16;
+ u64 mtu:16;
+ u64 duplex:8;
+#endif
+ } s;
+};
+
+enum lio_phy_type {
+ LIO_PHY_PORT_TP = 0x0,
+ LIO_PHY_PORT_FIBRE = 0x1,
+ LIO_PHY_PORT_UNKNOWN,
+};
+
+/** The txpciq info passed to host from the firmware */
+
+union oct_txpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 port:8;
+ u64 pkind:6;
+ u64 use_qpg:1;
+ u64 qpg:11;
+ u64 reserved0:10;
+ u64 ctrl_qpg:11;
+ u64 reserved:9;
+#else
+ u64 reserved:9;
+ u64 ctrl_qpg:11;
+ u64 reserved0:10;
+ u64 qpg:11;
+ u64 use_qpg:1;
+ u64 pkind:6;
+ u64 port:8;
+ u64 q_no:8;
+#endif
+ } s;
+};
+
+/** The rxpciq info passed to host from the firmware */
+
+union oct_rxpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 reserved:56;
+#else
+ u64 reserved:56;
+ u64 q_no:8;
+#endif
+ } s;
+};
+
+/** Information for an OCTEON ethernet interface shared between core & host. */
+struct oct_link_info {
+ union oct_link_status link;
+ u64 hw_addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 gmxport:16;
+ u64 macaddr_is_admin_asgnd:1;
+ u64 rsvd:13;
+ u64 macaddr_spoofchk:1;
+ u64 rsvd1:17;
+ u64 num_txpciq:8;
+ u64 num_rxpciq:8;
+#else
+ u64 num_rxpciq:8;
+ u64 num_txpciq:8;
+ u64 rsvd1:17;
+ u64 macaddr_spoofchk:1;
+ u64 rsvd:13;
+ u64 macaddr_is_admin_asgnd:1;
+ u64 gmxport:16;
+#endif
+
+ union oct_txpciq txpciq[MAX_IOQS_PER_NICIF];
+ union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF];
+};
+
+#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
+
+struct liquidio_if_cfg_info {
+ u64 iqmask; /** mask for IQs enabled for the port */
+ u64 oqmask; /** mask for OQs enabled for the port */
+ struct oct_link_info linfo; /** initial link information */
+ char liquidio_firmware_version[32];
+};
+
+/** Stats for each NIC port in RX direction. */
+struct nic_rx_stats {
+ /* link-level stats */
+ u64 total_rcvd; /* Received packets */
+ u64 bytes_rcvd; /* Octets of received packets */
+ u64 total_bcst; /* Number of non-dropped L2 broadcast packets */
+ u64 total_mcst; /* Number of non-dropped L2 multicast packets */
+ u64 runts; /* Packets shorter than allowed */
+ u64 ctl_rcvd; /* Received PAUSE packets */
+ u64 fifo_err; /* Packets dropped due to RX FIFO full */
+ u64 dmac_drop; /* Packets dropped by the DMAC filter */
+ u64 fcs_err; /* Sum of fragment, overrun, and FCS errors */
+ u64 jabber_err; /* Packets larger than allowed */
+ u64 l2_err; /* Sum of DMA, parity, PCAM access, no memory,
+ * buffer overflow, malformed L2 header or
+ * length, oversize errors
+ **/
+ u64 frame_err; /* Sum of IPv4 and L4 checksum errors */
+ u64 red_drops; /* Packets dropped by RED due to buffer
+ * exhaustion
+ **/
+
+ /* firmware stats */
+ u64 fw_total_rcvd;
+ u64 fw_total_fwd;
+ u64 fw_total_fwd_bytes;
+ u64 fw_total_mcast;
+ u64 fw_total_bcast;
+
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+ u64 fw_rx_vxlan;
+ u64 fw_rx_vxlan_err;
+
+ /* LRO */
+ u64 fw_lro_pkts; /* Number of packets that are LROed */
+ u64 fw_lro_octs; /* Number of octets that are LROed */
+ u64 fw_total_lro; /* Number of LRO packets formed */
+ u64 fw_lro_aborts; /* Number of times LRO of packet aborted */
+ u64 fw_lro_aborts_port;
+ u64 fw_lro_aborts_seq;
+ u64 fw_lro_aborts_tsval;
+ u64 fw_lro_aborts_timer; /* Timer setting error */
+ /* intrmod: packet forward rate */
+ u64 fwd_rate;
+};
+
+/** Stats for each NIC port in TX direction. */
+struct nic_tx_stats {
+ /* link-level stats */
+ u64 total_pkts_sent; /* Total frames sent on the interface */
+ u64 total_bytes_sent; /* Total octets sent on the interface */
+ u64 mcast_pkts_sent; /* Packets sent to the multicast DMAC */
+ u64 bcast_pkts_sent; /* Packets sent to a broadcast DMAC */
+ u64 ctl_sent; /* Control/PAUSE packets sent */
+ u64 one_collision_sent; /* Packets sent that experienced a
+ * single collision before successful
+ * transmission
+ **/
+ u64 multi_collision_sent; /* Packets sent that experienced
+ * multiple collisions before successful
+ * transmission
+ **/
+ u64 max_collision_fail; /* Packets dropped due to excessive
+ * collisions
+ **/
+ u64 max_deferral_fail; /* Packets not sent due to max
+ * deferrals
+ **/
+ u64 fifo_err; /* Packets sent that experienced a
+ * transmit underflow and were
+ * truncated
+ **/
+ u64 runts; /* Packets sent with an octet count
+ * less than 64
+ **/
+ u64 total_collisions; /* Packets dropped due to excessive
+ * collisions
+ **/
+
+ /* firmware stats */
+ u64 fw_total_sent;
+ u64 fw_total_fwd;
+ u64 fw_total_fwd_bytes;
+ u64 fw_total_mcast_sent;
+ u64 fw_total_bcast_sent;
+ u64 fw_err_pko;
+ u64 fw_err_link;
+ u64 fw_err_drop;
+ u64 fw_err_tso;
+ u64 fw_tso; /* number of tso requests */
+ u64 fw_tso_fwd; /* number of packets segmented in tso */
+ u64 fw_tx_vxlan;
+ u64 fw_err_pki;
+};
+
+struct oct_link_stats {
+ struct nic_rx_stats fromwire;
+ struct nic_tx_stats fromhost;
+
+};
+
+static inline int opcode_slow_path(union octeon_rh *rh)
+{
+ u16 subcode1, subcode2;
+
+ subcode1 = OPCODE_SUBCODE((rh)->r.opcode, (rh)->r.subcode);
+ subcode2 = OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA);
+
+ return (subcode2 != subcode1);
+}
+
+#define LIO68XX_LED_CTRL_ADDR 0x3501
+#define LIO68XX_LED_CTRL_CFGON 0x1f
+#define LIO68XX_LED_CTRL_CFGOFF 0x100
+#define LIO68XX_LED_BEACON_ADDR 0x3508
+#define LIO68XX_LED_BEACON_CFGON 0x47fd
+#define LIO68XX_LED_BEACON_CFGOFF 0x11fc
+#define VITESSE_PHY_GPIO_DRIVEON 0x1
+#define VITESSE_PHY_GPIO_CFG 0x8
+#define VITESSE_PHY_GPIO_DRIVEOFF 0x4
+#define VITESSE_PHY_GPIO_HIGH 0x2
+#define VITESSE_PHY_GPIO_LOW 0x3
+#define LED_IDENTIFICATION_ON 0x1
+#define LED_IDENTIFICATION_OFF 0x0
+#define LIO23XX_COPPERHEAD_LED_GPIO 0x2
+
+struct oct_mdio_cmd {
+ u64 op;
+ u64 mdio_addr;
+ u64 value1;
+ u64 value2;
+ u64 value3;
+};
+
+#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
+
+struct oct_intrmod_cfg {
+ u64 rx_enable;
+ u64 tx_enable;
+ u64 check_intrvl;
+ u64 maxpkt_ratethr;
+ u64 minpkt_ratethr;
+ u64 rx_maxcnt_trigger;
+ u64 rx_mincnt_trigger;
+ u64 rx_maxtmr_trigger;
+ u64 rx_mintmr_trigger;
+ u64 tx_mincnt_trigger;
+ u64 tx_maxcnt_trigger;
+ u64 rx_frames;
+ u64 tx_frames;
+ u64 rx_usecs;
+};
+
+#define BASE_QUEUE_NOT_REQUESTED 65535
+
+union oct_nic_if_cfg {
+ u64 u64;
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 base_queue:16;
+ u64 num_iqueues:16;
+ u64 num_oqueues:16;
+ u64 gmx_port_id:8;
+ u64 vf_id:8;
+#else
+ u64 vf_id:8;
+ u64 gmx_port_id:8;
+ u64 num_oqueues:16;
+ u64 num_iqueues:16;
+ u64 base_queue:16;
+#endif
+ } s;
+};
+
+struct lio_trusted_vf {
+ uint64_t active: 1;
+ uint64_t id : 8;
+ uint64_t reserved: 55;
+};
+
+struct lio_time {
+ s64 sec; /* seconds */
+ s64 nsec; /* nanoseconds */
+};
+
+struct lio_vf_rep_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_dropped;
+
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 rx_dropped;
+};
+
+enum lio_vf_rep_req_type {
+ LIO_VF_REP_REQ_NONE,
+ LIO_VF_REP_REQ_STATE,
+ LIO_VF_REP_REQ_MTU,
+ LIO_VF_REP_REQ_STATS,
+ LIO_VF_REP_REQ_DEVNAME
+};
+
+enum {
+ LIO_VF_REP_STATE_DOWN,
+ LIO_VF_REP_STATE_UP
+};
+
+#define LIO_IF_NAME_SIZE 16
+struct lio_vf_rep_req {
+ u8 req_type;
+ u8 ifidx;
+ u8 rsvd[6];
+
+ union {
+ struct lio_vf_rep_name {
+ char name[LIO_IF_NAME_SIZE];
+ } rep_name;
+
+ struct lio_vf_rep_mtu {
+ u32 mtu;
+ u32 rsvd;
+ } rep_mtu;
+
+ struct lio_vf_rep_state {
+ u8 state;
+ u8 rsvd[7];
+ } rep_state;
+ };
+};
+
+struct lio_vf_rep_resp {
+ u64 rh;
+ u8 status;
+ u8 rsvd[7];
+};
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
new file mode 100644
index 000000000..5bf5e8791
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h
@@ -0,0 +1,54 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#ifndef _LIQUIDIO_IMAGE_H_
+#define _LIQUIDIO_IMAGE_H_
+
+#define LIO_MAX_FW_TYPE_LEN (8)
+#define LIO_MAX_FW_FILENAME_LEN (256)
+#define LIO_FW_DIR "liquidio/"
+#define LIO_FW_BASE_NAME "lio_"
+#define LIO_FW_NAME_SUFFIX ".bin"
+#define LIO_FW_NAME_TYPE_NIC "nic"
+#define LIO_FW_NAME_TYPE_AUTO "auto"
+#define LIO_FW_NAME_TYPE_NONE "none"
+#define LIO_MAX_FIRMWARE_VERSION_LEN 16
+
+#define LIO_MAX_BOOTCMD_LEN 1024
+#define LIO_MAX_IMAGES 16
+#define LIO_NIC_MAGIC 0x434E4943 /* "CNIC" */
+struct octeon_firmware_desc {
+ __be64 addr;
+ __be32 len;
+ __be32 crc32; /* crc32 of image */
+};
+
+/* Following the header is a list of 64-bit aligned binary images,
+ * as described by the desc field.
+ * Numeric fields are in network byte order.
+ */
+struct octeon_firmware_file_header {
+ __be32 magic;
+ char version[LIO_MAX_FIRMWARE_VERSION_LEN];
+ char bootcmd[LIO_MAX_BOOTCMD_LEN];
+ __be32 num_images;
+ struct octeon_firmware_desc desc[LIO_MAX_IMAGES];
+ __be32 pad;
+ __be32 crc32; /* header checksum */
+};
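+
+/* Editorial sketch, not part of the driver (helper name is illustrative):
+ * walking the image descriptor list. All numeric fields are big endian,
+ * so they must be byte-swapped before use on the host; the usual kernel
+ * byte-order and printk helpers are assumed to be in scope.
+ */
+static inline void
+example_dump_fw_images(const struct octeon_firmware_file_header *h)
+{
+ u32 i, num = be32_to_cpu(h->num_images);
+
+ for (i = 0; i < num && i < LIO_MAX_IMAGES; i++)
+ pr_info("fw image %u: load addr 0x%llx, %u bytes\n", i,
+ (unsigned long long)be64_to_cpu(h->desc[i].addr),
+ be32_to_cpu(h->desc[i].len));
+}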
+
+#endif /* _LIQUIDIO_IMAGE_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
new file mode 100644
index 000000000..24c212001
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -0,0 +1,472 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file octeon_config.h
+ * \brief Host Driver: Configuration data structures for the host driver.
+ */
+
+#ifndef __OCTEON_CONFIG_H__
+#define __OCTEON_CONFIG_H__
+
+/*--------------------------CONFIG VALUES------------------------*/
+
+/* The following macros affect the way the driver data structures
+ * are generated for Octeon devices.
+ * They can be modified.
+ */
+
+/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support
+ * multiple (<= MAX_OCTEON_NICIF) Miniports
+ */
+#define MAX_OCTEON_NICIF 128
+#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
+#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
+#define MAX_OCTEON_MULTICAST_ADDR 32
+
+#define MAX_OCTEON_FILL_COUNT 8
+
+/* CN6xxx IQ configuration macros */
+#define CN6XXX_MAX_INPUT_QUEUES 32
+#define CN6XXX_MAX_IQ_DESCRIPTORS 2048
+#define CN6XXX_DB_MIN 1
+#define CN6XXX_DB_MAX 8
+#define CN6XXX_DB_TIMEOUT 1
+
+/* CN6xxx OQ configuration macros */
+#define CN6XXX_MAX_OUTPUT_QUEUES 32
+#define CN6XXX_MAX_OQ_DESCRIPTORS 2048
+#define CN6XXX_OQ_BUF_SIZE 1664
+#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \
+ (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128)
+
+#define CN6XXX_OQ_INTR_PKT 64
+#define CN6XXX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_66XX 2
+#define DEFAULT_NUM_NIC_PORTS_68XX 4
+#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2
+
+/* CN23xx IQ configuration macros */
+#define CN23XX_MAX_VFS_PER_PF_PASS_1_0 8
+#define CN23XX_MAX_VFS_PER_PF_PASS_1_1 31
+#define CN23XX_MAX_VFS_PER_PF 63
+#define CN23XX_MAX_RINGS_PER_VF 8
+
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_0 12
+#define CN23XX_MAX_RINGS_PER_PF_PASS_1_1 32
+#define CN23XX_MAX_RINGS_PER_PF 64
+#define CN23XX_MAX_RINGS_PER_VF 8
+
+#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_DEFAULT_IQ_DESCRIPTORS 512
+#define CN23XX_MIN_IQ_DESCRIPTORS 128
+#define CN23XX_DB_MIN 1
+#define CN23XX_DB_MAX 8
+#define CN23XX_DB_TIMEOUT 1
+
+#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
+#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_DEFAULT_OQ_DESCRIPTORS 512
+#define CN23XX_MIN_OQ_DESCRIPTORS 128
+#define CN23XX_OQ_BUF_SIZE 1664
+#define CN23XX_OQ_PKTSPER_INTR 128
+/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
+#define CN23XX_OQ_REFIL_THRESHOLD 16
+
+#define CN23XX_OQ_INTR_PKT 64
+#define CN23XX_OQ_INTR_TIME 100
+#define DEFAULT_NUM_NIC_PORTS_23XX 1
+
+#define CN23XX_CFG_IO_QUEUES CN23XX_MAX_RINGS_PER_PF
+/* PEMs count */
+#define CN23XX_MAX_MACS 4
+
+#define CN23XX_DEF_IQ_INTR_THRESHOLD 32
+#define CN23XX_DEF_IQ_INTR_BYTE_THRESHOLD (64 * 1024)
+/* common OCTEON configuration macros */
+#define CN6XXX_CFG_IO_QUEUES 32
+#define OCTEON_32BYTE_INSTR 32
+#define OCTEON_64BYTE_INSTR 64
+#define OCTEON_MAX_BASE_IOQ 4
+
+#define OCTEON_DMA_INTR_PKT 64
+#define OCTEON_DMA_INTR_TIME 1000
+
+#define MAX_TXQS_PER_INTF 8
+#define MAX_RXQS_PER_INTF 8
+#define DEF_TXQS_PER_INTF 4
+#define DEF_RXQS_PER_INTF 4
+
+#define INVALID_IOQ_NO 0xff
+
+#define DEFAULT_POW_GRP 0
+
+/* Macros to get octeon config params */
+#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
+#define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
+#define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
+#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
+#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
+#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
+
+#define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
+#define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
+
+#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs)
+#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr)
+#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
+#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt)
+#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time)
+#define CFG_SET_OQ_INTR_PKT(cfg, val) (cfg)->oq.oq_intr_pkt = val
+#define CFG_SET_OQ_INTR_TIME(cfg, val) (cfg)->oq.oq_intr_time = val
+
+#define CFG_GET_DMA_INTR_PKT(cfg) ((cfg)->dma.dma_intr_pkt)
+#define CFG_GET_DMA_INTR_TIME(cfg) ((cfg)->dma.dma_intr_time)
+#define CFG_GET_NUM_NIC_PORTS(cfg) ((cfg)->num_nic_ports)
+#define CFG_GET_NUM_DEF_TX_DESCS(cfg) ((cfg)->num_def_tx_descs)
+#define CFG_GET_NUM_DEF_RX_DESCS(cfg) ((cfg)->num_def_rx_descs)
+#define CFG_GET_DEF_RX_BUF_SIZE(cfg) ((cfg)->def_rx_buf_size)
+
+#define CFG_GET_MAX_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_txqs)
+#define CFG_GET_NUM_TXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_txqs)
+#define CFG_GET_MAX_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].max_rxqs)
+#define CFG_GET_NUM_RXQS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rxqs)
+#define CFG_GET_NUM_RX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_rx_descs)
+#define CFG_GET_NUM_TX_DESCS_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].num_tx_descs)
+#define CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].rx_buf_size)
+#define CFG_GET_BASE_QUE_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].base_queue)
+#define CFG_GET_GMXID_NIC_IF(cfg, idx) \
+ ((cfg)->nic_if_cfg[idx].gmx_port_id)
+
+#define CFG_GET_CTRL_Q_GRP(cfg) ((cfg)->misc.ctrlq_grp)
+#define CFG_GET_HOST_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.host_link_query_interval)
+#define CFG_GET_OCT_LINK_QUERY_INTERVAL(cfg) \
+ ((cfg)->misc.oct_link_query_interval)
+#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
+
+#define CFG_SET_NUM_RX_DESCS_NIC_IF(cfg, idx, value) \
+ ((cfg)->nic_if_cfg[idx].num_rx_descs = value)
+#define CFG_SET_NUM_TX_DESCS_NIC_IF(cfg, idx, value) \
+ ((cfg)->nic_if_cfg[idx].num_tx_descs = value)
+
+/* Max IOQs per OCTEON Link */
+#define MAX_IOQS_PER_NICIF 64
+
+enum lio_card_type {
+ LIO_210SV = 0, /* Two port, 66xx */
+ LIO_210NV, /* Two port, 68xx */
+ LIO_410NV, /* Four port, 68xx */
+ LIO_23XX /* 23xx */
+};
+
+#define LIO_210SV_NAME "210sv"
+#define LIO_210NV_NAME "210nv"
+#define LIO_410NV_NAME "410nv"
+#define LIO_23XX_NAME "23xx"
+
+/** Structure to define the configuration attributes for each Input queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_iq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:16;
+
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+#else
+ /* Max number of IQs available */
+ u64 max_iqs:8;
+
+ /** Pending list size (usually set to the sum of the size of all Input
+ * queues)
+ */
+ u64 pending_list_size:32;
+
+ /** Command size - 32 or 64 bytes */
+ u64 instr_type:32;
+
+ /** Minimum number of commands pending to be posted to Octeon
+ * before driver hits the Input queue doorbell.
+ */
+ u64 db_min:8;
+
+ /** Minimum ticks to wait before checking for pending instructions. */
+ u64 db_timeout:16;
+
+ /** Tx interrupt packets. Applicable to 23xx only */
+ u64 iq_intr_pkt:16;
+
+ u64 reserved:16;
+#endif
+};
+
+/** Structure to define the configuration attributes for each Output queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_oq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:16;
+
+ u64 pkts_per_intr:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+ * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+ /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u64 oq_intr_pkt:16;
+
+ /** The number of buffers that were consumed during packet processing by
+ * the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u64 refill_threshold:16;
+
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+#else
+ /* Max number of OQs available */
+ u64 max_oqs:8;
+
+ /** The number of buffers that were consumed during packet processing by
+ * the driver on this Output queue before the driver attempts to
+ * replenish the descriptor ring with new buffers.
+ */
+ u64 refill_threshold:16;
+
+ /** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+ * only if it sent as many packets as specified by this field.
+ * The driver usually does not use packet count interrupt coalescing.
+ */
+ u64 oq_intr_pkt:16;
+
+ /** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+ * host if at least one packet was sent in the time interval specified
+ * by this field. The driver uses time interval interrupt coalescing
+ * by default. The time is specified in microseconds.
+ */
+ u64 oq_intr_time:16;
+
+ u64 pkts_per_intr:16;
+
+ u64 reserved:16;
+#endif
+
+};
+
+/** This structure contains the NIC link configuration attributes,
+ * common for all the OCTEON models.
+ */
+struct octeon_nic_if_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved:56;
+
+ u64 base_queue:16;
+
+ u64 gmx_port_id:8;
+
+ /* SKB size. We need not change the buf size even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ u64 rx_buf_size:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+#else
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ u64 max_txqs:16;
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ u64 num_txqs:16;
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ u64 max_rxqs:16;
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ u64 num_rxqs:16;
+
+ /* Num of desc for rx rings */
+ u64 num_rx_descs:16;
+
+ /* Num of desc for tx rings */
+ u64 num_tx_descs:16;
+
+ /* SKB size. We need not change the buf size even for jumbo frames;
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ u64 rx_buf_size:16;
+
+ u64 gmx_port_id:8;
+
+ u64 base_queue:16;
+
+ u64 reserved:56;
+#endif
+
+};
+
+/** Structure to define the configuration attributes for meta data.
+ * Applicable to all Octeon processors.
+ */
+
+struct octeon_misc_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+ /** Host link status polling period */
+ u64 host_link_query_interval:32;
+ /** Oct link status polling period */
+ u64 oct_link_query_interval:32;
+
+ u64 enable_sli_oq_bp:1;
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+#else
+ /** Control IQ Group */
+ u64 ctrlq_grp:4;
+ /** BP for SLI OQ */
+ u64 enable_sli_oq_bp:1;
+ /** Oct link status polling period */
+ u64 oct_link_query_interval:32;
+ /** Host link status polling period */
+ u64 host_link_query_interval:32;
+#endif
+};
+
+/** Structure to define the configuration for all OCTEON processors. */
+struct octeon_config {
+ u16 card_type;
+ char *card_name;
+
+ /** Input Queue attributes. */
+ struct octeon_iq_config iq;
+
+ /** Output Queue attributes. */
+ struct octeon_oq_config oq;
+
+ /** NIC Port Configuration */
+ struct octeon_nic_if_config nic_if_cfg[MAX_OCTEON_NICIF];
+
+ /** Miscellaneous attributes */
+ struct octeon_misc_config misc;
+
+ int num_nic_ports;
+
+ int num_def_tx_descs;
+
+ /* Num of desc for rx rings */
+ int num_def_rx_descs;
+
+ int def_rx_buf_size;
+
+};
+
+/* The following config values are fixed and should not be modified. */
+
+#define BAR1_INDEX_DYNAMIC_MAP 2
+#define BAR1_INDEX_STATIC_MAP 15
+#define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024)
+
+#define MAX_BAR1_IOREMAP_SIZE (16 * OCTEON_BAR1_ENTRY_SIZE)
+
+/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking,
+ * 1 process-done list, 1 zombie list (timed-out sc list).
+ * NoResponse lists are now maintained with each IQ. (Dec' 2007).
+ */
+#define MAX_RESPONSE_LISTS 6
+
+/* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the
+ * dispatch table.
+ */
+#define OPCODE_MASK_BITS 6
+
+/* Mask for the 6-bit lookup hash */
+#define OCTEON_OPCODE_MASK 0x3f
+
+/* Size of the dispatch table. The 6-bit hash can index into 2^6 entries */
+#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
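+
+/* Editorial sketch, not part of the driver (helper name is illustrative):
+ * reducing a combined opcode/subcode value, as packed by OPCODE_SUBCODE()
+ * in liquidio_common.h, to a dispatch-table index. Only the low
+ * OPCODE_MASK_BITS bits select the bucket.
+ */
+static inline u32 example_dispatch_index(u16 combined_opcode)
+{
+ return combined_opcode & OCTEON_OPCODE_MASK;
+}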
+
+/* Maximum number of Octeon Instruction (command) queues */
+#define MAX_OCTEON_INSTR_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_INPUT_QUEUES : \
+ CN6XXX_MAX_INPUT_QUEUES)
+
+/* Maximum number of Octeon Output (response) queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct) \
+ (OCTEON_CN23XX_PF(oct) ? CN23XX_MAX_OUTPUT_QUEUES : \
+ CN6XXX_MAX_OUTPUT_QUEUES)
+
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN23XX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN23XX_MAX_OUTPUT_QUEUES
+
+#define MAX_POSSIBLE_VFS 64
+
+#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
new file mode 100644
index 000000000..28feabec8
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -0,0 +1,920 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*
+ * @file octeon_console.c
+ */
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/crc32.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "liquidio_image.h"
+#include "octeon_mem_ops.h"
+
+static void octeon_remote_lock(void);
+static void octeon_remote_unlock(void);
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags);
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size);
+
+#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008
+#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004
+#define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000
+#define BOOTLOADER_PCI_READ_DESC_ADDR 0x0006c100
+#define BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN 248
+
+#define OCTEON_PCI_IO_BUF_OWNER_OCTEON 0x00000001
+#define OCTEON_PCI_IO_BUF_OWNER_HOST 0x00000002
+
+/** Can change without breaking ABI */
+#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
+
+/** minimum alignment of bootmem alloced blocks */
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+
+/** CVMX bootmem descriptor major version */
+#define CVMX_BOOTMEM_DESC_MAJ_VER 3
+/* CVMX bootmem descriptor minor version */
+#define CVMX_BOOTMEM_DESC_MIN_VER 0
+
+/* Current versions */
+#define OCTEON_PCI_CONSOLE_MAJOR_VERSION 1
+#define OCTEON_PCI_CONSOLE_MINOR_VERSION 0
+#define OCTEON_PCI_CONSOLE_BLOCK_NAME "__pci_console"
+#define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */
+
+/* First three members of cvmx_bootmem_desc are left in original
+ * positions for backwards compatibility.
+ * Assumes big endian target
+ */
+struct cvmx_bootmem_desc {
+ /** spinlock to control access to list */
+ u32 lock;
+
+ /** flags for indicating various conditions */
+ u32 flags;
+
+ u64 head_addr;
+
+ /** incremented when incompatible changes are made */
+ u32 major_version;
+
+ /** incremented when compatible changes are made,
+ * reset to zero when major version is incremented
+ */
+ u32 minor_version;
+
+ u64 app_data_addr;
+ u64 app_data_size;
+
+ /** number of elements in named blocks array */
+ u32 nb_num_blocks;
+
+ /** length of name array in bootmem blocks */
+ u32 named_block_name_len;
+
+ /** address of named memory block descriptors */
+ u64 named_block_array_addr;
+};
+
+/* Structure that defines a single console.
+ *
+ * Note: when read_index == write_index, the buffer is empty.
+ * The actual usable size of each console is buf_size - 1.
+ */
+struct octeon_pci_console {
+ u64 input_base_addr;
+ u32 input_read_index;
+ u32 input_write_index;
+ u64 output_base_addr;
+ u32 output_read_index;
+ u32 output_write_index;
+ u32 lock;
+ u32 buf_size;
+};
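+
+/* Editorial sketch, not part of the driver (helper name is illustrative):
+ * with the convention noted above (read == write means empty, one slot
+ * always unused), the amount of console output waiting to be read is the
+ * usual ring-buffer difference taken modulo buf_size.
+ */
+static inline u32
+example_console_output_avail(const struct octeon_pci_console *cons)
+{
+ return (cons->buf_size + cons->output_write_index -
+ cons->output_read_index) % cons->buf_size;
+}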
+
+/* This is the main container structure that contains all the information
+ * about all PCI consoles. The address of this structure is passed to various
+ * routines that operate on PCI consoles.
+ */
+struct octeon_pci_console_desc {
+ u32 major_version;
+ u32 minor_version;
+ u32 lock;
+ u32 flags;
+ u32 num_consoles;
+ u32 pad;
+ /* must be 64 bit aligned here... */
+ /* Array of addresses of octeon_pci_console structures */
+ u64 console_addr_array[];
+ /* Implicit storage for console_addr_array */
+};
+
+/*
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
+ *
+ * @param oct Pointer to current octeon device
+ * @param base 64bit physical address of the complete structure
+ * @param offset Offset from the beginning of the structure to the member being
+ * accessed.
+ * @param size Size of the structure member.
+ *
+ * @return Value of the structure member promoted into a u64.
+ */
+static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
+ u64 base,
+ u32 offset,
+ u32 size)
+{
+ base = (1ull << 63) | (base + offset);
+ switch (size) {
+ case 4:
+ return octeon_read_device_mem32(oct, base);
+ case 8:
+ return octeon_read_device_mem64(oct, base);
+ default:
+ return 0;
+ }
+}
+
+/*
+ * This function retrieves the string name of a named block. It is
+ * more complicated than a simple memcpy() since the named block
+ * descriptor may not be directly accessible.
+ *
+ * @param addr Physical address of the named block descriptor
+ * @param str String to receive the named block string name
+ * @param len Length of the string buffer, which must match the length
+ * stored in the bootmem descriptor.
+ */
+static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
+ u64 addr,
+ char *str,
+ u32 len)
+{
+ addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
+ octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
+ str[len] = 0;
+}
+
+/* See header file for descriptions of functions */
+
+/*
+ * Check the version information on the bootmem descriptor
+ *
+ * @param exact_match
+ * Exact major version to check against. A zero means
+ * check that the version supports named blocks.
+ *
+ * @return Zero if the version is correct. Negative if the version is
+ * incorrect. Failures also cause a message to be displayed.
+ */
+static int __cvmx_bootmem_check_version(struct octeon_device *oct,
+ u32 exact_match)
+{
+ u32 major_version;
+ u32 minor_version;
+
+ if (!oct->bootmem_desc_addr)
+ oct->bootmem_desc_addr =
+ octeon_read_device_mem64(oct,
+ BOOTLOADER_PCI_READ_DESC_ADDR);
+ major_version = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc, major_version),
+ sizeof_field(struct cvmx_bootmem_desc, major_version));
+ minor_version = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc, minor_version),
+ sizeof_field(struct cvmx_bootmem_desc, minor_version));
+
+ dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__,
+ major_version);
+ if ((major_version > 3) ||
+ (exact_match && major_version != exact_match)) {
+ dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n",
+ major_version, minor_version,
+ (long long)oct->bootmem_desc_addr);
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static const struct cvmx_bootmem_named_block_desc
+*__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct,
+ const char *name, u32 flags)
+{
+ struct cvmx_bootmem_named_block_desc *desc =
+ &oct->bootmem_named_block_desc;
+ u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags);
+
+ if (named_addr) {
+ desc->base_addr = __cvmx_bootmem_desc_get(
+ oct, named_addr,
+ offsetof(struct cvmx_bootmem_named_block_desc,
+ base_addr),
+ sizeof_field(
+ struct cvmx_bootmem_named_block_desc,
+ base_addr));
+ desc->size = __cvmx_bootmem_desc_get(oct, named_addr,
+ offsetof(struct cvmx_bootmem_named_block_desc,
+ size),
+ sizeof_field(
+ struct cvmx_bootmem_named_block_desc,
+ size));
+
+ strncpy(desc->name, name, sizeof(desc->name));
+ desc->name[sizeof(desc->name) - 1] = 0;
+ return &oct->bootmem_named_block_desc;
+ } else {
+ return NULL;
+ }
+}
+
+static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
+ const char *name,
+ u32 flags)
+{
+ u64 result = 0;
+
+ if (!__cvmx_bootmem_check_version(oct, 3)) {
+ u32 i;
+
+ u64 named_block_array_addr = __cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc,
+ named_block_array_addr),
+ sizeof_field(struct cvmx_bootmem_desc,
+ named_block_array_addr));
+ u32 num_blocks = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc,
+ nb_num_blocks),
+ sizeof_field(struct cvmx_bootmem_desc,
+ nb_num_blocks));
+
+ u32 name_length = (u32)__cvmx_bootmem_desc_get(
+ oct, oct->bootmem_desc_addr,
+ offsetof(struct cvmx_bootmem_desc,
+ named_block_name_len),
+ sizeof_field(struct cvmx_bootmem_desc,
+ named_block_name_len));
+
+ u64 named_addr = named_block_array_addr;
+
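+ /* Walk the named-block array: an entry with a non-zero size is in
+ * use and its name is compared against 'name'; when 'name' is NULL,
+ * the first entry with size zero (a free slot) is returned instead.
+ */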
+ for (i = 0; i < num_blocks; i++) {
+ u64 named_size = __cvmx_bootmem_desc_get(
+ oct, named_addr,
+ offsetof(
+ struct cvmx_bootmem_named_block_desc,
+ size),
+ sizeof_field(
+ struct cvmx_bootmem_named_block_desc,
+ size));
+
+ if (name && named_size) {
+ char *name_tmp =
+ kmalloc(name_length + 1, GFP_KERNEL);
+ if (!name_tmp)
+ break;
+
+ CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
+ name_tmp,
+ name_length);
+ if (!strncmp(name, name_tmp, name_length)) {
+ result = named_addr;
+ kfree(name_tmp);
+ break;
+ }
+ kfree(name_tmp);
+ } else if (!name && !named_size) {
+ result = named_addr;
+ break;
+ }
+
+ named_addr +=
+ sizeof(struct cvmx_bootmem_named_block_desc);
+ }
+ }
+ return result;
+}
+
+/*
+ * Find a named block on the remote Octeon
+ *
+ * @param name Name of block to find
+ * @param base_addr Address the block is at (OUTPUT)
+ * @param size The size of the block (OUTPUT)
+ *
+ * @return Zero on success, one on failure.
+ */
+static int octeon_named_block_find(struct octeon_device *oct, const char *name,
+ u64 *base_addr, u64 *size)
+{
+ const struct cvmx_bootmem_named_block_desc *named_block;
+
+ octeon_remote_lock();
+ named_block = __cvmx_bootmem_find_named_block_flags(oct, name, 0);
+ octeon_remote_unlock();
+ if (named_block) {
+ *base_addr = named_block->base_addr;
+ *size = named_block->size;
+ return 0;
+ }
+ return 1;
+}
+
+static void octeon_remote_lock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+static void octeon_remote_unlock(void)
+{
+ /* fill this in if any sharing is needed */
+}
+
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths)
+{
+ u32 len = (u32)strlen(cmd_str);
+
+ dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
+
+ if (len > BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) {
+ dev_err(&oct->pci_dev->dev, "Command string too long, max length is: %d\n",
+ BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1);
+ return -1;
+ }
+
+ if (octeon_wait_for_bootloader(oct, wait_hundredths) != 0) {
+ dev_err(&oct->pci_dev->dev, "Bootloader not ready for command.\n");
+ return -1;
+ }
+
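+ /* Ownership handshake: the host may fill the command buffer only
+ * while the OWNER word reads OCTEON_PCI_IO_BUF_OWNER_HOST. Writing
+ * OCTEON_PCI_IO_BUF_OWNER_OCTEON below hands the buffer over to the
+ * bootloader, which sets it back to OCTEON_PCI_IO_BUF_OWNER_HOST
+ * once it has consumed the command.
+ */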
+ /* Write command to bootloader */
+ octeon_remote_lock();
+ octeon_pci_write_core_mem(oct, BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR,
+ (u8 *)cmd_str, len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR,
+ len);
+ octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR,
+ OCTEON_PCI_IO_BUF_OWNER_OCTEON);
+
+ /* Bootloader should accept command very quickly
+ * if it really was ready
+ */
+ if (octeon_wait_for_bootloader(oct, 200) != 0) {
+ octeon_remote_unlock();
+ dev_err(&oct->pci_dev->dev, "Bootloader did not accept command.\n");
+ return -1;
+ }
+ octeon_remote_unlock();
+ return 0;
+}
+
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths)
+{
+ dev_dbg(&oct->pci_dev->dev, "waiting %d0 ms for bootloader\n",
+ wait_time_hundredths);
+
+ if (octeon_mem_access_ok(oct))
+ return -1;
+
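+ /* Poll the buffer OWNER word every 10 ms (HZ / 100 jiffies) until
+ * the bootloader hands the buffer back to the host or the caller's
+ * timeout (in hundredths of a second) expires.
+ */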
+ while (wait_time_hundredths > 0 &&
+ octeon_read_device_mem32(oct,
+ BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR)
+ != OCTEON_PCI_IO_BUF_OWNER_HOST) {
+ if (--wait_time_hundredths <= 0)
+ return -1;
+ schedule_timeout_uninterruptible(HZ / 100);
+ }
+ return 0;
+}
+
+static void octeon_console_handle_result(struct octeon_device *oct,
+ size_t console_num)
+{
+ struct octeon_console *console;
+
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+}
+
+static char console_buffer[OCTEON_CONSOLE_MAX_READ_BYTES];
+
+static void output_console_line(struct octeon_device *oct,
+ struct octeon_console *console,
+ size_t console_num,
+ char *console_buffer,
+ s32 bytes_read)
+{
+ char *line;
+ s32 i;
+ size_t len;
+
+ line = console_buffer;
+ for (i = 0; i < bytes_read; i++) {
+ /* Output a line at a time, prefixed */
+ if (console_buffer[i] == '\n') {
+ console_buffer[i] = '\0';
+ /* We need to output 'line', prefaced by 'leftover'.
+ * However, it is possible we're being called to
+ * output 'leftover' by itself (in the case of nothing
+ * having been read from the console).
+ *
+ * To avoid duplication, check for this condition.
+ */
+ if (console->leftover[0] &&
+ (line != console->leftover)) {
+ if (console->print)
+ (*console->print)(oct, (u32)console_num,
+ console->leftover,
+ line);
+ console->leftover[0] = '\0';
+ } else {
+ if (console->print)
+ (*console->print)(oct, (u32)console_num,
+ line, NULL);
+ }
+ line = &console_buffer[i + 1];
+ }
+ }
+
+ /* Save off any leftovers */
+ if (line != &console_buffer[bytes_read]) {
+ console_buffer[bytes_read] = '\0';
+ len = strlen(console->leftover);
+ strncpy(&console->leftover[len], line,
+ sizeof(console->leftover) - len);
+ }
+}
+
+static void check_console(struct work_struct *work)
+{
+ s32 bytes_read, tries, total_read;
+ size_t len;
+ struct octeon_console *console;
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ u32 console_num = (u32)wk->ctxul;
+ u32 delay;
+
+ console = &oct->console[console_num];
+ tries = 0;
+ total_read = 0;
+
+ do {
+ /* Take console output regardless of whether it will
+ * be logged
+ */
+ bytes_read =
+ octeon_console_read(oct, console_num, console_buffer,
+ sizeof(console_buffer) - 1);
+ if (bytes_read > 0) {
+ total_read += bytes_read;
+ if (console->waiting)
+ octeon_console_handle_result(oct, console_num);
+ if (console->print) {
+ output_console_line(oct, console, console_num,
+ console_buffer, bytes_read);
+ }
+ } else if (bytes_read < 0) {
+ dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
+ console_num, bytes_read);
+ }
+
+ tries++;
+ } while ((bytes_read > 0) && (tries < 16));
+
+ /* If nothing was read after polling the console,
+ * output any leftovers
+ */
+ if (console->print && (total_read == 0) &&
+ (console->leftover[0])) {
+ /* append '\n' as terminator for 'output_console_line' */
+ len = strlen(console->leftover);
+ console->leftover[len] = '\n';
+ output_console_line(oct, console, console_num,
+ console->leftover, (s32)(len + 1));
+ console->leftover[0] = '\0';
+ }
+
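+ /* Re-arm the polling work so the console keeps being drained at
+ * OCTEON_CONSOLE_POLL_INTERVAL_MS intervals.
+ */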
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+
+ schedule_delayed_work(&wk->work, msecs_to_jiffies(delay));
+}
+
+int octeon_init_consoles(struct octeon_device *oct)
+{
+ int ret = 0;
+ u64 addr, size;
+
+ ret = octeon_mem_access_ok(oct);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Memory access not okay\n");
+ return ret;
+ }
+
+ ret = octeon_named_block_find(oct, OCTEON_PCI_CONSOLE_BLOCK_NAME, &addr,
+ &size);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Could not find console '%s'\n",
+ OCTEON_PCI_CONSOLE_BLOCK_NAME);
+ return ret;
+ }
+
+ /* Dedicate one of Octeon's BAR1 index registers to create a static
+ * mapping to a region of Octeon DRAM that contains the PCI console
+ * named block.
+ */
+ oct->console_nb_info.bar1_index = BAR1_INDEX_STATIC_MAP;
+ oct->fn_list.bar1_idx_setup(oct, addr, oct->console_nb_info.bar1_index,
+ true);
+ oct->console_nb_info.dram_region_base = addr
+ & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL);
+
+ /* num_consoles > 0 is an indication that the consoles
+ * are accessible
+ */
+ oct->num_consoles = octeon_read_device_mem32(oct,
+ addr + offsetof(struct octeon_pci_console_desc,
+ num_consoles));
+ oct->console_desc_addr = addr;
+
+ dev_dbg(&oct->pci_dev->dev, "Initialized consoles. %d available\n",
+ oct->num_consoles);
+
+ return ret;
+}
+
+static void octeon_get_uboot_version(struct octeon_device *oct)
+{
+ s32 bytes_read, tries, total_read;
+ struct octeon_console *console;
+ u32 console_num = 0;
+ char *uboot_ver;
+ char *buf;
+ char *p;
+
+#define OCTEON_UBOOT_VER_BUF_SIZE 512
+ buf = kmalloc(OCTEON_UBOOT_VER_BUF_SIZE, GFP_KERNEL);
+ if (!buf)
+ return;
+
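+ /* Temporarily redirect U-Boot's stdout to the PCI console, ask for
+ * its version string, and restore stdout to serial once the reply
+ * has been parsed below.
+ */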
+ if (octeon_console_send_cmd(oct, "setenv stdout pci\n", 50)) {
+ kfree(buf);
+ return;
+ }
+
+ if (octeon_console_send_cmd(oct, "version\n", 1)) {
+ kfree(buf);
+ return;
+ }
+
+ console = &oct->console[console_num];
+ tries = 0;
+ total_read = 0;
+
+ do {
+ /* Take console output regardless of whether it will
+ * be logged
+ */
+ bytes_read =
+ octeon_console_read(oct,
+ console_num, buf + total_read,
+ OCTEON_UBOOT_VER_BUF_SIZE - 1 -
+ total_read);
+ if (bytes_read > 0) {
+ buf[bytes_read] = '\0';
+
+ total_read += bytes_read;
+ if (console->waiting)
+ octeon_console_handle_result(oct, console_num);
+ } else if (bytes_read < 0) {
+ dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
+ console_num, bytes_read);
+ }
+
+ tries++;
+ } while ((bytes_read > 0) && (tries < 16));
+
+ /* If nothing was read after polling the console,
+ * output any leftovers
+ */
+ if ((total_read == 0) && (console->leftover[0])) {
+ dev_dbg(&oct->pci_dev->dev, "%u: %s\n",
+ console_num, console->leftover);
+ console->leftover[0] = '\0';
+ }
+
+ buf[OCTEON_UBOOT_VER_BUF_SIZE - 1] = '\0';
+
+ uboot_ver = strstr(buf, "U-Boot");
+ if (uboot_ver) {
+ p = strstr(uboot_ver, "mips");
+ if (p) {
+ p--;
+ *p = '\0';
+ dev_info(&oct->pci_dev->dev, "%s\n", uboot_ver);
+ }
+ }
+
+ kfree(buf);
+ octeon_console_send_cmd(oct, "setenv stdout serial\n", 50);
+}
+
+int octeon_add_console(struct octeon_device *oct, u32 console_num,
+ char *dbg_enb)
+{
+ int ret = 0;
+ u32 delay;
+ u64 coreaddr;
+ struct delayed_work *work;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev,
+ "trying to read from console number %d when only 0 to %d exist\n",
+ console_num, oct->num_consoles);
+ } else {
+ console = &oct->console[console_num];
+
+ console->waiting = 0;
+
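+ /* console_addr_array[] holds one 64-bit address per console; read
+ * entry 'console_num' to locate this console's octeon_pci_console
+ * structure, then pull its buffer size and base addresses from it.
+ */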
+ coreaddr = oct->console_desc_addr + console_num * 8 +
+ offsetof(struct octeon_pci_console_desc,
+ console_addr_array);
+ console->addr = octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ buf_size);
+ console->buffer_size = octeon_read_device_mem32(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ input_base_addr);
+ console->input_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ coreaddr = console->addr + offsetof(struct octeon_pci_console,
+ output_base_addr);
+ console->output_base_addr =
+ octeon_read_device_mem64(oct, coreaddr);
+ console->leftover[0] = '\0';
+
+ work = &oct->console_poll_work[console_num].work;
+
+ octeon_get_uboot_version(oct);
+
+ INIT_DELAYED_WORK(work, check_console);
+ oct->console_poll_work[console_num].ctxptr = (void *)oct;
+ oct->console_poll_work[console_num].ctxul = console_num;
+ delay = OCTEON_CONSOLE_POLL_INTERVAL_MS;
+ schedule_delayed_work(work, msecs_to_jiffies(delay));
+
+ /* an empty string means use default debug console enablement */
+ if (dbg_enb && !dbg_enb[0])
+ dbg_enb = "setenv pci_console_active 1";
+ if (dbg_enb)
+ ret = octeon_console_send_cmd(oct, dbg_enb, 2000);
+
+ console->active = 1;
+ }
+
+ return ret;
+}
+
+/*
+ * Removes all consoles
+ *
+ * @param oct octeon device
+ */
+void octeon_remove_consoles(struct octeon_device *oct)
+{
+ u32 i;
+ struct octeon_console *console;
+
+ for (i = 0; i < oct->num_consoles; i++) {
+ console = &oct->console[i];
+
+ if (!console->active)
+ continue;
+
+ cancel_delayed_work_sync(&oct->console_poll_work[i].
+ work);
+ console->addr = 0;
+ console->buffer_size = 0;
+ console->input_base_addr = 0;
+ console->output_base_addr = 0;
+ }
+
+ oct->num_consoles = 0;
+}
+
+static inline int octeon_console_free_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
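+ /* Ring-buffer arithmetic: (wr_idx - rd_idx) % buffer_size bytes are
+ * in use, and one slot is always kept empty so that an empty buffer
+ * (rd_idx == wr_idx) can be told apart from a full one.
+ */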
+ return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size;
+}
+
+static inline int octeon_console_avail_bytes(u32 buffer_size,
+ u32 wr_idx,
+ u32 rd_idx)
+{
+ if (rd_idx >= buffer_size || wr_idx >= buffer_size)
+ return -1;
+
+ return buffer_size - 1 -
+ octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
+}
+
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size)
+{
+ int bytes_to_read;
+ u32 rd_idx, wr_idx;
+ struct octeon_console *console;
+
+ if (console_num >= oct->num_consoles) {
+ dev_err(&oct->pci_dev->dev, "Attempted to read from disabled console %d\n",
+ console_num);
+ return 0;
+ }
+
+ console = &oct->console[console_num];
+
+ /* Check to see if any data is available.
+ * Maybe optimize this with 64-bit read.
+ */
+ rd_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_read_index));
+ wr_idx = octeon_read_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console, output_write_index));
+
+ bytes_to_read = octeon_console_avail_bytes(console->buffer_size,
+ wr_idx, rd_idx);
+ if (bytes_to_read <= 0)
+ return bytes_to_read;
+
+ bytes_to_read = min_t(s32, bytes_to_read, buf_size);
+
+ /* Check to see if what we want to read is not contiguous, and limit
+ * ourselves to the contiguous block
+ */
+ if (rd_idx + bytes_to_read >= console->buffer_size)
+ bytes_to_read = console->buffer_size - rd_idx;
+
+ octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
+ (u8 *)buffer, bytes_to_read);
+ octeon_write_device_mem32(oct, console->addr +
+ offsetof(struct octeon_pci_console,
+ output_read_index),
+ (rd_idx + bytes_to_read) %
+ console->buffer_size);
+
+ return bytes_to_read;
+}
+
+#define FBUF_SIZE (4 * 1024 * 1024)
+#define MAX_BOOTTIME_SIZE 80
+
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size)
+{
+ struct octeon_firmware_file_header *h;
+ char boottime[MAX_BOOTTIME_SIZE];
+ struct timespec64 ts;
+ u32 crc32_result;
+ u64 load_addr;
+ u32 image_len;
+ int ret = 0;
+ u32 i, rem;
+
+ if (size < sizeof(struct octeon_firmware_file_header)) {
+ dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
+ (u32)size,
+ (u32)sizeof(struct octeon_firmware_file_header));
+ return -EINVAL;
+ }
+
+ h = (struct octeon_firmware_file_header *)data;
+
+ if (be32_to_cpu(h->magic) != LIO_NIC_MAGIC) {
+ dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n");
+ return -EINVAL;
+ }
+
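+ /* The CRC is computed over all but the last four bytes of the
+ * header (the stored crc32): crc32() is seeded with ~0 and the
+ * result inverted before comparing against h->crc32.
+ */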
+ crc32_result = crc32((unsigned int)~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
+ if (crc32_result != be32_to_cpu(h->crc32)) {
+ dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
+ crc32_result, be32_to_cpu(h->crc32));
+ return -EINVAL;
+ }
+
+ if (memcmp(LIQUIDIO_BASE_VERSION, h->version,
+ strlen(LIQUIDIO_BASE_VERSION))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION,
+ h->version);
+ return -EINVAL;
+ }
+
+ if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) {
+ dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n",
+ be32_to_cpu(h->num_images));
+ return -EINVAL;
+ }
+
+ dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version);
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
+ h->version);
+
+ data += sizeof(struct octeon_firmware_file_header);
+
+ dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+ be32_to_cpu(h->num_images));
+ /* load all images */
+ for (i = 0; i < be32_to_cpu(h->num_images); i++) {
+ load_addr = be64_to_cpu(h->desc[i].addr);
+ image_len = be32_to_cpu(h->desc[i].len);
+
+ dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+ image_len, load_addr);
+
+ /* Write in 4MB chunks */
+ rem = image_len;
+
+ while (rem) {
+ if (rem < FBUF_SIZE)
+ size = rem;
+ else
+ size = FBUF_SIZE;
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, data, (u32)size);
+
+ data += size;
+ rem -= (u32)size;
+ load_addr += size;
+ }
+ }
+
+ /* Pass date and time information to NIC at the time of loading
+ * firmware and periodically update the host time to NIC firmware.
+ * This is to make NIC firmware use the same time reference as Host,
+ * so that it is easy to correlate logs from firmware and host for
+ * debugging.
+ *
+ * Octeon always uses UTC time, so timezone information is not sent.
+ */
+ ktime_get_real_ts64(&ts);
+ ret = snprintf(boottime, MAX_BOOTTIME_SIZE,
+ " time_sec=%lld time_nsec=%ld",
+ (s64)ts.tv_sec, ts.tv_nsec);
+ if ((sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd))) <
+ ret) {
+ dev_err(&oct->pci_dev->dev, "Boot command buffer too small\n");
+ return -EINVAL;
+ }
+ strncat(h->bootcmd, boottime,
+ sizeof(h->bootcmd) - strnlen(h->bootcmd, sizeof(h->bootcmd)));
+
+ dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+ h->bootcmd);
+
+ /* Invoke the bootcmd */
+ ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
+ if (ret)
+ dev_info(&oct->pci_dev->dev, "Boot command send failed\n");
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
new file mode 100644
index 000000000..e159194d0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -0,0 +1,1464 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
+
+/** Default configuration
+ * for CN66XX OCTEON Models.
+ */
+static struct octeon_config default_cn66xx_conf = {
+ .card_type = LIO_210SV,
+ .card_name = LIO_210SV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ }
+ ,
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ }
+ ,
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+ ,
+};
+
+/** Default configuration
+ * for CN68XX OCTEON Model.
+ */
+
+static struct octeon_config default_cn68xx_conf = {
+ .card_type = LIO_410NV,
+ .card_name = LIO_410NV_NAME,
+
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ }
+ ,
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ }
+ ,
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .nic_if_cfg[2] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 2,
+ },
+
+ .nic_if_cfg[3] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 3,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+ ,
+};
+
+/** Default configuration
+ * for CN68XX OCTEON Model (210NV card).
+ */
+static struct octeon_config default_cn68xx_210nv_conf = {
+ .card_type = LIO_210NV,
+ .card_name = LIO_210NV_NAME,
+
+ /** IQ attributes */
+
+ .iq = {
+ .max_iqs = CN6XXX_CFG_IO_QUEUES,
+ .pending_list_size =
+ (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN6XXX_DB_MIN,
+ .db_timeout = CN6XXX_DB_TIMEOUT,
+ }
+ ,
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN6XXX_CFG_IO_QUEUES,
+ .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN6XXX_OQ_INTR_PKT,
+ .oq_intr_time = CN6XXX_OQ_INTR_TIME,
+ .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR,
+ }
+ ,
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV,
+ .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN6XXX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ /** Miscellaneous attributes */
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+ ,
+};
+
+static struct octeon_config default_cn23xx_conf = {
+ .card_type = LIO_23XX,
+ .card_name = LIO_23XX_NAME,
+ /** IQ attributes */
+ .iq = {
+ .max_iqs = CN23XX_CFG_IO_QUEUES,
+ .pending_list_size = (CN23XX_DEFAULT_IQ_DESCRIPTORS *
+ CN23XX_CFG_IO_QUEUES),
+ .instr_type = OCTEON_64BYTE_INSTR,
+ .db_min = CN23XX_DB_MIN,
+ .db_timeout = CN23XX_DB_TIMEOUT,
+ .iq_intr_pkt = CN23XX_DEF_IQ_INTR_THRESHOLD,
+ },
+
+ /** OQ attributes */
+ .oq = {
+ .max_oqs = CN23XX_CFG_IO_QUEUES,
+ .pkts_per_intr = CN23XX_OQ_PKTSPER_INTR,
+ .refill_threshold = CN23XX_OQ_REFIL_THRESHOLD,
+ .oq_intr_pkt = CN23XX_OQ_INTR_PKT,
+ .oq_intr_time = CN23XX_OQ_INTR_TIME,
+ },
+
+ .num_nic_ports = DEFAULT_NUM_NIC_PORTS_23XX,
+ .num_def_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,
+ .num_def_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,
+ .def_rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ /* For ethernet interface 0: Port cfg Attributes */
+ .nic_if_cfg[0] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 0,
+ },
+
+ .nic_if_cfg[1] = {
+ /* Max Txqs: Half for each of the two ports :max_iq/2 */
+ .max_txqs = MAX_TXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_txqs */
+ .num_txqs = DEF_TXQS_PER_INTF,
+
+ /* Max Rxqs: Half for each of the two ports :max_oq/2 */
+ .max_rxqs = MAX_RXQS_PER_INTF,
+
+ /* Actual configured value. Range could be: 1...max_rxqs */
+ .num_rxqs = DEF_RXQS_PER_INTF,
+
+ /* Num of desc for rx rings */
+ .num_rx_descs = CN23XX_DEFAULT_OQ_DESCRIPTORS,
+
+ /* Num of desc for tx rings */
+ .num_tx_descs = CN23XX_DEFAULT_IQ_DESCRIPTORS,
+
+ /* SKB size. We need not change buf size even for Jumbo frames.
+ * Octeon can send jumbo frames in 4 consecutive descriptors.
+ */
+ .rx_buf_size = CN23XX_OQ_BUF_SIZE,
+
+ .base_queue = BASE_QUEUE_NOT_REQUESTED,
+
+ .gmx_port_id = 1,
+ },
+
+ .misc = {
+ /* Host driver link query interval */
+ .oct_link_query_interval = 100,
+
+ /* Octeon link query interval */
+ .host_link_query_interval = 500,
+
+ .enable_sli_oq_bp = 0,
+
+ /* Control queue group */
+ .ctrlq_grp = 1,
+ }
+};
+
+static struct octeon_config_ptr {
+ u32 conf_type;
+} oct_conf_info[MAX_OCTEON_DEVICES] = {
+ {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ }, {
+ OCTEON_CONFIG_TYPE_DEFAULT,
+ },
+};
+
+static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
+ "BEGIN", "PCI-ENABLE-DONE", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
+ "DROQ-INIT-DONE", "MBOX-SETUP-DONE", "MSIX-ALLOC-VECTOR-DONE",
+ "INTR-SET-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
+ "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+ "INVALID"
+};
+
+static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = {
+ "BASE", "NIC", "UNKNOWN"};
+
+static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES];
+static atomic_t adapter_refcounts[MAX_OCTEON_DEVICES];
+static atomic_t adapter_fw_states[MAX_OCTEON_DEVICES];
+
+static u32 octeon_device_count;
+/* locks device array (i.e. octeon_device[]) */
+static DEFINE_SPINLOCK(octeon_devices_lock);
+
+static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES];
+
+static void oct_set_config_info(int oct_id, int conf_type)
+{
+ if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1))
+ conf_type = OCTEON_CONFIG_TYPE_DEFAULT;
+ oct_conf_info[oct_id].conf_type = conf_type;
+}
+
+void octeon_init_device_list(int conf_type)
+{
+ int i;
+
+ memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES));
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ oct_set_config_info(i, conf_type);
+}
+
+static void *__retrieve_octeon_config_info(struct octeon_device *oct,
+ u16 card_type)
+{
+ u32 oct_id = oct->octeon_id;
+ void *ret = NULL;
+
+ switch (oct_conf_info[oct_id].conf_type) {
+ case OCTEON_CONFIG_TYPE_DEFAULT:
+ if (oct->chip_id == OCTEON_CN66XX) {
+ ret = &default_cn66xx_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_210NV)) {
+ ret = &default_cn68xx_210nv_conf;
+ } else if ((oct->chip_id == OCTEON_CN68XX) &&
+ (card_type == LIO_410NV)) {
+ ret = &default_cn68xx_conf;
+ } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) {
+ ret = &default_cn23xx_conf;
+ } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
+ ret = &default_cn23xx_conf;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int __verify_octeon_config_info(struct octeon_device *oct, void *conf)
+{
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX:
+ return lio_validate_cn6xxx_config_info(oct, conf);
+ case OCTEON_CN23XX_PF_VID:
+ case OCTEON_CN23XX_VF_VID:
+ return 0;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type)
+{
+ void *conf = NULL;
+
+ conf = __retrieve_octeon_config_info(oct, card_type);
+ if (!conf)
+ return NULL;
+
+ if (__verify_octeon_config_info(oct, conf)) {
+ dev_err(&oct->pci_dev->dev, "Configuration verification failed\n");
+ return NULL;
+ }
+
+ return conf;
+}
+
+char *lio_get_state_string(atomic_t *state_ptr)
+{
+ s32 istate = (s32)atomic_read(state_ptr);
+
+ if (istate > OCT_DEV_STATES || istate < 0)
+ return oct_dev_state_str[OCT_DEV_STATE_INVALID];
+ return oct_dev_state_str[istate];
+}
+
+static char *get_oct_app_string(u32 app_mode)
+{
+ if (app_mode <= CVM_DRV_APP_END)
+ return oct_dev_app_str[app_mode - CVM_DRV_APP_START];
+ return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
+}
+
+void octeon_free_device_mem(struct octeon_device *oct)
+{
+ int i;
+
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (oct->io_qmask.oq & BIT_ULL(i))
+ vfree(oct->droq[i]);
+ }
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (oct->io_qmask.iq & BIT_ULL(i))
+ vfree(oct->instr_queue[i]);
+ }
+
+ i = oct->octeon_id;
+ vfree(oct);
+
+ octeon_device[i] = NULL;
+ octeon_device_count--;
+}
+
+static struct octeon_device *octeon_allocate_device_mem(u32 pci_id,
+ u32 priv_size)
+{
+ struct octeon_device *oct;
+ u8 *buf = NULL;
+ u32 octdevsize = 0, configsize = 0, size;
+
+ switch (pci_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ configsize = sizeof(struct octeon_cn6xxx);
+ break;
+
+ case OCTEON_CN23XX_PF_VID:
+ configsize = sizeof(struct octeon_cn23xx_pf);
+ break;
+ case OCTEON_CN23XX_VF_VID:
+ configsize = sizeof(struct octeon_cn23xx_vf);
+ break;
+ default:
+ pr_err("%s: Unknown PCI Device: 0x%x\n",
+ __func__,
+ pci_id);
+ return NULL;
+ }
+
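+ /* Round each region up to an 8-byte boundary so that the private
+ * area, chip configuration and dispatch list laid out after the
+ * octeon_device structure in one vzalloc'd buffer stay aligned.
+ */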
+ if (configsize & 0x7)
+ configsize += (8 - (configsize & 0x7));
+
+ octdevsize = sizeof(struct octeon_device);
+ if (octdevsize & 0x7)
+ octdevsize += (8 - (octdevsize & 0x7));
+
+ if (priv_size & 0x7)
+ priv_size += (8 - (priv_size & 0x7));
+
+ size = octdevsize + priv_size + configsize +
+ (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE);
+
+ buf = vzalloc(size);
+ if (!buf)
+ return NULL;
+
+ oct = (struct octeon_device *)buf;
+ oct->priv = (void *)(buf + octdevsize);
+ oct->chip = (void *)(buf + octdevsize + priv_size);
+ oct->dispatch.dlist = (struct octeon_dispatch *)
+ (buf + octdevsize + priv_size + configsize);
+
+ return oct;
+}
+
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size)
+{
+ u32 oct_idx = 0;
+ struct octeon_device *oct = NULL;
+
+ spin_lock(&octeon_devices_lock);
+
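+ /* Find the first free slot in the global octeon_device[] table;
+ * the slot index becomes this device's octeon_id.
+ */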
+ for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++)
+ if (!octeon_device[oct_idx])
+ break;
+
+ if (oct_idx < MAX_OCTEON_DEVICES) {
+ oct = octeon_allocate_device_mem(pci_id, priv_size);
+ if (oct) {
+ octeon_device_count++;
+ octeon_device[oct_idx] = oct;
+ }
+ }
+
+ spin_unlock(&octeon_devices_lock);
+ if (!oct)
+ return NULL;
+
+ spin_lock_init(&oct->pci_win_lock);
+ spin_lock_init(&oct->mem_access_lock);
+
+ oct->octeon_id = oct_idx;
+ snprintf(oct->device_name, sizeof(oct->device_name),
+ "LiquidIO%d", (oct->octeon_id));
+
+ return oct;
+}
+
+/** Register a device's bus location at initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf)
+{
+ int idx, refcount;
+
+ oct->loc.bus = bus;
+ oct->loc.dev = dev;
+ oct->loc.func = func;
+
+ oct->adapter_refcount = &adapter_refcounts[oct->octeon_id];
+ atomic_set(oct->adapter_refcount, 0);
+
+ /* Like the reference count, the f/w state is shared 'per-adapter' */
+ oct->adapter_fw_state = &adapter_fw_states[oct->octeon_id];
+ atomic_set(oct->adapter_fw_state, FW_NEEDS_TO_BE_LOADED);
+
+ spin_lock(&octeon_devices_lock);
+ for (idx = (int)oct->octeon_id - 1; idx >= 0; idx--) {
+ if (!octeon_device[idx]) {
+ dev_err(&oct->pci_dev->dev,
+ "%s: Internal driver error, missing dev",
+ __func__);
+ spin_unlock(&octeon_devices_lock);
+ atomic_inc(oct->adapter_refcount);
+ return 1; /* here, refcount is guaranteed to be 1 */
+ }
+ /* If another device is at same bus/dev, use its refcounter
+ * (and f/w state variable).
+ */
+ if ((octeon_device[idx]->loc.bus == bus) &&
+ (octeon_device[idx]->loc.dev == dev)) {
+ oct->adapter_refcount =
+ octeon_device[idx]->adapter_refcount;
+ oct->adapter_fw_state =
+ octeon_device[idx]->adapter_fw_state;
+ break;
+ }
+ }
+ spin_unlock(&octeon_devices_lock);
+
+ atomic_inc(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %02x:%02x:%d refcount %u", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
+
+/** Deregister a device at de-initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct)
+{
+ int refcount;
+
+ atomic_dec(oct->adapter_refcount);
+ refcount = atomic_read(oct->adapter_refcount);
+
+ dev_dbg(&oct->pci_dev->dev, "%s: %04d:%02d:%d refcount %u", __func__,
+ oct->loc.bus, oct->loc.dev, oct->loc.func, refcount);
+
+ return refcount;
+}
+
+int
+octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs)
+{
+ struct octeon_ioq_vector *ioq_vector;
+ int cpu_num;
+ int size;
+ int i;
+
+ size = sizeof(struct octeon_ioq_vector) * num_ioqs;
+
+ oct->ioq_vector = vzalloc(size);
+ if (!oct->ioq_vector)
+ return -1;
+ for (i = 0; i < num_ioqs; i++) {
+ ioq_vector = &oct->ioq_vector[i];
+ ioq_vector->oct_dev = oct;
+ ioq_vector->iq_index = i;
+ ioq_vector->droq_index = i;
+ ioq_vector->mbox = oct->mbox[i];
+
+ cpu_num = i % num_online_cpus();
+ cpumask_set_cpu(cpu_num, &ioq_vector->affinity_mask);
+
+ if (oct->chip_id == OCTEON_CN23XX_PF_VID)
+ ioq_vector->ioq_num = i + oct->sriov_info.pf_srn;
+ else
+ ioq_vector->ioq_num = i;
+ }
+
+ return 0;
+}
+
+void
+octeon_free_ioq_vector(struct octeon_device *oct)
+{
+ vfree(oct->ioq_vector);
+}
+
+/* this function is only for setting up the first queue */
+int octeon_setup_instr_queues(struct octeon_device *oct)
+{
+ u32 num_descs = 0;
+ u32 iq_no = 0;
+ union oct_txpciq txpciq;
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ if (OCTEON_CN6XXX(oct))
+ num_descs =
+ CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn6xxx));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_pf));
+ else if (OCTEON_CN23XX_VF(oct))
+ num_descs = CFG_GET_NUM_DEF_TX_DESCS(CHIP_CONF(oct, cn23xx_vf));
+
+ oct->num_iqs = 0;
+
+ oct->instr_queue[0] = vzalloc_node(sizeof(*oct->instr_queue[0]),
+ numa_node);
+ if (!oct->instr_queue[0])
+ oct->instr_queue[0] =
+ vzalloc(sizeof(struct octeon_instr_queue));
+ if (!oct->instr_queue[0])
+ return 1;
+ memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
+ oct->instr_queue[0]->q_index = 0;
+ oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
+ oct->instr_queue[0]->ifidx = 0;
+ txpciq.u64 = 0;
+ txpciq.s.q_no = iq_no;
+ txpciq.s.pkind = oct->pfvf_hsword.pkind;
+ txpciq.s.use_qpg = 0;
+ txpciq.s.qpg = 0;
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
+ /* prevent memory leak */
+ vfree(oct->instr_queue[0]);
+ oct->instr_queue[0] = NULL;
+ return 1;
+ }
+
+ oct->num_iqs++;
+ return 0;
+}
+
+int octeon_setup_output_queues(struct octeon_device *oct)
+{
+ u32 num_descs = 0;
+ u32 desc_size = 0;
+ u32 oq_no = 0;
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ if (OCTEON_CN6XXX(oct)) {
+ num_descs =
+ CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn6xxx));
+ desc_size =
+ CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn6xxx));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_pf));
+ desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_pf));
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ num_descs = CFG_GET_NUM_DEF_RX_DESCS(CHIP_CONF(oct, cn23xx_vf));
+ desc_size = CFG_GET_DEF_RX_BUF_SIZE(CHIP_CONF(oct, cn23xx_vf));
+ }
+ oct->num_oqs = 0;
+ oct->droq[0] = vzalloc_node(sizeof(*oct->droq[0]), numa_node);
+ if (!oct->droq[0])
+ oct->droq[0] = vzalloc(sizeof(*oct->droq[0]));
+ if (!oct->droq[0])
+ return 1;
+
+ if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
+ vfree(oct->droq[oq_no]);
+ oct->droq[oq_no] = NULL;
+ return 1;
+ }
+ oct->num_oqs++;
+
+ return 0;
+}
+
+int octeon_set_io_queues_off(struct octeon_device *oct)
+{
+ int loop = BUSY_READING_REG_VF_LOOP_COUNT;
+
+ if (OCTEON_CN6XXX(oct)) {
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
+ } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) {
+ u32 q_no;
+
+ /* IOQs will already be in reset.
+ * If RST bit is set, wait for quiet bit to be set.
+ * Once quiet bit is set, clear the RST bit.
+ */
+ for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {
+ u64 reg_val = octeon_read_csr64(
+ oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));
+
+ while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
+ !(reg_val & CN23XX_PKT_INPUT_CTL_QUIET) &&
+ loop) {
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ loop--;
+ }
+ if (!loop) {
+ dev_err(&oct->pci_dev->dev,
+ "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
+ q_no);
+ return -1;
+ }
+
+ reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
+ octeon_write_csr64(oct,
+ CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
+ reg_val);
+
+ reg_val = octeon_read_csr64(
+ oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
+ if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
+ dev_err(&oct->pci_dev->dev,
+ "unable to reset qno %u\n", q_no);
+ return -1;
+ }
+ }
+ }
+ return 0;
+}
+
+void octeon_set_droq_pkt_op(struct octeon_device *oct,
+ u32 q_no,
+ u32 enable)
+{
+ u32 reg_val = 0;
+
+ /* Enable or disable the o/p queue for this Octeon, as requested. */
+ if (OCTEON_CN6XXX(oct)) {
+ reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
+
+ if (enable)
+ reg_val = reg_val | (1 << q_no);
+ else
+ reg_val = reg_val & (~(1 << q_no));
+
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val);
+ }
+}
+
+int octeon_init_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+
+ oct->dispatch.count = 0;
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ oct->dispatch.dlist[i].opcode = 0;
+ INIT_LIST_HEAD(&oct->dispatch.dlist[i].list);
+ }
+
+ for (i = 0; i <= REQTYPE_LAST; i++)
+ octeon_register_reqtype_free_fn(oct, i, NULL);
+
+ spin_lock_init(&oct->dispatch.lock);
+
+ return 0;
+}
+
+void octeon_delete_dispatch_list(struct octeon_device *oct)
+{
+ u32 i;
+ struct list_head freelist, *temp, *tmp2;
+
+ INIT_LIST_HEAD(&freelist);
+
+ spin_lock_bh(&oct->dispatch.lock);
+
+ for (i = 0; i < DISPATCH_LIST_SIZE; i++) {
+ struct list_head *dispatch;
+
+ dispatch = &oct->dispatch.dlist[i].list;
+ while (dispatch->next != dispatch) {
+ temp = dispatch->next;
+ list_move_tail(temp, &freelist);
+ }
+
+ oct->dispatch.dlist[i].opcode = 0;
+ }
+
+ oct->dispatch.count = 0;
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ list_for_each_safe(temp, tmp2, &freelist) {
+ list_del(temp);
+ kfree(temp);
+ }
+}
+
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode)
+{
+ u32 idx;
+ struct list_head *dispatch;
+ octeon_dispatch_fn_t fn = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (!(octeon_dev->dispatch.dlist[idx].opcode)) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn = octeon_dev->dispatch.dlist[idx].dispatch_fn;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn = ((struct octeon_dispatch *)
+ dispatch)->dispatch_fn;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn;
+}
+
+/* octeon_register_dispatch_fn
+ * Parameters:
+ * octeon_id - id of the octeon device.
+ * opcode - opcode for which driver should call the registered function
+ * subcode - subcode for which driver should call the registered function
+ * fn - The function to call when a packet with "opcode" arrives in
+ * octeon output queues.
+ * fn_arg - The argument to be passed when calling function "fn".
+ * Description:
+ * Registers a function and its argument to be called when a packet
+ * arrives in Octeon output queues with "opcode".
+ * Returns:
+ * Success: 0
+ * Failure: 1
+ * Locks:
+ * No locks are held.
+ */
+int
+octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg)
+{
+ u32 idx;
+ octeon_dispatch_fn_t pfn;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&oct->dispatch.lock);
+ /* Add dispatch function to first level of lookup table */
+ if (oct->dispatch.dlist[idx].opcode == 0) {
+ oct->dispatch.dlist[idx].opcode = combined_opcode;
+ oct->dispatch.dlist[idx].dispatch_fn = fn;
+ oct->dispatch.dlist[idx].arg = fn_arg;
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+ return 0;
+ }
+
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ /* Check if there was a function already registered for this
+ * opcode/subcode.
+ */
+ pfn = octeon_get_dispatch(oct, opcode, subcode);
+ if (!pfn) {
+ struct octeon_dispatch *dispatch;
+
+ dev_dbg(&oct->pci_dev->dev,
+ "Adding opcode to dispatch list linked list\n");
+ dispatch = kmalloc(sizeof(*dispatch), GFP_KERNEL);
+ if (!dispatch)
+ return 1;
+
+ dispatch->opcode = combined_opcode;
+ dispatch->dispatch_fn = fn;
+ dispatch->arg = fn_arg;
+
+ /* Add dispatch function to linked list of fn ptrs
+ * at the hashed index.
+ */
+ spin_lock_bh(&oct->dispatch.lock);
+ list_add(&dispatch->list, &oct->dispatch.dlist[idx].list);
+ oct->dispatch.count++;
+ spin_unlock_bh(&oct->dispatch.lock);
+
+ } else {
+ if (pfn == fn &&
+ octeon_get_dispatch_arg(oct, opcode, subcode) == fn_arg)
+ return 0;
+
+ dev_err(&oct->pci_dev->dev,
+ "Found previously registered dispatch fn for opcode/subcode: %x/%x\n",
+ opcode, subcode);
+ return 1;
+ }
+
+ return 0;
+}
+
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
+{
+ u32 i;
+ char app_name[16];
+ struct octeon_device *oct = (struct octeon_device *)buf;
+ struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+ struct octeon_core_setup *cs = NULL;
+ u32 num_nic_ports = 0;
+
+ if (OCTEON_CN6XXX(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn6xxx));
+ else if (OCTEON_CN23XX_PF(oct))
+ num_nic_ports =
+ CFG_GET_NUM_NIC_PORTS(CHIP_CONF(oct, cn23xx_pf));
+
+ if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) {
+ dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n",
+ atomic_read(&oct->status));
+ goto core_drv_init_err;
+ }
+
+ strncpy(app_name,
+ get_oct_app_string(
+ (u32)recv_pkt->rh.r_core_drv_init.app_mode),
+ sizeof(app_name) - 1);
+ oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) {
+ oct->fw_info.max_nic_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports;
+ oct->fw_info.num_gmx_ports =
+ (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports;
+ }
+
+ if (oct->fw_info.max_nic_ports < num_nic_ports) {
+ dev_err(&oct->pci_dev->dev,
+ "Config has more ports than firmware allows (%d > %d).\n",
+ num_nic_ports, oct->fw_info.max_nic_ports);
+ goto core_drv_init_err;
+ }
+ oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags;
+ oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+ oct->pfvf_hsword.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode;
+
+ oct->pfvf_hsword.pkind = recv_pkt->rh.r_core_drv_init.pkind;
+
+ for (i = 0; i < oct->num_iqs; i++)
+ oct->instr_queue[i]->txpciq.s.pkind = oct->pfvf_hsword.pkind;
+
+ atomic_set(&oct->status, OCT_DEV_CORE_OK);
+
+ cs = &core_setup[oct->octeon_id];
+
+ if (recv_pkt->buffer_size[0] != (sizeof(*cs) + OCT_DROQ_INFO_SIZE)) {
+ dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n",
+ (u32)sizeof(*cs),
+ recv_pkt->buffer_size[0]);
+ }
+
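+ /* The core-setup payload follows the DROQ info header in the first
+ * receive buffer; copy it out before extracting the board info.
+ */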
+ memcpy(cs, get_rbd(
+ recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE, sizeof(*cs));
+
+ strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME);
+ strncpy(oct->boardinfo.serial_number, cs->board_serial_number,
+ OCT_SERIAL_LEN);
+
+ octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3));
+
+ oct->boardinfo.major = cs->board_rev_major;
+ oct->boardinfo.minor = cs->board_rev_minor;
+
+ dev_info(&oct->pci_dev->dev,
+ "Running %s (%llu Hz)\n",
+ app_name, CVM_CAST64(cs->corefreq));
+
+core_drv_init_err:
+ for (i = 0; i < recv_pkt->buffer_count; i++)
+ recv_buffer_free(recv_pkt->buffer_ptr[i]);
+ octeon_free_recv_info(recv_info);
+ return 0;
+}
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
+
+{
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
+ (oct->io_qmask.iq & BIT_ULL(q_no)))
+ return oct->instr_queue[q_no]->max_count;
+
+ return -1;
+}
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
+{
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
+ (oct->io_qmask.oq & BIT_ULL(q_no)))
+ return oct->droq[q_no]->max_count;
+ return -1;
+}
+
+/* Returns the OCTEON-specific configuration used in the host-firmware handshake */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct)
+{
+ struct octeon_config *default_oct_conf = NULL;
+
+ /* check the OCTEON Device model & return the corresponding octeon
+ * configuration
+ */
+
+ if (OCTEON_CN6XXX(oct)) {
+ default_oct_conf =
+ (struct octeon_config *)(CHIP_CONF(oct, cn6xxx));
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_CONF(oct, cn23xx_pf));
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ default_oct_conf = (struct octeon_config *)
+ (CHIP_CONF(oct, cn23xx_vf));
+ }
+ return default_oct_conf;
+}
+
+/* scratch register address is the same in all the OCT-II and CN70XX models */
+#define CNXX_SLI_SCRATCH1 0x3C0
+
+/* Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id)
+{
+ if (octeon_id >= MAX_OCTEON_DEVICES)
+ return NULL;
+ else
+ return octeon_device[octeon_id];
+}
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
+{
+ u64 val64;
+ unsigned long flags;
+ u32 addrhi;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ /* The windowed read happens when the LSB of the addr is written.
+ * So write MSB first
+ */
+ addrhi = (addr >> 32);
+ if ((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX) ||
+ (oct->chip_id == OCTEON_CN23XX_PF_VID))
+ addrhi |= 0x00060000;
+ writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
+
+ /* Read back to preserve ordering of writes */
+ readl(oct->reg_list.pci_win_rd_addr_hi);
+
+ writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
+ readl(oct->reg_list.pci_win_rd_addr_lo);
+
+ val64 = readq(oct->reg_list.pci_win_rd_data);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+
+ return val64;
+}
+
+void lio_pci_writeq(struct octeon_device *oct,
+ u64 val,
+ u64 addr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&oct->pci_win_lock, flags);
+
+ writeq(addr, oct->reg_list.pci_win_wr_addr);
+
+ /* The write happens when the LSB is written. So write MSB first. */
+ writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
+ /* Read the MSB to ensure ordering of writes. */
+ readl(oct->reg_list.pci_win_wr_data_hi);
+
+ writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
+
+ spin_unlock_irqrestore(&oct->pci_win_lock, flags);
+}
+
+int octeon_mem_access_ok(struct octeon_device *oct)
+{
+ u64 access_okay = 0;
+ u64 lmc0_reset_ctl;
+
+ /* Check to make sure a DDR interface is enabled */
+ if (OCTEON_CN23XX_PF(oct)) {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN23XX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN23XX_LMC0_RESET_CTL_DDR3RST_MASK);
+ } else {
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
+ access_okay =
+ (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
+ }
+
+ return access_okay ? 0 : 1;
+}
+
+int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
+{
+ int ret = 1;
+ u32 ms;
+
+ if (!timeout)
+ return ret;
+
+ for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
+ ms += HZ / 10) {
+ ret = octeon_mem_access_ok(oct);
+
+ /* wait 100 ms */
+ if (ret)
+ schedule_timeout_uninterruptible(HZ / 10);
+ }
+
+ return ret;
+}
+
+/* Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev)
+{
+ struct octeon_device *octeon_dev = (struct octeon_device *)dev;
+ u32 i;
+
+ for (i = 0; i < MAX_OCTEON_DEVICES; i++)
+ if (octeon_device[i] == octeon_dev)
+ return octeon_dev->octeon_id;
+ return -1;
+}
+
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
+{
+ u64 instr_cnt;
+ u32 pkts_pend;
+ struct octeon_device *oct = NULL;
+
+ /* the whole thing needs to be atomic, ideally */
+ if (droq) {
+ pkts_pend = (u32)atomic_read(&droq->pkts_pending);
+ writel(droq->pkt_count - pkts_pend, droq->pkts_sent_reg);
+ droq->pkt_count = pkts_pend;
+ oct = droq->oct_dev;
+ }
+ if (iq) {
+ spin_lock_bh(&iq->lock);
+ writel(iq->pkts_processed, iq->inst_cnt_reg);
+ iq->pkt_in_done -= iq->pkts_processed;
+ iq->pkts_processed = 0;
+ /* this write needs to be flushed before we release the lock */
+ spin_unlock_bh(&iq->lock);
+ oct = iq->oct_dev;
+ }
+ /* Write RESEND. Writing RESEND in SLI_PKTX_CNTS should be enough
+ * to trigger tx interrupts as well, if they are pending.
+ */
+ if (oct && (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))) {
+ if (droq)
+ writeq(CN23XX_INTR_RESEND, droq->pkts_sent_reg);
+ /* we race with firmware here; read and write the IN_DONE_CNTS */
+ else if (iq) {
+ instr_cnt = readq(iq->inst_cnt_reg);
+ writeq(((instr_cnt & 0xFFFFFFFF00000000ULL) |
+ CN23XX_INTR_RESEND),
+ iq->inst_cnt_reg);
+ }
+ }
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
new file mode 100644
index 000000000..fb380b4f3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -0,0 +1,911 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file octeon_device.h
+ * \brief Host Driver: This file defines the octeon device structure.
+ */
+
+#ifndef _OCTEON_DEVICE_H_
+#define _OCTEON_DEVICE_H_
+
+#include <linux/interrupt.h>
+#include <net/devlink.h>
+
+/** PCI VendorId Device Id */
+#define OCTEON_CN68XX_PCIID 0x91177d
+#define OCTEON_CN66XX_PCIID 0x92177d
+#define OCTEON_CN23XX_PCIID_PF 0x9702177d
+/** The driver identifies chips by these Ids, created by combining
+ * DeviceId+RevisionId; where the Revision Id is not used to distinguish
+ * between chips, a value of 0 is used for the revision id.
+ */
+#define OCTEON_CN68XX 0x0091
+#define OCTEON_CN66XX 0x0092
+#define OCTEON_CN23XX_PF_VID 0x9702
+#define OCTEON_CN23XX_VF_VID 0x9712
+
+/**RevisionId for the chips */
+#define OCTEON_CN23XX_REV_1_0 0x00
+#define OCTEON_CN23XX_REV_1_1 0x01
+#define OCTEON_CN23XX_REV_2_0 0x80
+
+/**SubsystemId for the chips */
+#define OCTEON_CN2350_10GB_SUBSYS_ID_1 0X3177d
+#define OCTEON_CN2350_10GB_SUBSYS_ID_2 0X4177d
+#define OCTEON_CN2360_10GB_SUBSYS_ID 0X5177d
+#define OCTEON_CN2350_25GB_SUBSYS_ID 0X7177d
+#define OCTEON_CN2360_25GB_SUBSYS_ID 0X6177d
+
+/** Endian-swap modes supported by Octeon. */
+enum octeon_pci_swap_mode {
+ OCTEON_PCI_PASSTHROUGH = 0,
+ OCTEON_PCI_64BIT_SWAP = 1,
+ OCTEON_PCI_32BIT_BYTE_SWAP = 2,
+ OCTEON_PCI_32BIT_LW_SWAP = 3
+};
+
+enum lio_fw_state {
+ FW_IS_PRELOADED = 0,
+ FW_NEEDS_TO_BE_LOADED = 1,
+ FW_IS_BEING_LOADED = 2,
+ FW_HAS_BEEN_LOADED = 3,
+};
+
+enum {
+ OCTEON_CONFIG_TYPE_DEFAULT = 0,
+ NUM_OCTEON_CONFS,
+};
+
+#define OCTEON_INPUT_INTR (1)
+#define OCTEON_OUTPUT_INTR (2)
+#define OCTEON_MBOX_INTR (4)
+#define OCTEON_ALL_INTR 0xff
+
+/*--------------- PCI BAR1 index registers -------------*/
+
+/* BAR1 Mask */
+#define PCI_BAR1_ENABLE_CA 1
+#define PCI_BAR1_ENDIAN_MODE OCTEON_PCI_64BIT_SWAP
+#define PCI_BAR1_ENTRY_VALID 1
+#define PCI_BAR1_MASK ((PCI_BAR1_ENABLE_CA << 3) \
+ | (PCI_BAR1_ENDIAN_MODE << 1) \
+ | PCI_BAR1_ENTRY_VALID)
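+
+/* With the values above this works out to (1 << 3) | (1 << 1) | 1 = 0xb:
+ * CA enabled, 64-bit swap endian mode, entry valid.
+ */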
+
+/** Octeon Device state.
+ * Each octeon device goes through each of these states
+ * as it is initialized.
+ */
+#define OCT_DEV_BEGIN_STATE 0x0
+#define OCT_DEV_PCI_ENABLE_DONE 0x1
+#define OCT_DEV_PCI_MAP_DONE 0x2
+#define OCT_DEV_DISPATCH_INIT_DONE 0x3
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x4
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x5
+#define OCT_DEV_RESP_LIST_INIT_DONE 0x6
+#define OCT_DEV_DROQ_INIT_DONE 0x7
+#define OCT_DEV_MBOX_SETUP_DONE 0x8
+#define OCT_DEV_MSIX_ALLOC_VECTOR_DONE 0x9
+#define OCT_DEV_INTR_SET_DONE 0xa
+#define OCT_DEV_IO_QUEUES_DONE 0xb
+#define OCT_DEV_CONSOLE_INIT_DONE 0xc
+#define OCT_DEV_HOST_OK 0xd
+#define OCT_DEV_CORE_OK 0xe
+#define OCT_DEV_RUNNING 0xf
+#define OCT_DEV_IN_RESET 0x10
+#define OCT_DEV_STATE_INVALID 0x11
+
+#define OCT_DEV_STATES OCT_DEV_STATE_INVALID
+
+/** Octeon Device interrupts
+ * These interrupt bits are set in the int_status field of
+ * the octeon_device structure.
+ */
+#define OCT_DEV_INTR_DMA0_FORCE 0x01
+#define OCT_DEV_INTR_DMA1_FORCE 0x02
+#define OCT_DEV_INTR_PKT_DATA 0x04
+
+#define LIO_RESET_SECS (3)
+
+/*---------------------------DISPATCH LIST-------------------------------*/
+
+/** The dispatch list entry.
+ * The driver keeps a record of functions registered for each
+ * response header opcode in this structure. Since the opcode is
+ * hashed to index into the driver's list, more than one opcode
+ * can hash to the same entry, in which case the list field points
+ * to a linked list with the other entries.
+ */
+struct octeon_dispatch {
+ /** List head for this entry */
+ struct list_head list;
+
+ /** The opcode for which the dispatch function & arg should be used */
+ u16 opcode;
+
+ /** The function to be called for a packet received by the driver */
+ octeon_dispatch_fn_t dispatch_fn;
+
+ /* The application specified argument to be passed to the above
+ * function along with the received packet
+ */
+ void *arg;
+};
+
+/** The dispatch list structure. */
+struct octeon_dispatch_list {
+ /** access to dispatch list must be atomic */
+ spinlock_t lock;
+
+ /** Count of dispatch functions currently registered */
+ u32 count;
+
+ /** The list of dispatch functions */
+ struct octeon_dispatch *dlist;
+};
+
+/*----------------------- THE OCTEON DEVICE ---------------------------*/
+
+#define OCT_MEM_REGIONS 3
+/** PCI address space mapping information.
+ * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of
+ * Octeon gets mapped to different physical address spaces in
+ * the kernel.
+ */
+struct octeon_mmio {
+ /** PCI address to which the BAR is mapped. */
+ u64 start;
+
+ /** Length of this PCI address space. */
+ u32 len;
+
+ /** Length that has been mapped to phys. address space. */
+ u32 mapped_len;
+
+ /** The physical address to which the PCI address space is mapped. */
+ u8 __iomem *hw_addr;
+
+ /** Flag indicating the mapping was successful. */
+ u32 done;
+};
+
+#define MAX_OCTEON_MAPS 32
+
+struct octeon_io_enable {
+ u64 iq;
+ u64 oq;
+ u64 iq64B;
+};
+
+struct octeon_reg_list {
+ u32 __iomem *pci_win_wr_addr_hi;
+ u32 __iomem *pci_win_wr_addr_lo;
+ u64 __iomem *pci_win_wr_addr;
+
+ u32 __iomem *pci_win_rd_addr_hi;
+ u32 __iomem *pci_win_rd_addr_lo;
+ u64 __iomem *pci_win_rd_addr;
+
+ u32 __iomem *pci_win_wr_data_hi;
+ u32 __iomem *pci_win_wr_data_lo;
+ u64 __iomem *pci_win_wr_data;
+
+ u32 __iomem *pci_win_rd_data_hi;
+ u32 __iomem *pci_win_rd_data_lo;
+ u64 __iomem *pci_win_rd_data;
+};
+
+#define OCTEON_CONSOLE_MAX_READ_BYTES 512
+typedef int (*octeon_console_print_fn)(struct octeon_device *oct,
+ u32 num, char *pre, char *suf);
+struct octeon_console {
+ u32 active;
+ u32 waiting;
+ u64 addr;
+ u32 buffer_size;
+ u64 input_base_addr;
+ u64 output_base_addr;
+ octeon_console_print_fn print;
+ char leftover[OCTEON_CONSOLE_MAX_READ_BYTES];
+};
+
+struct octeon_board_info {
+ char name[OCT_BOARD_NAME];
+ char serial_number[OCT_SERIAL_LEN];
+ u64 major;
+ u64 minor;
+};
+
+struct octeon_fn_list {
+ void (*setup_iq_regs)(struct octeon_device *, u32);
+ void (*setup_oq_regs)(struct octeon_device *, u32);
+
+ irqreturn_t (*process_interrupt_regs)(void *);
+ u64 (*msix_interrupt_handler)(void *);
+
+ int (*setup_mbox)(struct octeon_device *);
+ int (*free_mbox)(struct octeon_device *);
+
+ int (*soft_reset)(struct octeon_device *);
+ int (*setup_device_regs)(struct octeon_device *);
+ void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
+ void (*bar1_idx_write)(struct octeon_device *, u32, u32);
+ u32 (*bar1_idx_read)(struct octeon_device *, u32);
+ u32 (*update_iq_read_idx)(struct octeon_instr_queue *);
+
+ void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
+ void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
+
+ void (*enable_interrupt)(struct octeon_device *, u8);
+ void (*disable_interrupt)(struct octeon_device *, u8);
+
+ int (*enable_io_queues)(struct octeon_device *);
+ void (*disable_io_queues)(struct octeon_device *);
+};
+
+/* Must be multiple of 8, changing breaks ABI */
+#define CVMX_BOOTMEM_NAME_LEN 128
+
+/* Structure for named memory blocks.
+ * The number of descriptors available can be changed without affecting
+ * compatibility, but name length changes require a bump in the bootmem
+ * descriptor version.
+ * Note: This structure must be naturally 64 bit aligned, as a single
+ * memory image will be used by both 32 and 64 bit programs.
+ */
+struct cvmx_bootmem_named_block_desc {
+ /** Base address of named block */
+ u64 base_addr;
+
+ /** Size actually allocated for named block */
+ u64 size;
+
+ /** name of named block */
+ char name[CVMX_BOOTMEM_NAME_LEN];
+};
+
+struct oct_fw_info {
+ u32 max_nic_ports; /** max nic ports for the device */
+ u32 num_gmx_ports; /** num gmx ports */
+ u64 app_cap_flags; /** firmware cap flags */
+
+ /** The core application is running in this mode.
+ * See octeon-drv-opcodes.h for values.
+ */
+ u32 app_mode;
+ char liquidio_firmware_version[32];
+ /* Fields extracted from legacy string 'liquidio_firmware_version' */
+ struct {
+ u8 maj;
+ u8 min;
+ u8 rev;
+ } ver;
+};
+
+#define OCT_FW_VER(maj, min, rev) \
+ (((u32)(maj) << 16) | ((u32)(min) << 8) | ((u32)(rev)))
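+/* For example, OCT_FW_VER(1, 7, 2) packs to 0x010702. */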
+
+/* wrappers around work structs */
+struct cavium_wk {
+ struct delayed_work work;
+ void *ctxptr;
+ u64 ctxul;
+};
+
+struct cavium_wq {
+ struct workqueue_struct *wq;
+ struct cavium_wk wk;
+};
+
+struct octdev_props {
+ /* Each interface in the Octeon device has a network
+ * device pointer (used for OS specific calls).
+ */
+ int rx_on;
+ int fec;
+ int fec_boot;
+ int napi_enabled;
+ int gmxport;
+ struct net_device *netdev;
+};
+
+#define LIO_FLAG_MSIX_ENABLED 0x1
+#define MSIX_PO_INT 0x1
+#define MSIX_PI_INT 0x2
+#define MSIX_MBOX_INT 0x4
+
+struct octeon_pf_vf_hs_word {
+#ifdef __LITTLE_ENDIAN_BITFIELD
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** app that is currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+#else
+
+ /** RESERVED */
+ u64 reserved : 16;
+
+ /** app that is currently running on OCTEON */
+ u64 app_mode : 8;
+
+ /** OCTEON coprocessor clock multiplier */
+ u64 coproc_tics_per_us : 16;
+
+ /** OCTEON core clock multiplier */
+ u64 core_tics_per_us : 16;
+
+ /** PKIND value assigned for the DPI interface */
+ u64 pkind : 8;
+#endif
+};
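+
+/* The five bitfields above pack into one 64-bit PF/VF handshake word:
+ * 8 + 16 + 16 + 8 + 16 = 64 bits.
+ */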
+
+struct octeon_sriov_info {
+ /* Number of rings assigned to VF */
+ u32 rings_per_vf;
+
+ /** Max number of VF devices that can be enabled. This variable can be
+ * specified at load time, or it will be derived after allocating the
+ * PF queues. When max_vfs is derived, each VF will get one queue.
+ */
+ u32 max_vfs;
+
+ /** Number of VF devices enabled using sysfs. */
+ u32 num_vfs_alloced;
+
+ /* Actual rings left for PF device */
+ u32 num_pf_rings;
+
+ /* SRN of PF usable IO queues */
+ u32 pf_srn;
+
+ /* total pf rings */
+ u32 trs;
+
+ u32 sriov_enabled;
+
+ struct lio_trusted_vf trusted_vf;
+
+ /*lookup table that maps DPI ring number to VF pci_dev struct pointer*/
+ struct pci_dev *dpiring_to_vfpcidev_lut[MAX_POSSIBLE_VFS];
+
+ u64 vf_macaddr[MAX_POSSIBLE_VFS];
+
+ u16 vf_vlantci[MAX_POSSIBLE_VFS];
+
+ int vf_linkstate[MAX_POSSIBLE_VFS];
+
+ bool vf_spoofchk[MAX_POSSIBLE_VFS];
+
+ u64 vf_drv_loaded_mask;
+};
+
+struct octeon_ioq_vector {
+ struct octeon_device *oct_dev;
+ int iq_index;
+ int droq_index;
+ int vector;
+ struct octeon_mbox *mbox;
+ struct cpumask affinity_mask;
+ u32 ioq_num;
+};
+
+struct lio_vf_rep_list {
+ int num_vfs;
+ struct net_device *ndev[CN23XX_MAX_VFS_PER_PF];
+};
+
+struct lio_devlink_priv {
+ struct octeon_device *oct;
+};
+
+/** The Octeon device.
+ * Each Octeon device has this structure to represent all its
+ * components.
+ */
+struct octeon_device {
+ /** Lock for PCI window configuration accesses */
+ spinlock_t pci_win_lock;
+
+ /** Lock for memory accesses */
+ spinlock_t mem_access_lock;
+
+ /** PCI device pointer */
+ struct pci_dev *pci_dev;
+
+ /** Chip specific information. */
+ void *chip;
+
+ /** Number of interfaces detected in this octeon device. */
+ u32 ifcount;
+
+ struct octdev_props props[MAX_OCTEON_LINKS];
+
+ /** Octeon Chip type. */
+ u16 chip_id;
+
+ u16 rev_id;
+
+ u32 subsystem_id;
+
+ u16 pf_num;
+
+ u16 vf_num;
+
+ /** This device's id - set by the driver. */
+ u32 octeon_id;
+
+ /** This device's PCIe port used for traffic. */
+ u16 pcie_port;
+
+ u16 flags;
+#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1)
+
+ /** The state of this device */
+ atomic_t status;
+
+ /** memory mapped io range */
+ struct octeon_mmio mmio[OCT_MEM_REGIONS];
+
+ struct octeon_reg_list reg_list;
+
+ struct octeon_fn_list fn_list;
+
+ struct octeon_board_info boardinfo;
+
+ u32 num_iqs;
+
+ /* The pool containing pre allocated buffers used for soft commands */
+ struct octeon_sc_buffer_pool sc_buf_pool;
+
+ /** The input instruction queues */
+ struct octeon_instr_queue *instr_queue
+ [MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
+
+ /** The doubly-linked list of instruction response */
+ struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
+
+ u32 num_oqs;
+
+ /** The DROQ output queues */
+ struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
+
+ struct octeon_io_enable io_qmask;
+
+ /** List of dispatch functions */
+ struct octeon_dispatch_list dispatch;
+
+ u32 int_status;
+
+ u64 droq_intr;
+
+ /** Physical location of the cvmx_bootmem_desc_t in octeon memory */
+ u64 bootmem_desc_addr;
+
+ /** Placeholder memory for named blocks.
+ * Assumes single-threaded access
+ */
+ struct cvmx_bootmem_named_block_desc bootmem_named_block_desc;
+
+ /** Address of consoles descriptor */
+ u64 console_desc_addr;
+
+ /** Number of consoles available. 0 means they are inaccessible */
+ u32 num_consoles;
+
+ /* Console caches */
+ struct octeon_console console[MAX_OCTEON_MAPS];
+
+ /* Console named block info */
+ struct {
+ u64 dram_region_base;
+ int bar1_index;
+ } console_nb_info;
+
+ /* Coprocessor clock rate. */
+ u64 coproc_clock_rate;
+
+ /** The core application is running in this mode. See liquidio_common.h
+ * for values.
+ */
+ u32 app_mode;
+
+ struct oct_fw_info fw_info;
+
+ /** The name given to this device. */
+ char device_name[32];
+
+ /** Application Context */
+ void *app_ctx;
+
+ struct cavium_wq dma_comp_wq;
+
+ /** Lock for dma response list */
+ spinlock_t cmd_resp_wqlock;
+ u32 cmd_resp_state;
+
+ struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
+
+ struct cavium_wk nic_poll_work;
+
+ struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
+
+ void *priv;
+
+ int num_msix_irqs;
+
+ void *msix_entries;
+
+ /* when requesting IRQs, the names are stored here */
+ void *irq_name_storage;
+
+ struct octeon_sriov_info sriov_info;
+
+ struct octeon_pf_vf_hs_word pfvf_hsword;
+
+ int msix_on;
+
+ /** Mail Box details of each octeon queue. */
+ struct octeon_mbox *mbox[MAX_POSSIBLE_VFS];
+
+ /** IOq information for its corresponding MSI-X interrupt. */
+ struct octeon_ioq_vector *ioq_vector;
+
+ int rx_pause;
+ int tx_pause;
+
+ struct oct_link_stats link_stats; /* statistics from firmware */
+
+ /* private flags to control driver-specific features through ethtool */
+ u32 priv_flags;
+
+ void *watchdog_task;
+
+ u32 rx_coalesce_usecs;
+ u32 rx_max_coalesced_frames;
+ u32 tx_max_coalesced_frames;
+
+ bool cores_crashed;
+
+ struct {
+ int bus;
+ int dev;
+ int func;
+ } loc;
+
+ atomic_t *adapter_refcount; /* reference count of adapter */
+
+ atomic_t *adapter_fw_state; /* per-adapter, lio_fw_state */
+
+ bool ptp_enable;
+
+ struct lio_vf_rep_list vf_rep_list;
+ struct devlink *devlink;
+ enum devlink_eswitch_mode eswitch_mode;
+
+ /* for 25G NIC speed change */
+ u8 speed_boot;
+ u8 speed_setting;
+ u8 no_speed_setting;
+
+ u32 vfstats_poll;
+#define LIO_VFSTATS_POLL 10
+};
+
+#define OCT_DRV_ONLINE 1
+#define OCT_DRV_OFFLINE 2
+#define OCTEON_CN6XXX(oct) ({ \
+ typeof(oct) _oct = (oct); \
+ ((_oct->chip_id == OCTEON_CN66XX) || \
+ (_oct->chip_id == OCTEON_CN68XX)); })
+#define OCTEON_CN23XX_PF(oct) ((oct)->chip_id == OCTEON_CN23XX_PF_VID)
+#define OCTEON_CN23XX_VF(oct) ((oct)->chip_id == OCTEON_CN23XX_VF_VID)
+#define CHIP_CONF(oct, TYPE) \
+ (((struct octeon_ ## TYPE *)((oct)->chip))->conf)
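+/* e.g. CHIP_CONF(oct, cn23xx_pf) expands to
+ * ((struct octeon_cn23xx_pf *)(oct)->chip)->conf
+ */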
+
+#define MAX_IO_PENDING_PKT_COUNT 100
+
+/*------------------ Function Prototypes ----------------------*/
+
+/** Initialize device list memory */
+void octeon_init_device_list(int conf_type);
+
+/** Free memory for Input and Output queue structures for an octeon device */
+void octeon_free_device_mem(struct octeon_device *oct);
+
+/* Look up a free entry in the octeon_device table and allocate resources
+ * for the octeon_device structure for an octeon device. Called at init
+ * time.
+ */
+struct octeon_device *octeon_allocate_device(u32 pci_id,
+ u32 priv_size);
+
+/** Register a device's bus location at initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param bus - PCIe bus #
+ * @param dev - PCIe device #
+ * @param func - PCIe function #
+ * @param is_pf - TRUE for PF, FALSE for VF
+ * @return reference count of device's adapter
+ */
+int octeon_register_device(struct octeon_device *oct,
+ int bus, int dev, int func, int is_pf);
+
+/** Deregister a device at de-initialization time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return reference count of device's adapter
+ */
+int octeon_deregister_device(struct octeon_device *oct);
+
+/** Initialize the driver's dispatch list which is a mix of a hash table
+ * and a linked list. This is done at driver load time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @return 0 on success, else -ve error value
+ */
+int octeon_init_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Delete the driver's dispatch list and all registered entries.
+ * This is done at driver unload time.
+ * @param octeon_dev - pointer to the octeon device structure.
+ */
+void octeon_delete_dispatch_list(struct octeon_device *octeon_dev);
+
+/** Initialize the core device fields with the info returned by the FW.
+ * @param recv_info - Receive info structure
+ * @param buf - Receive buffer
+ */
+int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf);
+
+/** Gets the dispatch function registered to receive packets with a
+ * given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch function
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch function
+ * is to be checked.
+ *
+ * @return Success: octeon_dispatch_fn_t (dispatch function pointer)
+ * @return Failure: NULL
+ *
+ * Looks up the dispatch list to get the dispatch function for a
+ * given opcode.
+ */
+octeon_dispatch_fn_t
+octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode,
+ u16 subcode);
+
+/** Get the octeon device pointer.
+ * @param octeon_id - The id for which the octeon device pointer is required.
+ * @return Success: Octeon device pointer.
+ * @return Failure: NULL.
+ */
+struct octeon_device *lio_get_device(u32 octeon_id);
+
+/** Get the octeon id assigned to the octeon device passed as argument.
+ * This function is exported to other modules.
+ * @param dev - octeon device pointer passed as a void *.
+ * @return octeon device id
+ */
+int lio_get_device_id(void *dev);
+
+/** Read windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param addr - Address of the register to read.
+ *
+ * This routine is called to read from the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return - 64 bit value read from the register.
+ */
+
+u64 lio_pci_readq(struct octeon_device *oct, u64 addr);
+
+/** Write windowed register.
+ * @param oct - pointer to the Octeon device.
+ * @param val - Value to write
+ * @param addr - Address of the register to write
+ *
+ * This routine is called to write to the indirectly accessed
+ * Octeon registers that are visible through a PCI BAR0 mapped window
+ * register.
+ * @return Nothing.
+ */
+void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr);
+
+/* Routines for reading and writing CSRs */
+#define octeon_write_csr(oct_dev, reg_off, value) \
+ writel(value, (oct_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octeon_write_csr64(oct_dev, reg_off, val64) \
+ writeq(val64, (oct_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octeon_read_csr(oct_dev, reg_off) \
+ readl((oct_dev)->mmio[0].hw_addr + (reg_off))
+
+#define octeon_read_csr64(oct_dev, reg_off) \
+ readq((oct_dev)->mmio[0].hw_addr + (reg_off))
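+
+/* Usage sketch: these accessors take a BAR0-relative register offset, e.g.
+ *	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, 0);
+ * where the offset macros come from the per-chip *_regs.h headers.
+ */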
+
+/**
+ * Checks if memory access is okay
+ *
+ * @param oct which octeon to send to
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_mem_access_ok(struct octeon_device *oct);
+
+/**
+ * Waits for DDR initialization.
+ *
+ * @param oct which octeon to send to
+ * @param timeout_in_ms pointer to how long to wait until DDR is initialized
+ * in ms.
+ * If contents are 0, it waits until contents are non-zero
+ * before starting to check.
+ * @return Zero on success, non-zero on failure.
+ */
+int octeon_wait_for_ddr_init(struct octeon_device *oct,
+ u32 *timeout_in_ms);
+
+/**
+ * Wait for u-boot to boot and be waiting for a command.
+ *
+ * @param wait_time_hundredths
+ * Maximum time to wait
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_wait_for_bootloader(struct octeon_device *oct,
+ u32 wait_time_hundredths);
+
+/**
+ * Initialize console access
+ *
+ * @param oct which octeon initialize
+ * @return Zero on success, negative on failure.
+ */
+int octeon_init_consoles(struct octeon_device *oct);
+
+/**
+ * Adds access to a console to the device.
+ *
+ * @param oct: which octeon to add to
+ * @param console_num: which console
+ * @param dbg_enb: ptr to debug enablement string, one of:
+ * * NULL for no debug output (i.e. disabled)
+ * * empty string enables debug output (via default method)
+ * * specific string to enable debug console output
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_add_console(struct octeon_device *oct, u32 console_num,
+ char *dbg_enb);
+
+/** write or read from a console */
+int octeon_console_write(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 write_request_size, u32 flags);
+int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
+
+int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
+
+/** Removes all attached consoles. */
+void octeon_remove_consoles(struct octeon_device *oct);
+
+/**
+ * Send a string to u-boot on console 0 as a command.
+ *
+ * @param oct which octeon to send to
+ * @param cmd_str String to send
+ * @param wait_hundredths Time to wait for u-boot to accept the command.
+ *
+ * @return Zero on success, negative on failure.
+ */
+int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
+ u32 wait_hundredths);
+
+/** Parses, validates, and downloads firmware, then boots associated cores.
+ * @param oct which octeon to download firmware to
+ * @param data - The complete firmware file image
+ * @param size - The size of the data
+ *
+ * @return 0 if success.
+ * -EINVAL if file is incompatible or badly formatted.
+ * -ENODEV if no handler was found for the application type or an
+ * invalid octeon id was passed.
+ */
+int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
+ size_t size);
+
+char *lio_get_state_string(atomic_t *state_ptr);
+
+/** Sets up instruction queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_instr_queues(struct octeon_device *oct);
+
+/** Sets up output queues for the device
+ * @param oct which octeon to setup
+ *
+ * @return 0 if success. 1 if fails
+ */
+int octeon_setup_output_queues(struct octeon_device *oct);
+
+int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no);
+
+int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no);
+
+/** Turns off the input and output queues for the device
+ * @param oct which octeon to disable
+ */
+int octeon_set_io_queues_off(struct octeon_device *oct);
+
+/** Turns on or off the given output queue for the device
+ * @param oct which octeon to change
+ * @param q_no which queue
+ * @param enable 1 to enable, 0 to disable
+ */
+void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable);
+
+/** Retrieve the config for the device
+ * @param oct which octeon
+ * @param card_type type of card
+ *
+ * @returns pointer to configuration
+ */
+void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
+
+/** Gets the octeon device configuration
+ * @return - pointer to the octeon configuration structure
+ */
+struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+
+void octeon_free_ioq_vector(struct octeon_device *oct);
+int octeon_allocate_ioq_vector(struct octeon_device *oct, u32 num_ioqs);
+void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq);
+
+/* LiquidIO driver private flags */
+enum {
+ OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
+};
+
+#define OCT_PRIV_FLAG_DEFAULT 0x0
+
+static inline u32 lio_get_priv_flag(struct octeon_device *octdev, u32 flag)
+{
+ return !!(octdev->priv_flags & (0x1 << flag));
+}
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev,
+ u32 flag, u32 val)
+{
+ if (val)
+ octdev->priv_flags |= (0x1 << flag);
+ else
+ octdev->priv_flags &= ~(0x1 << flag);
+}
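+
+/* e.g. lio_get_priv_flag(oct, OCT_PRIV_FLAG_TX_BYTES) tests bit 0 of
+ * oct->priv_flags, and lio_set_priv_flag(oct, OCT_PRIV_FLAG_TX_BYTES, 1)
+ * sets it.
+ */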
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
new file mode 100644
index 000000000..d4080bddc
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -0,0 +1,969 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
+
+struct niclist {
+ struct list_head list;
+ void *ptr;
+};
+
+struct __dispatch {
+ struct list_head list;
+ struct octeon_recv_info *rinfo;
+ octeon_dispatch_fn_t disp_fn;
+};
+
+/** Get the argument that the user set when registering dispatch
+ * function for a given opcode/subcode.
+ * @param octeon_dev - the octeon device pointer.
+ * @param opcode - the opcode for which the dispatch argument
+ * is to be checked.
+ * @param subcode - the subcode for which the dispatch argument
+ * is to be checked.
+ * @return Success: void * (argument to the dispatch function)
+ * @return Failure: NULL
+ *
+ */
+void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
+ u16 opcode, u16 subcode)
+{
+ int idx;
+ struct list_head *dispatch;
+ void *fn_arg = NULL;
+ u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
+
+ idx = combined_opcode & OCTEON_OPCODE_MASK;
+
+ spin_lock_bh(&octeon_dev->dispatch.lock);
+
+ if (octeon_dev->dispatch.count == 0) {
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return NULL;
+ }
+
+ if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
+ fn_arg = octeon_dev->dispatch.dlist[idx].arg;
+ } else {
+ list_for_each(dispatch,
+ &octeon_dev->dispatch.dlist[idx].list) {
+ if (((struct octeon_dispatch *)dispatch)->opcode ==
+ combined_opcode) {
+ fn_arg = ((struct octeon_dispatch *)
+ dispatch)->arg;
+ break;
+ }
+ }
+ }
+
+ spin_unlock_bh(&octeon_dev->dispatch.lock);
+ return fn_arg;
+}
+
+/** Check for packets on Droq. This function should be called with lock held.
+ * @param droq - Droq on which count is checked.
+ * @return Returns packet count.
+ */
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
+{
+ u32 pkt_count = 0;
+ u32 last_count;
+
+ pkt_count = readl(droq->pkts_sent_reg);
+
+ last_count = pkt_count - droq->pkt_count;
+ droq->pkt_count = pkt_count;
+
+ /* the cnts register is written back at napi irq enable or at the end of the droq tasklet */
+ if (last_count)
+ atomic_add(last_count, &droq->pkts_pending);
+
+ return last_count;
+}
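+
+/* Example: if the previous value of droq->pkt_count was 100 and pkts_sent_reg
+ * now reads 107, seven newly arrived packets are added to pkts_pending.
+ */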
+
+static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
+{
+ u32 count = 0;
+
+ /* max_empty_descs is the max. no. of descs that can have no buffers.
+ * If the empty desc count goes beyond this value, we cannot safely
+ * read in a 64K packet sent by Octeon
+ * (64K is max pkt size from Octeon)
+ */
+ droq->max_empty_descs = 0;
+
+ do {
+ droq->max_empty_descs++;
+ count += droq->buffer_size;
+ } while (count < (64 * 1024));
+
+ droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
+}
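+
+/* Example: with 2048-byte receive buffers the loop above needs 32 descriptors
+ * to cover a 64K packet, so max_empty_descs ends up as droq->max_count - 32.
+ */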
+
+static void octeon_droq_reset_indices(struct octeon_droq *droq)
+{
+ droq->read_idx = 0;
+ droq->write_idx = 0;
+ droq->refill_idx = 0;
+ droq->refill_count = 0;
+ atomic_set(&droq->pkts_pending, 0);
+}
+
+static void
+octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+ struct octeon_skb_page_info *pg_info;
+
+ for (i = 0; i < droq->max_count; i++) {
+ pg_info = &droq->recv_buf_list[i].pg_info;
+ if (!pg_info)
+ continue;
+
+ if (pg_info->dma)
+ lio_unmap_ring(oct->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->dma = 0;
+
+ if (pg_info->page)
+ recv_buffer_destroy(droq->recv_buf_list[i].buffer,
+ pg_info);
+
+ droq->recv_buf_list[i].buffer = NULL;
+ }
+
+ octeon_droq_reset_indices(droq);
+}
+
+static int
+octeon_droq_setup_ring_buffers(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ u32 i;
+ void *buf;
+ struct octeon_droq_desc *desc_ring = droq->desc_ring;
+
+ for (i = 0; i < droq->max_count; i++) {
+ buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);
+
+ if (!buf) {
+ dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
+ __func__);
+ droq->stats.rx_alloc_failure++;
+ return -ENOMEM;
+ }
+
+ droq->recv_buf_list[i].buffer = buf;
+ droq->recv_buf_list[i].data = get_rbd(buf);
+ desc_ring[i].info_ptr = 0;
+ desc_ring[i].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[i].buffer);
+ }
+
+ octeon_droq_reset_indices(droq);
+
+ octeon_droq_compute_max_packet_bufs(droq);
+
+ return 0;
+}
+
+int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
+{
+ struct octeon_droq *droq = oct->droq[q_no];
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ octeon_droq_destroy_ring_buffers(oct, droq);
+ vfree(droq->recv_buf_list);
+
+ if (droq->desc_ring)
+ lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
+ droq->desc_ring, droq->desc_ring_dma);
+
+ memset(droq, 0, OCT_DROQ_SIZE);
+ oct->io_qmask.oq &= ~(1ULL << q_no);
+ vfree(oct->droq[q_no]);
+ oct->droq[q_no] = NULL;
+ oct->num_oqs--;
+
+ return 0;
+}
+
+int octeon_init_droq(struct octeon_device *oct,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx)
+{
+ struct octeon_droq *droq;
+ u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
+ u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
+
+ droq = oct->droq[q_no];
+ memset(droq, 0, OCT_DROQ_SIZE);
+
+ droq->oct_dev = oct;
+ droq->q_no = q_no;
+ if (app_ctx)
+ droq->app_ctx = app_ctx;
+ else
+ droq->app_ctx = (void *)(size_t)q_no;
+
+ c_num_descs = num_descs;
+ c_buf_size = desc_size;
+ if (OCTEON_CN6XXX(oct)) {
+ struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
+ c_refill_threshold =
+ (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ } else if (OCTEON_CN23XX_PF(oct)) {
+ struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
+ } else if (OCTEON_CN23XX_VF(oct)) {
+ struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);
+
+ c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
+ c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
+ } else {
+ return 1;
+ }
+
+ droq->max_count = c_num_descs;
+ droq->buffer_size = c_buf_size;
+
+ desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+ droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+ (dma_addr_t *)&droq->desc_ring_dma);
+
+ if (!droq->desc_ring) {
+ dev_err(&oct->pci_dev->dev,
+ "Output queue %d ring alloc failed\n", q_no);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+ q_no, droq->desc_ring, droq->desc_ring_dma);
+ dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
+ droq->max_count);
+
+ droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
+ numa_node);
+ if (!droq->recv_buf_list)
+ droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE));
+ if (!droq->recv_buf_list) {
+ dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
+ goto init_droq_fail;
+ }
+
+ if (octeon_droq_setup_ring_buffers(oct, droq))
+ goto init_droq_fail;
+
+ droq->pkts_per_intr = c_pkts_per_intr;
+ droq->refill_threshold = c_refill_threshold;
+
+ dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
+ droq->max_empty_descs);
+
+ INIT_LIST_HEAD(&droq->dispatch_list);
+
+ /* For 56xx Pass1, this function won't be called, so no checks. */
+ oct->fn_list.setup_oq_regs(oct, q_no);
+
+ oct->io_qmask.oq |= BIT_ULL(q_no);
+
+ return 0;
+
+init_droq_fail:
+ octeon_delete_droq(oct, q_no);
+ return 1;
+}
+
+/* octeon_create_recv_info
+ * Parameters:
+ * octeon_dev - pointer to the octeon device structure
+ * droq - droq in which the packet arrived.
+ * buf_cnt - no. of buffers used by the packet.
+ * idx - index in the descriptor for the first buffer in the packet.
+ * Description:
+ * Allocates a recv_info_t and copies the buffer addresses for packet data
+ * into the recv_pkt space which starts at an 8B offset from recv_info_t.
+ * Flags the descriptors for refill later. If available descriptors go
+ * below the threshold to receive a 64K pkt, new buffers are first allocated
+ * before the recv_pkt_t is created.
+ * This routine will be called in interrupt context.
+ * Returns:
+ * Success: Pointer to recv_info_t
+ * Failure: NULL.
+ */
+static inline struct octeon_recv_info *octeon_create_recv_info(
+ struct octeon_device *octeon_dev,
+ struct octeon_droq *droq,
+ u32 buf_cnt,
+ u32 idx)
+{
+ struct octeon_droq_info *info;
+ struct octeon_recv_pkt *recv_pkt;
+ struct octeon_recv_info *recv_info;
+ u32 i, bytes_left;
+ struct octeon_skb_page_info *pg_info;
+
+ info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;
+
+ recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
+ if (!recv_info)
+ return NULL;
+
+ recv_pkt = recv_info->recv_pkt;
+ recv_pkt->rh = info->rh;
+ recv_pkt->length = (u32)info->length;
+ recv_pkt->buffer_count = (u16)buf_cnt;
+ recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;
+
+ i = 0;
+ bytes_left = (u32)info->length;
+
+ while (buf_cnt) {
+ {
+ pg_info = &droq->recv_buf_list[idx].pg_info;
+
+ lio_unmap_ring(octeon_dev->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->page = NULL;
+ pg_info->dma = 0;
+ }
+
+ recv_pkt->buffer_size[i] =
+ (bytes_left >=
+ droq->buffer_size) ? droq->buffer_size : bytes_left;
+
+ recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
+ droq->recv_buf_list[idx].buffer = NULL;
+
+ idx = incr_index(idx, 1, droq->max_count);
+ bytes_left -= droq->buffer_size;
+ i++;
+ buf_cnt--;
+ }
+
+ return recv_info;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline u32
+octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
+ struct octeon_droq_desc *desc_ring)
+{
+ u32 desc_refilled = 0;
+
+ u32 refill_index = droq->refill_idx;
+
+ while (refill_index != droq->read_idx) {
+ if (droq->recv_buf_list[refill_index].buffer) {
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ droq->recv_buf_list[refill_index].buffer;
+ droq->recv_buf_list[droq->refill_idx].data =
+ droq->recv_buf_list[refill_index].data;
+ desc_ring[droq->refill_idx].buffer_ptr =
+ desc_ring[refill_index].buffer_ptr;
+ droq->recv_buf_list[refill_index].buffer = NULL;
+ desc_ring[refill_index].buffer_ptr = 0;
+ do {
+ droq->refill_idx = incr_index(droq->refill_idx,
+ 1,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ } while (droq->recv_buf_list[droq->refill_idx].buffer);
+ }
+ refill_index = incr_index(refill_index, 1, droq->max_count);
+ } /* while */
+ return desc_refilled;
+}
+
+/* octeon_droq_refill
+ * Parameters:
+ * droq - droq in which descriptors require new buffers.
+ * Description:
+ * Called during normal DROQ processing in interrupt mode or by the poll
+ * thread to refill the descriptors from which buffers were dispatched
+ * to upper layers. Attempts to allocate new buffers. If that fails, moves
+ * up buffers (that were not dispatched) to form a contiguous ring.
+ * Returns:
+ * No of descriptors refilled.
+ */
+static u32
+octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
+{
+ struct octeon_droq_desc *desc_ring;
+ void *buf = NULL;
+ u8 *data;
+ u32 desc_refilled = 0;
+ struct octeon_skb_page_info *pg_info;
+
+ desc_ring = droq->desc_ring;
+
+ while (droq->refill_count && (desc_refilled < droq->max_count)) {
+ /* If a valid buffer exists (happens if there is no dispatch),
+ * reuse the buffer, else allocate.
+ */
+ if (!droq->recv_buf_list[droq->refill_idx].buffer) {
+ pg_info =
+ &droq->recv_buf_list[droq->refill_idx].pg_info;
+ /* Either recycle the existing pages or go for
+ * new page alloc
+ */
+ if (pg_info->page)
+ buf = recv_buffer_reuse(octeon_dev, pg_info);
+ else
+ buf = recv_buffer_alloc(octeon_dev, pg_info);
+ /* If a buffer could not be allocated, no point in
+ * continuing
+ */
+ if (!buf) {
+ droq->stats.rx_alloc_failure++;
+ break;
+ }
+ droq->recv_buf_list[droq->refill_idx].buffer =
+ buf;
+ data = get_rbd(buf);
+ } else {
+ data = get_rbd(droq->recv_buf_list
+ [droq->refill_idx].buffer);
+ }
+
+ droq->recv_buf_list[droq->refill_idx].data = data;
+
+ desc_ring[droq->refill_idx].buffer_ptr =
+ lio_map_ring(droq->recv_buf_list[
+ droq->refill_idx].buffer);
+
+ droq->refill_idx = incr_index(droq->refill_idx, 1,
+ droq->max_count);
+ desc_refilled++;
+ droq->refill_count--;
+ }
+
+ if (droq->refill_count)
+ desc_refilled +=
+ octeon_droq_refill_pullup_descs(droq, desc_ring);
+
+ /* If droq->refill_count is still non-zero here:
+ * the refill count would not change in pass two. We only moved buffers
+ * to close the gap in the ring, but we would still have the same no. of
+ * buffers to refill.
+ */
+ return desc_refilled;
+}
+
+/** Check whether we can allocate packets to get out of OOM.
+ * @param droq - Droq being checked.
+ * @return 1 if it fails to refill the minimum number of descriptors.
+ */
+int octeon_retry_droq_refill(struct octeon_droq *droq)
+{
+ struct octeon_device *oct = droq->oct_dev;
+ int desc_refilled, reschedule = 1;
+ u32 pkts_credit;
+
+ pkts_credit = readl(droq->pkts_credit_reg);
+ desc_refilled = octeon_droq_refill(oct, droq);
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to be sure
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+
+ if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
+ reschedule = 0;
+ }
+
+ return reschedule;
+}
+
+static inline u32
+octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
+{
+ return DIV_ROUND_UP(total_len, buf_size);
+}
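+
+/* e.g. a 9000-byte packet received into 2048-byte buffers needs
+ * DIV_ROUND_UP(9000, 2048) = 5 descriptors.
+ */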
+
+static int
+octeon_droq_dispatch_pkt(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ union octeon_rh *rh,
+ struct octeon_droq_info *info)
+{
+ u32 cnt;
+ octeon_dispatch_fn_t disp_fn;
+ struct octeon_recv_info *rinfo;
+
+ cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);
+
+ disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
+ (u16)rh->r.subcode);
+ if (disp_fn) {
+ rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
+ if (rinfo) {
+ struct __dispatch *rdisp = rinfo->rsvd;
+
+ rdisp->rinfo = rinfo;
+ rdisp->disp_fn = disp_fn;
+ rinfo->recv_pkt->rh = *rh;
+ list_add_tail(&rdisp->list,
+ &droq->dispatch_list);
+ } else {
+ droq->stats.dropped_nomem++;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
+ (unsigned int)rh->r.opcode,
+ (unsigned int)rh->r.subcode);
+ droq->stats.dropped_nodispatch++;
+ }
+
+ return cnt;
+}
+
+static inline void octeon_droq_drop_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 cnt)
+{
+ u32 i = 0, buf_cnt;
+ struct octeon_droq_info *info;
+
+ for (i = 0; i < cnt; i++) {
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (info->length) {
+ info->length += OCTNET_FRM_LENGTH_SIZE;
+ droq->stats.bytes_received += info->length;
+ buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
+ (u32)info->length);
+ } else {
+ dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
+ buf_cnt = 1;
+ }
+
+ droq->read_idx = incr_index(droq->read_idx, buf_cnt,
+ droq->max_count);
+ droq->refill_count += buf_cnt;
+ }
+}
+
+static u32
+octeon_droq_fast_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 pkts_to_process)
+{
+ u32 pkt, total_len = 0, pkt_count, retval;
+ struct octeon_droq_info *info;
+ union octeon_rh *rh;
+
+ pkt_count = pkts_to_process;
+
+ for (pkt = 0; pkt < pkt_count; pkt++) {
+ u32 pkt_len = 0;
+ struct sk_buff *nicbuf = NULL;
+ struct octeon_skb_page_info *pg_info;
+ void *buf;
+
+ info = (struct octeon_droq_info *)
+ droq->recv_buf_list[droq->read_idx].data;
+ octeon_swap_8B_data((u64 *)info, 2);
+
+ if (!info->length) {
+ dev_err(&oct->pci_dev->dev,
+ "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
+ droq->q_no, droq->read_idx, pkt_count);
+ print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
+ (u8 *)info,
+ OCT_DROQ_INFO_SIZE);
+ break;
+ }
+
+ /* Len of resp hdr is included in the received data len. */
+ rh = &info->rh;
+
+ info->length += OCTNET_FRM_LENGTH_SIZE;
+ rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
+ total_len += (u32)info->length;
+ if (opcode_slow_path(rh)) {
+ u32 buf_cnt;
+
+ buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
+ droq->read_idx = incr_index(droq->read_idx,
+ buf_cnt, droq->max_count);
+ droq->refill_count += buf_cnt;
+ } else {
+ if (info->length <= droq->buffer_size) {
+ pkt_len = (u32)info->length;
+ nicbuf = droq->recv_buf_list[
+ droq->read_idx].buffer;
+ pg_info = &droq->recv_buf_list[
+ droq->read_idx].pg_info;
+ if (recv_buffer_recycle(oct, pg_info))
+ pg_info->page = NULL;
+ droq->recv_buf_list[droq->read_idx].buffer =
+ NULL;
+
+ droq->read_idx = incr_index(droq->read_idx, 1,
+ droq->max_count);
+ droq->refill_count++;
+ } else {
+ nicbuf = octeon_fast_packet_alloc((u32)
+ info->length);
+ pkt_len = 0;
+ /* nicbuf allocation can fail. We'll handle it
+ * inside the loop.
+ */
+ while (pkt_len < info->length) {
+ int cpy_len, idx = droq->read_idx;
+
+ cpy_len = ((pkt_len + droq->buffer_size)
+ > info->length) ?
+ ((u32)info->length - pkt_len) :
+ droq->buffer_size;
+
+ if (nicbuf) {
+ octeon_fast_packet_next(droq,
+ nicbuf,
+ cpy_len,
+ idx);
+ buf = droq->recv_buf_list[
+ idx].buffer;
+ recv_buffer_fast_free(buf);
+ droq->recv_buf_list[idx].buffer
+ = NULL;
+ } else {
+ droq->stats.rx_alloc_failure++;
+ }
+
+ pkt_len += cpy_len;
+ droq->read_idx =
+ incr_index(droq->read_idx, 1,
+ droq->max_count);
+ droq->refill_count++;
+ }
+ }
+
+ if (nicbuf) {
+ if (droq->ops.fptr) {
+ droq->ops.fptr(oct->octeon_id,
+ nicbuf, pkt_len,
+ rh, &droq->napi,
+ droq->ops.farg);
+ } else {
+ recv_buffer_free(nicbuf);
+ }
+ }
+ }
+
+ if (droq->refill_count >= droq->refill_threshold) {
+ int desc_refilled = octeon_droq_refill(oct, droq);
+
+ if (desc_refilled) {
+ /* Flush the droq descriptor data to memory to
+ * be sure that when we update the credits the
+ * data in memory is accurate.
+ */
+ wmb();
+ writel(desc_refilled, droq->pkts_credit_reg);
+ }
+ }
+ } /* for (each packet)... */
+
+ /* Update per-queue stats with the number of packets and bytes processed. */
+ droq->stats.pkts_received += pkt;
+ droq->stats.bytes_received += total_len;
+
+ retval = pkt;
+ if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
+ octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));
+
+ droq->stats.dropped_toomany += (pkts_to_process - pkt);
+ retval = pkts_to_process;
+ }
+
+ atomic_sub(retval, &droq->pkts_pending);
+
+ if (droq->refill_count >= droq->refill_threshold &&
+ readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) {
+ octeon_droq_check_hw_for_pkts(droq);
+
+ /* Make sure there are no pkts_pending */
+ if (!atomic_read(&droq->pkts_pending))
+ octeon_schedule_rxq_oom_work(oct, droq);
+ }
+
+ return retval;
+}
+
+int
+octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget)
+{
+ u32 pkt_count = 0;
+ struct list_head *tmp, *tmp2;
+
+ octeon_droq_check_hw_for_pkts(droq);
+ pkt_count = atomic_read(&droq->pkts_pending);
+
+ if (!pkt_count)
+ return 0;
+
+ if (pkt_count > budget)
+ pkt_count = budget;
+
+ octeon_droq_fast_process_packets(oct, droq, pkt_count);
+
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+ /* If there are packets pending, schedule the tasklet again */
+ if (atomic_read(&droq->pkts_pending))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Utility function to poll for packets. check_hw_for_packets must be
+ * called before calling this routine.
+ */
+
+int
+octeon_droq_process_poll_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq, u32 budget)
+{
+ struct list_head *tmp, *tmp2;
+ u32 pkts_available = 0, pkts_processed = 0;
+ u32 total_pkts_processed = 0;
+
+ if (budget > droq->max_count)
+ budget = droq->max_count;
+
+ while (total_pkts_processed < budget) {
+ octeon_droq_check_hw_for_pkts(droq);
+
+ pkts_available = min((budget - total_pkts_processed),
+ (u32)(atomic_read(&droq->pkts_pending)));
+
+ if (pkts_available == 0)
+ break;
+
+ pkts_processed =
+ octeon_droq_fast_process_packets(oct, droq,
+ pkts_available);
+
+ total_pkts_processed += pkts_processed;
+ }
+
+ list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
+ struct __dispatch *rdisp = (struct __dispatch *)tmp;
+
+ list_del(tmp);
+ rdisp->disp_fn(rdisp->rinfo,
+ octeon_get_dispatch_arg
+ (oct,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
+ (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
+ }
+
+ return total_pkts_processed;
+}
+
+/* Enable Pkt Interrupt */
+int
+octeon_enable_irq(struct octeon_device *oct, u32 q_no)
+{
+ switch (oct->chip_id) {
+ case OCTEON_CN66XX:
+ case OCTEON_CN68XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ unsigned long flags;
+ u32 value;
+
+ spin_lock_irqsave
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ value = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, value);
+ value = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
+ value |= (1 << q_no);
+ octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, value);
+
+ /* don't bother flushing the enables */
+
+ spin_unlock_irqrestore
+ (&cn6xxx->lock_for_droq_int_enb_reg, flags);
+ }
+ break;
+ case OCTEON_CN23XX_PF_VID:
+ lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
+ break;
+
+ case OCTEON_CN23XX_VF_VID:
+ lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev, "%s Unknown Chip\n", __func__);
+ return 1;
+ }
+
+ return 0;
+}
+
+int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
+ struct octeon_droq_ops *ops)
+{
+ struct octeon_config *oct_cfg = NULL;
+ struct octeon_droq *droq;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (!(ops)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, (oct->num_oqs - 1));
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+ memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));
+
+ return 0;
+}
+
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
+{
+ struct octeon_config *oct_cfg = NULL;
+ struct octeon_droq *droq;
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (!oct_cfg)
+ return -EINVAL;
+
+ if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+ dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+ __func__, q_no, oct->num_oqs - 1);
+ return -EINVAL;
+ }
+
+ droq = oct->droq[q_no];
+
+ if (!droq) {
+ dev_info(&oct->pci_dev->dev,
+ "Droq id (%d) not available.\n", q_no);
+ return 0;
+ }
+
+ droq->ops.fptr = NULL;
+ droq->ops.farg = NULL;
+ droq->ops.drop_on_max = 0;
+
+ return 0;
+}
+
+int octeon_create_droq(struct octeon_device *oct,
+ u32 q_no, u32 num_descs,
+ u32 desc_size, void *app_ctx)
+{
+ struct octeon_droq *droq;
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ if (oct->droq[q_no]) {
+ dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
+ q_no);
+ return 1;
+ }
+
+ /* Allocate the data structure for the new droq. */
+ droq = vmalloc_node(sizeof(*droq), numa_node);
+ if (!droq)
+ droq = vmalloc(sizeof(*droq));
+ if (!droq)
+ return -1;
+
+ memset(droq, 0, sizeof(struct octeon_droq));
+
+ /* Disable the pkt output for this Q */
+ octeon_set_droq_pkt_op(oct, q_no, 0);
+ oct->droq[q_no] = droq;
+
+ /* Initialize the Droq */
+ if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
+ vfree(oct->droq[q_no]);
+ oct->droq[q_no] = NULL;
+ return -1;
+ }
+
+ oct->num_oqs++;
+
+ dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
+ oct->num_oqs);
+
+ /* Global Droq register settings */
+
+ /* As of now not required, as settings are done for all 32 Droqs at
+ * the same time.
+ */
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
new file mode 100644
index 000000000..c9b19e624
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -0,0 +1,416 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file octeon_droq.h
+ * \brief Implementation of Octeon Output queues. "Output" is with
+ * respect to the Octeon device on the NIC. From this driver's point of
+ * view they are ingress queues.
+ */
+
+#ifndef __OCTEON_DROQ_H__
+#define __OCTEON_DROQ_H__
+
+/* Default number of packets that will be processed in one iteration. */
+#define MAX_PACKET_BUDGET 0xFFFFFFFF
+
+/** Octeon descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of a octeon_droq_info structure.
+ * The Octeon device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct octeon_droq_desc {
+ /** The buffer pointer */
+ u64 buffer_ptr;
+
+ /** The Info pointer */
+ u64 info_ptr;
+};
+
+#define OCT_DROQ_DESC_SIZE (sizeof(struct octeon_droq_desc))
+
+/** Information about packet DMA'ed by Octeon.
+ * The format of the information available at Info Pointer after Octeon
+ * has posted a packet. Not all descriptors have valid information. Only
+ * the Info field of the first descriptor for a packet has information
+ * about the packet.
+ */
+struct octeon_droq_info {
+ /** The Length of the packet. */
+ u64 length;
+
+ /** The Output Receive Header. */
+ union octeon_rh rh;
+};
+
+#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
+
+struct octeon_skb_page_info {
+ /* DMA address for the page */
+ dma_addr_t dma;
+
+ /* Page for the rx dma */
+ struct page *page;
+
+ /** which offset into page */
+ unsigned int page_offset;
+};
+
+/** Pointer to data buffer.
+ * Driver keeps a pointer to the data buffer that it made available to
+ * the Octeon device. Since the descriptor ring keeps physical (bus)
+ * addresses, this field is required for the driver to keep track of
+ * the virtual address pointers.
+ */
+struct octeon_recv_buffer {
+ /** Packet buffer, including metadata. */
+ void *buffer;
+
+ /** Data in the packet buffer. */
+ u8 *data;
+
+ /** pg_info **/
+ struct octeon_skb_page_info pg_info;
+};
+
+#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
+
+/** Output Queue statistics. Each output queue has its own stats fields. */
+struct oct_droq_stats {
+ /** Number of packets received in this queue. */
+ u64 pkts_received;
+
+ /** Bytes received by this queue. */
+ u64 bytes_received;
+
+ /** Packets dropped due to no dispatch function. */
+ u64 dropped_nodispatch;
+
+ /** Packets dropped due to no memory available. */
+ u64 dropped_nomem;
+
+ /** Packets dropped due to large number of pkts to process. */
+ u64 dropped_toomany;
+
+ /** Number of packets sent to stack from this queue. */
+ u64 rx_pkts_received;
+
+ /** Number of Bytes sent to stack from this queue. */
+ u64 rx_bytes_received;
+
+ /** Num of Packets dropped due to receive path failures. */
+ u64 rx_dropped;
+
+ u64 rx_vxlan;
+
+ /** Num of failures of recv_buffer_alloc() */
+ u64 rx_alloc_failure;
+
+};
+
+/* The maximum number of buffers that can be dispatched from the
+ * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that
+ * max packet size from DROQ is 64K.
+ */
+#define MAX_RECV_BUFS 64
+
+/** Receive Packet format used when dispatching output queue packets
+ * with non-raw opcodes.
+ * The received packet will be sent to the upper layers using this
+ * structure which is passed as a parameter to the dispatch function
+ */
+struct octeon_recv_pkt {
+ /** Number of buffers in this received packet */
+ u16 buffer_count;
+
+ /** Id of the device that is sending the packet up */
+ u16 octeon_id;
+
+ /** Length of data in the packet buffer */
+ u32 length;
+
+ /** The receive header */
+ union octeon_rh rh;
+
+ /** Pointer to the OS-specific packet buffer */
+ void *buffer_ptr[MAX_RECV_BUFS];
+
+ /** Size of the buffers pointed to by ptr's in buffer_ptr */
+ u32 buffer_size[MAX_RECV_BUFS];
+};
+
+#define OCT_RECV_PKT_SIZE (sizeof(struct octeon_recv_pkt))
+
+/** The first parameter of a dispatch function.
+ * For a raw mode opcode, the driver dispatches with the device
+ * pointer in this structure.
+ * For a non-raw mode opcode, the driver dispatches the recv_pkt
+ * created to contain the buffers with data received from Octeon.
+ * ---------------------
+ * | *recv_pkt ----|---
+ * |-------------------| |
+ * | 0 or more bytes | |
+ * | reserved by driver| |
+ * |-------------------|<-/
+ * | octeon_recv_pkt |
+ * | |
+ * |___________________|
+ */
+struct octeon_recv_info {
+ void *rsvd;
+ struct octeon_recv_pkt *recv_pkt;
+};
+
+#define OCT_RECV_INFO_SIZE (sizeof(struct octeon_recv_info))
+
+/** Allocate a recv_info structure. The recv_pkt pointer in the recv_info
+ * structure is filled in before this call returns.
+ * @param extra_bytes - extra bytes to be allocated at the end of the recv info
+ * structure.
+ * @return - pointer to a newly allocated recv_info structure.
+ */
+static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes)
+{
+ struct octeon_recv_info *recv_info;
+ u8 *buf;
+
+ buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE +
+ extra_bytes, GFP_ATOMIC);
+ if (!buf)
+ return NULL;
+
+ recv_info = (struct octeon_recv_info *)buf;
+ recv_info->recv_pkt =
+ (struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE);
+ recv_info->rsvd = NULL;
+ if (extra_bytes)
+ recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE;
+
+ return recv_info;
+}
+
+/** Free a recv_info structure.
+ * @param recv_info - Pointer to receive_info to be freed
+ */
+static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info)
+{
+ kfree(recv_info);
+}
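+
+/* Illustrative usage sketch (not taken from this driver; "struct my_ctx" is a
+ * hypothetical per-dispatch context type used only for the example):
+ *
+ *	struct octeon_recv_info *ri;
+ *
+ *	ri = octeon_alloc_recv_info(sizeof(struct my_ctx));
+ *	if (!ri)
+ *		return -ENOMEM;
+ *	// ri->recv_pkt points just past the recv_info header and
+ *	// ri->rsvd points at the extra bytes that were requested.
+ *	octeon_free_recv_info(ri);
+ */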
+
+typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *);
+
+/** Used by NIC module to register packet handler and to get device
+ * information for each octeon device.
+ */
+struct octeon_droq_ops {
+	/** This registered function will be called by the driver with
+	 * the octeon id, a pointer to the buffer from the droq and the
+	 * length of data in the buffer. The receive header gives the
+	 * port number to the caller. The function pointer is set by the
+	 * caller.
+	 */
+ void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *);
+ void *farg;
+
+	/* This function will be called by the driver for all NAPI related
+	 * events. Its argument is a pointer to the output queue (droq) on
+	 * which the event occurred.
+	 */
+ void (*napi_fn)(void *);
+
+ u32 poll_mode;
+
+ /** Flag indicating if the DROQ handler should drop packets that
+ * it cannot handle in one iteration. Set by caller.
+ */
+ u32 drop_on_max;
+};
+
+/** The Descriptor Ring Output Queue structure.
+ * This structure has all the information required to implement an
+ * Octeon DROQ.
+ */
+struct octeon_droq {
+ u32 q_no;
+
+ u32 pkt_count;
+
+ struct octeon_droq_ops ops;
+
+ struct octeon_device *oct_dev;
+
+ /** The 8B aligned descriptor ring starts at this address. */
+ struct octeon_droq_desc *desc_ring;
+
+ /** Index in the ring where the driver should read the next packet */
+ u32 read_idx;
+
+ /** Index in the ring where Octeon will write the next packet */
+ u32 write_idx;
+
+ /** Index in the ring where the driver will refill the descriptor's
+ * buffer
+ */
+ u32 refill_idx;
+
+ /** Packets pending to be processed */
+ atomic_t pkts_pending;
+
+ /** Number of descriptors in this ring. */
+ u32 max_count;
+
+ /** The number of descriptors pending refill. */
+ u32 refill_count;
+
+ u32 pkts_per_intr;
+ u32 refill_threshold;
+
+ /** The max number of descriptors in DROQ without a buffer.
+	 * This field is used to keep track of the empty space threshold. If the
+ * refill_count reaches this value, the DROQ cannot accept a max-sized
+ * (64K) packet.
+ */
+ u32 max_empty_descs;
+
+ /** The receive buffer list. This list has the virtual addresses of the
+ * buffers.
+ */
+ struct octeon_recv_buffer *recv_buf_list;
+
+	/** The size of each buffer pointed to by the buffer pointer. */
+ u32 buffer_size;
+
+ /** Pointer to the mapped packet credit register.
+	 * The host writes the number of available info/buffer pointers
+	 * to this register.
+ */
+ void __iomem *pkts_credit_reg;
+
+ /** Pointer to the mapped packet sent register.
+ * Octeon writes the number of packets DMA'ed to host memory
+ * in this register.
+ */
+ void __iomem *pkts_sent_reg;
+
+ struct list_head dispatch_list;
+
+ /** Statistics for this DROQ. */
+ struct oct_droq_stats stats;
+
+ /** DMA mapped address of the DROQ descriptor ring. */
+ size_t desc_ring_dma;
+
+ /** application context */
+ void *app_ctx;
+
+ struct napi_struct napi;
+
+ u32 cpu_id;
+
+ call_single_data_t csd;
+};
+
+#define OCT_DROQ_SIZE (sizeof(struct octeon_droq))
+
+/**
+ * Allocates space for the descriptor ring for the droq and sets the
+ * base addr, num desc etc in Octeon registers.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @param app_ctx - pointer to application context
+ * @return Success: 0 Failure: 1
+ */
+int octeon_init_droq(struct octeon_device *oct_dev,
+ u32 q_no,
+ u32 num_descs,
+ u32 desc_size,
+ void *app_ctx);
+
+/**
+ * Frees the space for descriptor ring for the droq.
+ *
+ * @param oct_dev - pointer to the octeon device structure
+ * @param q_no - droq no. ranges from 0 - 3.
+ * @return: Success: 0 Failure: 1
+ */
+int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
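+
+/* Illustrative sketch of the init/delete pair above (the queue number,
+ * descriptor count and descriptor size are placeholders):
+ *
+ *	if (octeon_init_droq(oct_dev, 0, 1024, 2048, NULL))
+ *		return 1;
+ *	...
+ *	octeon_delete_droq(oct_dev, 0);
+ */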
+
+/** Register a change in droq operations. The ops field has a pointer to a
+ * function which will be called by the DROQ handler for all packets arriving
+ * on output queues given by q_no irrespective of the type of packet.
+ * The ops field also has a flag which if set tells the DROQ handler to
+ * drop packets if it receives more than what it can process in one
+ * invocation of the handler.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @param ops - the droq_ops settings for this queue
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int
+octeon_register_droq_ops(struct octeon_device *oct,
+ u32 q_no,
+ struct octeon_droq_ops *ops);
+
+/** Resets the function pointer and flag settings made by
+ * octeon_register_droq_ops(). After this routine is called, the DROQ handler
+ * will look up the dispatch function for each packet arriving on the output
+ * queue given by q_no.
+ * given by q_no.
+ * @param oct - octeon device
+ * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1)
+ * @return - 0 on success, -ENODEV or -EINVAL on error.
+ */
+int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no);
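+
+/* Illustrative sketch (an assumption, not lifted from the driver): a NIC
+ * module would fill a struct octeon_droq_ops and register it per output
+ * queue. my_rx_handler and my_priv are hypothetical names.
+ *
+ *	struct octeon_droq_ops ops;
+ *
+ *	memset(&ops, 0, sizeof(ops));
+ *	ops.fptr = my_rx_handler;
+ *	ops.farg = my_priv;
+ *	ops.drop_on_max = 1;
+ *	if (octeon_register_droq_ops(oct, q_no, &ops))
+ *		dev_err(&oct->pci_dev->dev, "droq ops registration failed\n");
+ */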
+
+/** Register a dispatch function for an opcode/subcode. The driver will call
+ * this dispatch function when it receives a packet with the given
+ * opcode/subcode in its output queues along with the user specified
+ * argument.
+ * @param oct - the octeon device to register with.
+ * @param opcode - the opcode for which the dispatch will be registered.
+ * @param subcode - the subcode for which the dispatch will be registered
+ * @param fn - the dispatch function.
+ * @param fn_arg - user-specified argument that will be passed to the
+ * dispatch function by the driver.
+ * @return Success: 0; Failure: 1
+ */
+int octeon_register_dispatch_fn(struct octeon_device *oct,
+ u16 opcode,
+ u16 subcode,
+ octeon_dispatch_fn_t fn, void *fn_arg);
+
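+/* Illustrative sketch: a dispatch function matching octeon_dispatch_fn_t
+ * consumes the recv_info built by the DROQ handler along with the user
+ * argument supplied at registration. my_dispatch, MY_OPCODE, MY_SUBCODE and
+ * my_arg are hypothetical names.
+ *
+ *	static int my_dispatch(struct octeon_recv_info *recv_info, void *arg)
+ *	{
+ *		// ...consume recv_info->recv_pkt...
+ *		octeon_free_recv_info(recv_info);
+ *		return 0;
+ *	}
+ *
+ *	octeon_register_dispatch_fn(oct, MY_OPCODE, MY_SUBCODE,
+ *				    my_dispatch, my_arg);
+ */
+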
+void *octeon_get_dispatch_arg(struct octeon_device *oct,
+ u16 opcode, u16 subcode);
+
+void octeon_droq_print_stats(void);
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
+
+int octeon_create_droq(struct octeon_device *oct, u32 q_no,
+ u32 num_descs, u32 desc_size, void *app_ctx);
+
+int octeon_droq_process_packets(struct octeon_device *oct,
+ struct octeon_droq *droq,
+ u32 budget);
+
+int octeon_droq_process_poll_pkts(struct octeon_device *oct,
+ struct octeon_droq *droq, u32 budget);
+
+int octeon_enable_irq(struct octeon_device *oct, u32 q_no);
+
+int octeon_retry_droq_refill(struct octeon_droq *droq);
+
+#endif /*__OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
new file mode 100644
index 000000000..bebf3bd34
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -0,0 +1,399 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file octeon_iq.h
+ * \brief Host Driver: Implementation of Octeon input queues. "Input" is
+ * with respect to the Octeon device on the NIC. From this driver's
+ * point of view they are egress queues.
+ */
+
+#ifndef __OCTEON_IQ_H__
+#define __OCTEON_IQ_H__
+
+#define IQ_STATUS_RUNNING 1
+
+#define IQ_SEND_OK 0
+#define IQ_SEND_STOP 1
+#define IQ_SEND_FAILED -1
+
+/*------------------------- INSTRUCTION QUEUE --------------------------*/
+
+/* \cond */
+
+#define REQTYPE_NONE 0
+#define REQTYPE_NORESP_NET 1
+#define REQTYPE_NORESP_NET_SG 2
+#define REQTYPE_RESP_NET 3
+#define REQTYPE_RESP_NET_SG 4
+#define REQTYPE_SOFT_COMMAND 5
+#define REQTYPE_LAST 5
+
+struct octeon_request_list {
+ u32 reqtype;
+ void *buf;
+};
+
+/* \endcond */
+
+/** Input Queue statistics. Each input queue has its own set of these stats. */
+struct oct_iq_stats {
+ u64 instr_posted; /**< Instructions posted to this queue. */
+ u64 instr_processed; /**< Instructions processed in this queue. */
+ u64 instr_dropped; /**< Instructions that could not be processed */
+ u64 bytes_sent; /**< Bytes sent through this queue. */
+ u64 sgentry_sent;/**< Gather entries sent through this queue. */
+ u64 tx_done;/**< Num of packets sent to network. */
+	u64 tx_iq_busy;/**< Num of times this iq was found to be full. */
+	u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */
+	u64 tx_tot_bytes;/**< Total count of bytes sent to network. */
+ u64 tx_gso; /* count of tso */
+ u64 tx_vxlan; /* tunnel */
+ u64 tx_dmamap_fail; /* Number of times dma mapping failed */
+ u64 tx_restart; /* Number of times this queue restarted */
+};
+
+#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) for
+ * an Octeon device has one such structure to represent it.
+ */
+struct octeon_instr_queue {
+ struct octeon_device *oct_dev;
+
+ /** A spinlock to protect access to the input ring. */
+ spinlock_t lock;
+
+ /** A spinlock to protect while posting on the ring. */
+ spinlock_t post_lock;
+
+ /** This flag indicates if the queue can be used for soft commands.
+ * If this flag is set, post_lock must be acquired before posting
+ * a command to the queue.
+ * If this flag is clear, post_lock is invalid for the queue.
+ * All control commands (soft commands) will go through only Queue 0
+ * (control and data queue). So only queue-0 needs post_lock,
+	 * other queues are only data queues and do not need post_lock.
+ */
+ bool allow_soft_cmds;
+
+ u32 pkt_in_done;
+
+ u32 pkts_processed;
+
+ /** A spinlock to protect access to the input ring.*/
+ spinlock_t iq_flush_running_lock;
+
+ /** Flag that indicates if the queue uses 64 byte commands. */
+ u32 iqcmd_64B:1;
+
+ /** Queue info. */
+ union oct_txpciq txpciq;
+
+ u32 rsvd:17;
+
+ /* Controls whether extra flushing of IQ is done on Tx */
+ u32 do_auto_flush:1;
+
+ u32 status:8;
+
+ /** Maximum no. of instructions in this queue. */
+ u32 max_count;
+
+ /** Index in input ring where the driver should write the next packet */
+ u32 host_write_index;
+
+ /** Index in input ring where Octeon is expected to read the next
+ * packet.
+ */
+ u32 octeon_read_index;
+
+ /** This index aids in finding the window in the queue where Octeon
+ * has read the commands.
+ */
+ u32 flush_index;
+
+ /** This field keeps track of the instructions pending in this queue. */
+ atomic_t instr_pending;
+
+ u32 reset_instr_cnt;
+
+ /** Pointer to the Virtual Base addr of the input ring. */
+ u8 *base_addr;
+
+ struct octeon_request_list *request_list;
+
+ /** Octeon doorbell register for the ring. */
+ void __iomem *doorbell_reg;
+
+ /** Octeon instruction count register for this ring. */
+ void __iomem *inst_cnt_reg;
+
+ /** Number of instructions pending to be posted to Octeon. */
+ u32 fill_cnt;
+
+ /** The max. number of instructions that can be held pending by the
+ * driver.
+ */
+ u32 fill_threshold;
+
+ /** The last time that the doorbell was rung. */
+ u64 last_db_time;
+
+ /** The doorbell timeout. If the doorbell was not rung for this time and
+ * fill_cnt is non-zero, ring the doorbell again.
+ */
+ u32 db_timeout;
+
+ /** Statistics for this input queue. */
+ struct oct_iq_stats stats;
+
+ /** DMA mapped base address of the input descriptor ring. */
+ dma_addr_t base_addr_dma;
+
+ /** Application context */
+ void *app_ctx;
+
+ /* network stack queue index */
+ int q_index;
+
+ /*os ifidx associated with this queue */
+ int ifidx;
+
+};
+
+/*---------------------- INSTRUCTION FORMAT ----------------------------*/
+
+/** 32-byte instruction format.
+ * Format of instruction for a 32-byte mode input queue.
+ */
+struct octeon_instr_32B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ /** Input Request Header. Additional info about the input. */
+ u64 irh;
+
+};
+
+#define OCT_32B_INSTR_SIZE (sizeof(struct octeon_instr_32B))
+
+/** 64-byte instruction format.
+ * Format of instruction for a 64-byte mode input queue.
+ */
+struct octeon_instr2_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih2;
+
+ /** Input Request Header. */
+ u64 irh;
+
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+ u64 reserved;
+};
+
+struct octeon_instr3_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih3;
+
+ /** Instruction Header. */
+ u64 pki_ih3;
+
+ /** Input Request Header. */
+ u64 irh;
+
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+};
+
+union octeon_instr_64B {
+ struct octeon_instr2_64B cmd2;
+ struct octeon_instr3_64B cmd3;
+};
+
+#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B))
+
+/** The size of each buffer in soft command buffer pool
+ */
+#define SOFT_COMMAND_BUFFER_SIZE 2048
+
+struct octeon_soft_command {
+ /** Soft command buffer info. */
+ struct list_head node;
+ u64 dma_addr;
+ u32 size;
+
+ /** Command and return status */
+ union octeon_instr_64B cmd;
+
+#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
+ u64 *status_word;
+
+ /** Data buffer info */
+ void *virtdptr;
+ u64 dmadptr;
+ u32 datasize;
+
+ /** Return buffer info */
+ void *virtrptr;
+ u64 dmarptr;
+ u32 rdatasize;
+
+ /** Context buffer info */
+ void *ctxptr;
+ u32 ctxsize;
+
+ /** Time out and callback */
+ size_t expiry_time;
+ u32 iq_no;
+ void (*callback)(struct octeon_device *, u32, void *);
+ void *callback_arg;
+
+ int caller_is_done;
+ u32 sc_status;
+ struct completion complete;
+};
+
+/* max timeout (in milli sec) for soft request */
+#define LIO_SC_MAX_TMO_MS 60000
+
+/** Maximum number of buffers to allocate into soft command buffer pool
+ */
+#define MAX_SOFT_COMMAND_BUFFERS 256
+
+/** Head of a soft command buffer pool.
+ */
+struct octeon_sc_buffer_pool {
+	/** List structure to add/delete pending entries to */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t alloc_buf_count;
+};
+
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count)
+
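+/* For example (illustrative only), accounting one posted instruction on
+ * queue iq_no would be:
+ *	INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
+ */
+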
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_done_list(struct octeon_device *oct);
+int octeon_free_sc_zombie_list(struct octeon_device *oct);
+int octeon_free_sc_buffer_pool(struct octeon_device *oct);
+struct octeon_soft_command *
+ octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize, u32 rdatasize,
+ u32 ctxsize);
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+/**
+ * octeon_init_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param txpciq - queue to be initialized (0 <= q_no <= 3).
+ *
+ * Called at driver init time for each input queue. iq_conf has the
+ * configuration parameters for the queue.
+ *
+ * @return Success: 0 Failure: 1
+ */
+int octeon_init_instr_queue(struct octeon_device *octeon_dev,
+ union oct_txpciq txpciq,
+ u32 num_descs);
+
+/**
+ * octeon_delete_instr_queue()
+ * @param octeon_dev - pointer to the octeon device structure.
+ * @param iq_no - queue to be deleted (0 <= q_no <= 3).
+ *
+ * Called at driver unload time for each input queue. Deletes all
+ * allocated resources for the input queue.
+ *
+ * @return Success: 0 Failure: 1
+ */
+int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no);
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct);
+
+void
+octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no);
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *));
+
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq, u32 napi_budget);
+
+int octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype);
+
+void octeon_dump_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
+void octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode, u8 subcode,
+ u32 irh_ossp, u64 ossp0,
+ u64 ossp1);
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc);
+
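+/* Illustrative soft-command flow (a sketch; MY_OPCODE/MY_SUBCODE, the buffer
+ * sizes and the queue number are placeholders, and error handling is
+ * abbreviated):
+ *
+ *	struct octeon_soft_command *sc;
+ *
+ *	sc = octeon_alloc_soft_command(oct, datasize, rdatasize, 0);
+ *	if (!sc)
+ *		return -ENOMEM;
+ *	sc->iq_no = 0;
+ *	octeon_prepare_soft_command(oct, sc, MY_OPCODE, MY_SUBCODE, 0, 0, 0);
+ *	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED) {
+ *		octeon_free_soft_command(oct, sc);
+ *		return -EIO;
+ *	}
+ *	// completion is reported through sc->complete and sc->sc_status
+ */
+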
+int octeon_setup_iq(struct octeon_device *oct, int ifidx,
+ int q_index, union oct_txpciq iq_no, u32 num_descs,
+ void *app_ctx);
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 napi_budget);
+#endif /* __OCTEON_IQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
new file mode 100644
index 000000000..ad685f5d0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -0,0 +1,375 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "octeon_mailbox.h"
+#include "cn23xx_pf_device.h"
+
+/**
+ * octeon_mbox_read:
+ * @mbox: Pointer to the mailbox
+ *
+ * Reads 8 bytes of data from the mbox register and writes back the
+ * acknowledgement indicating completion of the read.
+ */
+int octeon_mbox_read(struct octeon_mbox *mbox)
+{
+ union octeon_mbox_message msg;
+ int ret = 0;
+
+ spin_lock(&mbox->lock);
+
+ msg.u64 = readq(mbox->mbox_read_reg);
+
+ if ((msg.u64 == OCTEON_PFVFACK) || (msg.u64 == OCTEON_PFVFSIG)) {
+ spin_unlock(&mbox->lock);
+ return 0;
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
+ mbox->mbox_req.data[mbox->mbox_req.recv_len - 1] = msg.u64;
+ mbox->mbox_req.recv_len++;
+ } else {
+ if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
+ mbox->mbox_resp.data[mbox->mbox_resp.recv_len - 1] =
+ msg.u64;
+ mbox->mbox_resp.recv_len++;
+ } else {
+ if ((mbox->state & OCTEON_MBOX_STATE_IDLE) &&
+ (msg.s.type == OCTEON_MBOX_REQUEST)) {
+ mbox->state &= ~OCTEON_MBOX_STATE_IDLE;
+ mbox->state |=
+ OCTEON_MBOX_STATE_REQUEST_RECEIVING;
+ mbox->mbox_req.msg.u64 = msg.u64;
+ mbox->mbox_req.q_no = mbox->q_no;
+ mbox->mbox_req.recv_len = 1;
+ } else {
+ if ((mbox->state &
+ OCTEON_MBOX_STATE_RESPONSE_PENDING) &&
+ (msg.s.type == OCTEON_MBOX_RESPONSE)) {
+ mbox->state &=
+ ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
+ mbox->state |=
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVING
+ ;
+ mbox->mbox_resp.msg.u64 = msg.u64;
+ mbox->mbox_resp.q_no = mbox->q_no;
+ mbox->mbox_resp.recv_len = 1;
+ } else {
+ writeq(OCTEON_PFVFERR,
+ mbox->mbox_read_reg);
+ mbox->state |= OCTEON_MBOX_STATE_ERROR;
+ spin_unlock(&mbox->lock);
+ return 1;
+ }
+ }
+ }
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVING) {
+ if (mbox->mbox_req.recv_len < mbox->mbox_req.msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVING;
+ mbox->state |= OCTEON_MBOX_STATE_REQUEST_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVING) {
+ if (mbox->mbox_resp.recv_len <
+ mbox->mbox_resp.msg.s.len) {
+ ret = 0;
+ } else {
+ mbox->state &=
+ ~OCTEON_MBOX_STATE_RESPONSE_RECEIVING;
+ mbox->state |=
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVED;
+ ret = 1;
+ }
+ } else {
+ WARN_ON(1);
+ }
+ }
+
+ writeq(OCTEON_PFVFACK, mbox->mbox_read_reg);
+
+ spin_unlock(&mbox->lock);
+
+ return ret;
+}
+
+/**
+ * octeon_mbox_write:
+ * @oct: Pointer to the Octeon device
+ * @mbox_cmd: Cmd to send to mailbox.
+ *
+ * Populates the queue-specific mbox structure with cmd information
+ * and writes the cmd to the mbox register.
+ */
+int octeon_mbox_write(struct octeon_device *oct,
+ struct octeon_mbox_cmd *mbox_cmd)
+{
+ struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
+ u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
+ long timeout = LIO_MBOX_WRITE_WAIT_TIME;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+
+ if ((mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) &&
+ !(mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return OCTEON_MBOX_STATUS_FAILED;
+ }
+
+ if ((mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) &&
+ !(mbox->state & OCTEON_MBOX_STATE_IDLE)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return OCTEON_MBOX_STATUS_BUSY;
+ }
+
+ if (mbox_cmd->msg.s.type == OCTEON_MBOX_REQUEST) {
+ memcpy(&mbox->mbox_resp, mbox_cmd,
+ sizeof(struct octeon_mbox_cmd));
+ mbox->state = OCTEON_MBOX_STATE_RESPONSE_PENDING;
+ }
+
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ count = 0;
+
+ while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
+ schedule_timeout_uninterruptible(timeout);
+ if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
+ ret = OCTEON_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+
+ if (ret == OCTEON_MBOX_STATUS_SUCCESS) {
+ writeq(mbox_cmd->msg.u64, mbox->mbox_write_reg);
+ for (i = 0; i < (u32)(mbox_cmd->msg.s.len - 1); i++) {
+ count = 0;
+ while (readq(mbox->mbox_write_reg) !=
+ OCTEON_PFVFACK) {
+ schedule_timeout_uninterruptible(timeout);
+ if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
+ ret = OCTEON_MBOX_STATUS_FAILED;
+ break;
+ }
+ }
+ if (ret == OCTEON_MBOX_STATUS_SUCCESS)
+ writeq(mbox_cmd->data[i], mbox->mbox_write_reg);
+ else
+ break;
+ }
+ }
+
+ spin_lock_irqsave(&mbox->lock, flags);
+ if (mbox_cmd->msg.s.type == OCTEON_MBOX_RESPONSE) {
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ } else {
+ if ((!mbox_cmd->msg.s.resp_needed) ||
+ (ret == OCTEON_MBOX_STATUS_FAILED)) {
+ mbox->state &= ~OCTEON_MBOX_STATE_RESPONSE_PENDING;
+ if (!(mbox->state &
+ (OCTEON_MBOX_STATE_REQUEST_RECEIVING |
+ OCTEON_MBOX_STATE_REQUEST_RECEIVED)))
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ }
+ }
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ return ret;
+}
+
+static void get_vf_stats(struct octeon_device *oct,
+ struct oct_vf_stats *stats)
+{
+ int i;
+
+ for (i = 0; i < oct->num_iqs; i++) {
+ if (!oct->instr_queue[i])
+ continue;
+ stats->tx_packets += oct->instr_queue[i]->stats.tx_done;
+ stats->tx_bytes += oct->instr_queue[i]->stats.tx_tot_bytes;
+ }
+
+ for (i = 0; i < oct->num_oqs; i++) {
+ if (!oct->droq[i])
+ continue;
+ stats->rx_packets += oct->droq[i]->stats.rx_pkts_received;
+ stats->rx_bytes += oct->droq[i]->stats.rx_bytes_received;
+ }
+}
+
+/**
+ * octeon_mbox_process_cmd:
+ * @mbox: Pointer to the mailbox
+ * @mbox_cmd: Pointer to command received
+ *
+ * Process the cmd received in mbox
+ */
+static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
+ struct octeon_mbox_cmd *mbox_cmd)
+{
+ struct octeon_device *oct = mbox->oct_dev;
+
+ switch (mbox_cmd->msg.s.cmd) {
+ case OCTEON_VF_ACTIVE:
+ dev_dbg(&oct->pci_dev->dev, "got vfactive sending data back\n");
+ mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
+ mbox_cmd->msg.s.resp_needed = 1;
+ mbox_cmd->msg.s.len = 2;
+ mbox_cmd->data[0] = 0; /* VF version is in mbox_cmd->data[0] */
+ ((struct lio_version *)&mbox_cmd->data[0])->major =
+ LIQUIDIO_BASE_MAJOR_VERSION;
+ ((struct lio_version *)&mbox_cmd->data[0])->minor =
+ LIQUIDIO_BASE_MINOR_VERSION;
+ ((struct lio_version *)&mbox_cmd->data[0])->micro =
+ LIQUIDIO_BASE_MICRO_VERSION;
+ memcpy(mbox_cmd->msg.s.params, (uint8_t *)&oct->pfvf_hsword, 6);
+		/* Sending core config info to the corresponding active VF. */
+ octeon_mbox_write(oct, mbox_cmd);
+ break;
+
+ case OCTEON_VF_FLR_REQUEST:
+ dev_info(&oct->pci_dev->dev,
+ "got a request for FLR from VF that owns DPI ring %u\n",
+ mbox->q_no);
+ pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);
+ break;
+
+ case OCTEON_PF_CHANGED_VF_MACADDR:
+ if (OCTEON_CN23XX_VF(oct))
+ octeon_pf_changed_vf_macaddr(oct,
+ mbox_cmd->msg.s.params);
+ break;
+
+ case OCTEON_GET_VF_STATS:
+ dev_dbg(&oct->pci_dev->dev, "Got VF stats request. Sending data back\n");
+ mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
+ mbox_cmd->msg.s.resp_needed = 1;
+ mbox_cmd->msg.s.len = 1 +
+ sizeof(struct oct_vf_stats) / sizeof(u64);
+ get_vf_stats(oct, (struct oct_vf_stats *)mbox_cmd->data);
+ octeon_mbox_write(oct, mbox_cmd);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/**
+ * octeon_mbox_process_message
+ * @mbox: mailbox
+ *
+ * Process the received mbox message.
+ */
+int octeon_mbox_process_message(struct octeon_mbox *mbox)
+{
+ struct octeon_mbox_cmd mbox_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+
+ if (mbox->state & OCTEON_MBOX_STATE_ERROR) {
+ if (mbox->state & (OCTEON_MBOX_STATE_RESPONSE_PENDING |
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVING)) {
+ memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct octeon_mbox_cmd));
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ mbox_cmd.recv_status = 1;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->oct_dev, &mbox_cmd,
+ mbox_cmd.fn_arg);
+ return 0;
+ }
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return 0;
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_RESPONSE_RECEIVED) {
+ memcpy(&mbox_cmd, &mbox->mbox_resp,
+ sizeof(struct octeon_mbox_cmd));
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ mbox_cmd.recv_status = 0;
+ if (mbox_cmd.fn)
+ mbox_cmd.fn(mbox->oct_dev, &mbox_cmd, mbox_cmd.fn_arg);
+ return 0;
+ }
+
+ if (mbox->state & OCTEON_MBOX_STATE_REQUEST_RECEIVED) {
+ memcpy(&mbox_cmd, &mbox->mbox_req,
+ sizeof(struct octeon_mbox_cmd));
+ if (!mbox_cmd.msg.s.resp_needed) {
+ mbox->state &= ~OCTEON_MBOX_STATE_REQUEST_RECEIVED;
+ if (!(mbox->state &
+ OCTEON_MBOX_STATE_RESPONSE_PENDING))
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ }
+
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ octeon_mbox_process_cmd(mbox, &mbox_cmd);
+ return 0;
+ }
+
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ WARN_ON(1);
+
+ return 0;
+}
+
+int octeon_mbox_cancel(struct octeon_device *oct, int q_no)
+{
+ struct octeon_mbox *mbox = oct->mbox[q_no];
+ struct octeon_mbox_cmd *mbox_cmd;
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&mbox->lock, flags);
+ mbox_cmd = &mbox->mbox_resp;
+
+ if (!(mbox->state & OCTEON_MBOX_STATE_RESPONSE_PENDING)) {
+ spin_unlock_irqrestore(&mbox->lock, flags);
+ return 1;
+ }
+
+ mbox->state = OCTEON_MBOX_STATE_IDLE;
+ memset(mbox_cmd, 0, sizeof(*mbox_cmd));
+ writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
+ spin_unlock_irqrestore(&mbox->lock, flags);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
new file mode 100644
index 000000000..d92bd7e16
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -0,0 +1,122 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+#ifndef __MAILBOX_H__
+#define __MAILBOX_H__
+
+/* Macros for Mail Box Communication */
+
+#define OCTEON_MBOX_DATA_MAX 32
+
+#define OCTEON_VF_ACTIVE 0x1
+#define OCTEON_VF_FLR_REQUEST 0x2
+#define OCTEON_PF_CHANGED_VF_MACADDR 0x4
+#define OCTEON_GET_VF_STATS 0x8
+
+/* Macros for read acknowledgement, signature and error values */
+#define OCTEON_PFVFACK 0xffffffffffffffffULL
+#define OCTEON_PFVFSIG 0x1122334455667788ULL
+#define OCTEON_PFVFERR 0xDEADDEADDEADDEADULL
+
+#define LIO_MBOX_WRITE_WAIT_CNT 1000
+#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
+
+enum octeon_mbox_cmd_status {
+ OCTEON_MBOX_STATUS_SUCCESS = 0,
+ OCTEON_MBOX_STATUS_FAILED = 1,
+ OCTEON_MBOX_STATUS_BUSY = 2
+};
+
+enum octeon_mbox_message_type {
+ OCTEON_MBOX_REQUEST = 0,
+ OCTEON_MBOX_RESPONSE = 1
+};
+
+union octeon_mbox_message {
+ u64 u64;
+ struct {
+ u16 type : 1;
+ u16 resp_needed : 1;
+ u16 cmd : 6;
+ u16 len : 8;
+ u8 params[6];
+ } s;
+};
+
+typedef void (*octeon_mbox_callback_t)(void *, void *, void *);
+
+struct octeon_mbox_cmd {
+ union octeon_mbox_message msg;
+ u64 data[OCTEON_MBOX_DATA_MAX];
+ u32 q_no;
+ u32 recv_len;
+ u32 recv_status;
+ octeon_mbox_callback_t fn;
+ void *fn_arg;
+};
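+
+/* Illustrative sketch: composing a single-word request using the layout
+ * above (the queue number and the choice of command are placeholders):
+ *
+ *	struct octeon_mbox_cmd cmd;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	cmd.msg.s.type = OCTEON_MBOX_REQUEST;
+ *	cmd.msg.s.resp_needed = 1;
+ *	cmd.msg.s.cmd = OCTEON_VF_ACTIVE;
+ *	cmd.msg.s.len = 1;	// total number of 64-bit words in the message
+ *	cmd.q_no = 0;
+ *	octeon_mbox_write(oct, &cmd);
+ */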
+
+enum octeon_mbox_state {
+ OCTEON_MBOX_STATE_IDLE = 1,
+ OCTEON_MBOX_STATE_REQUEST_RECEIVING = 2,
+ OCTEON_MBOX_STATE_REQUEST_RECEIVED = 4,
+ OCTEON_MBOX_STATE_RESPONSE_PENDING = 8,
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVING = 16,
+ OCTEON_MBOX_STATE_RESPONSE_RECEIVED = 32,
+ OCTEON_MBOX_STATE_ERROR = 64
+};
+
+struct octeon_mbox {
+ /** A spinlock to protect access to this q_mbox. */
+ spinlock_t lock;
+
+ struct octeon_device *oct_dev;
+
+ u32 q_no;
+
+ enum octeon_mbox_state state;
+
+ struct cavium_wk mbox_poll_wk;
+
+ /** SLI_MAC_PF_MBOX_INT for PF, SLI_PKT_MBOX_INT for VF. */
+ void *mbox_int_reg;
+
+ /** SLI_PKT_PF_VF_MBOX_SIG(0) for PF, SLI_PKT_PF_VF_MBOX_SIG(1) for VF.
+ */
+ void *mbox_write_reg;
+
+ /** SLI_PKT_PF_VF_MBOX_SIG(1) for PF, SLI_PKT_PF_VF_MBOX_SIG(0) for VF.
+ */
+ void *mbox_read_reg;
+
+ struct octeon_mbox_cmd mbox_req;
+
+ struct octeon_mbox_cmd mbox_resp;
+
+};
+
+struct oct_vf_stats_ctx {
+ atomic_t status;
+ struct oct_vf_stats *stats;
+};
+
+int octeon_mbox_read(struct octeon_mbox *mbox);
+int octeon_mbox_write(struct octeon_device *oct,
+ struct octeon_mbox_cmd *mbox_cmd);
+int octeon_mbox_process_message(struct octeon_mbox *mbox);
+int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
new file mode 100644
index 000000000..5b4cb725f
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -0,0 +1,236 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more details.
+ ***********************************************************************/
+/*! \file octeon_main.h
+ * \brief Host Driver: This file is included by all host driver source files
+ * to include common definitions.
+ */
+
+#ifndef _OCTEON_MAIN_H_
+#define _OCTEON_MAIN_H_
+
+#include <linux/sched/signal.h>
+
+#if BITS_PER_LONG == 32
+#define CVM_CAST64(v) ((long long)(v))
+#elif BITS_PER_LONG == 64
+#define CVM_CAST64(v) ((long long)(long)(v))
+#else
+#error "Unknown system architecture"
+#endif
+
+#define DRV_NAME "LiquidIO"
+
+struct octeon_device_priv {
+ /** Tasklet structures for this device. */
+ struct tasklet_struct droq_tasklet;
+ unsigned long napi_mask;
+ struct octeon_device *dev;
+};
+
+/** This structure is used by NIC driver to store information required
+ * to free the sk_buff when the packet has been fetched by Octeon.
+ * Byte offsets below assume the worst case of a 64-bit system.
+ */
+struct octnet_buf_free_info {
+ /** Bytes 1-8. Pointer to network device private structure. */
+ struct lio *lio;
+
+ /** Bytes 9-16. Pointer to sk_buff. */
+ struct sk_buff *skb;
+
+ /** Bytes 17-24. Pointer to gather list. */
+ struct octnic_gather *g;
+
+ /** Bytes 25-32. Physical address of skb->data or gather list. */
+ u64 dptr;
+
+ /** Bytes 33-47. Piggybacked soft command, if any */
+ struct octeon_soft_command *sc;
+};
+
+/* BQL-related functions */
+int octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
+void octeon_update_tx_completion_counters(void *buf, int reqtype,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl);
+void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
+ unsigned int bytes_compl);
+void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
+
+void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
+ struct octeon_droq *droq);
+
+/** Swap 8B blocks */
+static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
+{
+ while (blocks) {
+ cpu_to_be64s(data);
+ blocks--;
+ data++;
+ }
+}
+
+/**
+ * \brief unmaps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ */
+static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
+{
+ dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
+ baridx);
+
+ if (oct->mmio[baridx].done)
+ iounmap(oct->mmio[baridx].hw_addr);
+
+ if (oct->mmio[baridx].start)
+ pci_release_region(oct->pci_dev, baridx * 2);
+}
+
+/**
+ * \brief maps a PCI BAR
+ * @param oct Pointer to Octeon device
+ * @param baridx bar index
+ * @param max_map_len maximum length of mapped memory
+ */
+static inline int octeon_map_pci_barx(struct octeon_device *oct,
+ int baridx, int max_map_len)
+{
+ u32 mapped_len = 0;
+
+ if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
+ dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
+ baridx);
+ return 1;
+ }
+
+ oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
+ oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);
+
+ mapped_len = oct->mmio[baridx].len;
+ if (!mapped_len)
+ goto err_release_region;
+
+ if (max_map_len && (mapped_len > max_map_len))
+ mapped_len = max_map_len;
+
+ oct->mmio[baridx].hw_addr =
+ ioremap(oct->mmio[baridx].start, mapped_len);
+ oct->mmio[baridx].mapped_len = mapped_len;
+
+ dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
+ baridx, oct->mmio[baridx].start, mapped_len,
+ oct->mmio[baridx].len);
+
+ if (!oct->mmio[baridx].hw_addr) {
+ dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
+ baridx);
+ goto err_release_region;
+ }
+ oct->mmio[baridx].done = 1;
+
+ return 0;
+
+err_release_region:
+ pci_release_region(oct->pci_dev, baridx * 2);
+ return 1;
+}
+
+/* input parameters:
+ * sc: pointer to a soft request
+ * timeout: milliseconds that an application wants to wait for the
+ *          response of the request.
+ *          0: the request will wait until its response gets back
+ *             from the firmware within LIO_SC_MAX_TMO_MS milliseconds.
+ *             If the response does not return within
+ *             LIO_SC_MAX_TMO_MS milliseconds, lio_process_ordered_list()
+ *             will move the request to the zombie response list.
+ *
+ * return value:
+ * 0: got the response from firmware for the sc request.
+ * errno -EINTR: user aborted the command.
+ * errno -ETIME: user specified timeout value has expired.
+ * errno -EBUSY: the response of the request did not return in a
+ *               reasonable time (LIO_SC_MAX_TMO_MS).
+ *               The sc will be moved to the zombie response list by
+ *               lio_process_ordered_list().
+ *
+ * For a request with a non-zero return value, sc->caller_is_done
+ * will be marked 1.
+ * When getting a request with a zero return value, the requestor
+ * should mark sc->caller_is_done with 1 after examining the
+ * response of sc.
+ * lio_process_ordered_list() will free the soft command on behalf
+ * of the soft command requestor.
+ * This is to fix the possible race condition of both the timeout process
+ * and lio_process_ordered_list()/callback function freeing the same
+ * sc structure.
+ */
+static inline int
+wait_for_sc_completion_timeout(struct octeon_device *oct_dev,
+ struct octeon_soft_command *sc,
+ unsigned long timeout)
+{
+ int errno = 0;
+ long timeout_jiff;
+
+ if (timeout)
+ timeout_jiff = msecs_to_jiffies(timeout);
+ else
+ timeout_jiff = MAX_SCHEDULE_TIMEOUT;
+
+ timeout_jiff =
+ wait_for_completion_interruptible_timeout(&sc->complete,
+ timeout_jiff);
+ if (timeout_jiff == 0) {
+ dev_err(&oct_dev->pci_dev->dev, "%s: sc is timeout\n",
+ __func__);
+ WRITE_ONCE(sc->caller_is_done, true);
+ errno = -ETIME;
+ } else if (timeout_jiff == -ERESTARTSYS) {
+ dev_err(&oct_dev->pci_dev->dev, "%s: sc is interrupted\n",
+ __func__);
+ WRITE_ONCE(sc->caller_is_done, true);
+ errno = -EINTR;
+ } else if (sc->sc_status == OCTEON_REQUEST_TIMEOUT) {
+ dev_err(&oct_dev->pci_dev->dev, "%s: sc has fatal timeout\n",
+ __func__);
+ WRITE_ONCE(sc->caller_is_done, true);
+ errno = -EBUSY;
+ }
+
+ return errno;
+}
+
+#ifndef ROUNDUP4
+#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
+#endif
+
+#ifndef ROUNDUP8
+#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
+#endif
+
+#ifndef ROUNDUP16
+#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
+#endif
+
+#ifndef ROUNDUP128
+#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
+#endif
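+
+/* For example, ROUNDUP8(13) evaluates to 16 and ROUNDUP8(16) stays 16. */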
+
+#endif /* _OCTEON_MAIN_H_ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
new file mode 100644
index 000000000..7ccab3614
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -0,0 +1,201 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_mem_ops.h"
+
+#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP
+
+#ifdef __BIG_ENDIAN_BITFIELD
+static inline void
+octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
+{
+ u32 mask;
+
+ mask = oct->fn_list.bar1_idx_read(oct, idx);
+ mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
+ oct->fn_list.bar1_idx_write(oct, idx, mask);
+}
+#else
+#define octeon_toggle_bar1_swapmode(oct, idx)
+#endif
+
+static void
+octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ writeb(*(hostbuf++), mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ writeq(*((u64 *)hostbuf), mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ writeb(*(hostbuf++), mapped_addr++);
+}
+
+static void
+octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr,
+ u8 *hostbuf, u32 len)
+{
+ while ((len) && ((unsigned long)mapped_addr) & 7) {
+ *(hostbuf++) = readb(mapped_addr++);
+ len--;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len >= 8) {
+ *((u64 *)hostbuf) = readq(mapped_addr);
+ mapped_addr += 8;
+ hostbuf += 8;
+ len -= 8;
+ }
+
+ octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX);
+
+ while (len--)
+ *(hostbuf++) = readb(mapped_addr++);
+}
+
+/* Core mem read/write with temporary bar1 settings. */
+/* op = 1 to read, op = 0 to write. */
+static void
+__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr,
+ u8 *hostbuf, u32 len, u32 op)
+{
+ u32 copy_len = 0, index_reg_val = 0;
+ unsigned long flags;
+ u8 __iomem *mapped_addr;
+ u64 static_mapping_base;
+
+ static_mapping_base = oct->console_nb_info.dram_region_base;
+
+ if (static_mapping_base &&
+ static_mapping_base == (addr & ~(OCTEON_BAR1_ENTRY_SIZE - 1ULL))) {
+ int bar1_index = oct->console_nb_info.bar1_index;
+
+ mapped_addr = oct->mmio[1].hw_addr
+ + (bar1_index << ilog2(OCTEON_BAR1_ENTRY_SIZE))
+ + (addr & (OCTEON_BAR1_ENTRY_SIZE - 1ULL));
+
+ if (op)
+ octeon_pci_fastread(oct, mapped_addr, hostbuf, len);
+ else
+ octeon_pci_fastwrite(oct, mapped_addr, hostbuf, len);
+
+ return;
+ }
+
+ spin_lock_irqsave(&oct->mem_access_lock, flags);
+
+ /* Save the original index reg value. */
+ index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX);
+ do {
+ oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1);
+ mapped_addr = oct->mmio[1].hw_addr
+ + (MEMOPS_IDX << 22) + (addr & 0x3fffff);
+
+		/* If operation crosses a 4MB boundary, split the transfer
+		 * at the 4MB boundary.
+		 */
+ if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) {
+ copy_len = (u32)(((addr & ~(0x3fffff)) +
+ (MEMOPS_IDX << 22)) - addr);
+ } else {
+ copy_len = len;
+ }
+
+ if (op) { /* read from core */
+ octeon_pci_fastread(oct, mapped_addr, hostbuf,
+ copy_len);
+ } else {
+ octeon_pci_fastwrite(oct, mapped_addr, hostbuf,
+ copy_len);
+ }
+
+ len -= copy_len;
+ addr += copy_len;
+ hostbuf += copy_len;
+
+ } while (len);
+
+ oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val);
+
+ spin_unlock_irqrestore(&oct->mem_access_lock, flags);
+}
+
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1);
+}
+
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ const u8 *buf,
+ u32 len)
+{
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)buf, len, 0);
+}
+
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr)
+{
+ __be64 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1);
+
+ return be64_to_cpu(ret);
+}
+
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr)
+{
+ __be32 ret;
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1);
+
+ return be32_to_cpu(ret);
+}
+
+void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr,
+ u32 val)
+{
+ __be32 t = cpu_to_be32(val);
+
+ __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
new file mode 100644
index 000000000..47a3ff5f9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h
@@ -0,0 +1,72 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+
+/*! \file octeon_mem_ops.h
+ * \brief Host Driver: Routines used to read/write Octeon memory.
+ */
+
+#ifndef __OCTEON_MEM_OPS_H__
+#define __OCTEON_MEM_OPS_H__
+
+/** Read a 64-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * The range_idx gives the BAR1 index register for the range of address
+ * in which core_addr is mapped.
+ *
+ * @return 64-bit value read from Core memory
+ */
+u64 octeon_read_device_mem64(struct octeon_device *oct, u64 core_addr);
+
+/** Read a 32-bit value from a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to read from.
+ *
+ * @return 32-bit value read from Core memory
+ */
+u32 octeon_read_device_mem32(struct octeon_device *oct, u64 core_addr);
+
+/** Write a 32-bit value to a BAR1 mapped core memory address.
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to write to.
+ * @param val - 32-bit value to write.
+ */
+void
+octeon_write_device_mem32(struct octeon_device *oct,
+ u64 core_addr,
+ u32 val);
+
+/** Read multiple bytes from Octeon memory.
+ */
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ u8 *buf,
+ u32 len);
+
+/** Write multiple bytes into Octeon memory.
+ */
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+ u64 coreaddr,
+ const u8 *buf,
+ u32 len);
+
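+/* Illustrative usage (a sketch; core_addr is a placeholder Octeon-side
+ * physical address):
+ *
+ *	u8 buf[64];
+ *
+ *	octeon_pci_read_core_mem(oct, core_addr, buf, sizeof(buf));
+ *	octeon_write_device_mem32(oct, core_addr, 0x12345678);
+ */
+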
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
new file mode 100644
index 000000000..ebe56bd88
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -0,0 +1,626 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+
+/*! \file octeon_network.h
+ * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
+ */
+
+#ifndef __OCTEON_NETWORK_H__
+#define __OCTEON_NETWORK_H__
+#include <linux/ptp_clock_kernel.h>
+
+#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
+#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
+
+/* Bit mask values for lio->ifstate */
+#define LIO_IFSTATE_DROQ_OPS 0x01
+#define LIO_IFSTATE_REGISTERED 0x02
+#define LIO_IFSTATE_RUNNING 0x04
+#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
+#define LIO_IFSTATE_RESETTING 0x10
+
+struct liquidio_if_cfg_resp {
+ u64 rh;
+ struct liquidio_if_cfg_info cfg_info;
+ u64 status;
+};
+
+#define LIO_IFCFG_WAIT_TIME 3000 /* In milli seconds */
+#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200
+
+/* Structure of a node in list of gather components maintained by
+ * NIC driver for each network device.
+ */
+struct octnic_gather {
+ /* List manipulation. Next and prev pointers. */
+ struct list_head list;
+
+ /* Size of the gather component at sg in bytes. */
+ int sg_size;
+
+ /* Number of bytes that sg was adjusted to make it 8B-aligned. */
+ int adjust;
+
+ /* Gather component that can accommodate max sized fragment list
+ * received from the IP layer.
+ */
+ struct octeon_sg_entry *sg;
+
+ dma_addr_t sg_dma_ptr;
+};
+
+struct oct_nic_stats_resp {
+ u64 rh;
+ struct oct_link_stats stats;
+ u64 status;
+};
+
+struct oct_nic_vf_stats_resp {
+ u64 rh;
+ u64 spoofmac_cnt;
+ u64 status;
+};
+
+struct oct_nic_stats_ctrl {
+ struct completion complete;
+ struct net_device *netdev;
+};
+
+struct oct_nic_seapi_resp {
+ u64 rh;
+ union {
+ u32 fec_setting;
+ u32 speed;
+ };
+ u64 status;
+};
+
+/** LiquidIO per-interface network private data */
+struct lio {
+ /** State of the interface. Rx/Tx happens only in the RUNNING state. */
+ atomic_t ifstate;
+
+ /** Octeon Interface index number. This device will be represented as
+ * oct<ifidx> in the system.
+ */
+ int ifidx;
+
+ /** Octeon Input queue to use to transmit for this network interface. */
+ int txq;
+
+ /** Octeon Output queue from which pkts arrive
+ * for this network interface.
+ */
+ int rxq;
+
+ /** Guards each glist */
+ spinlock_t *glist_lock;
+
+ /** Array of gather component linked lists */
+ struct list_head *glist;
+ void **glists_virt_base;
+ dma_addr_t *glists_dma_base;
+ u32 glist_entry_size;
+
+ /** Pointer to the NIC properties for the Octeon device this network
+ * interface is associated with.
+ */
+ struct octdev_props *octprops;
+
+ /** Pointer to the octeon device structure. */
+ struct octeon_device *oct_dev;
+
+ struct net_device *netdev;
+
+ /** Link information sent by the core application for this interface. */
+ struct oct_link_info linfo;
+
+ /** counter of link changes */
+ u64 link_changes;
+
+ /** Size of Tx queue for this octeon device. */
+ u32 tx_qsize;
+
+ /** Size of Rx queue for this octeon device. */
+ u32 rx_qsize;
+
+	/** Size of the MTU of this octeon device. */
+ u32 mtu;
+
+ /** msg level flag per interface. */
+ u32 msg_enable;
+
+	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
+ u64 dev_capability;
+
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6, Checksums for this device for Kernel
+ * 3.10.0 onwards
+ */
+ u64 enc_dev_capability;
+
+	/** Copy of beacon reg in phy */
+ u32 phy_beacon_val;
+
+ /** Copy of ctrl reg in phy */
+ u32 led_ctrl_val;
+
+ /* PTP clock information */
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ s64 ptp_adjust;
+
+ /* for atomic access to Octeon PTP reg and data struct */
+ spinlock_t ptp_lock;
+
+ /* Interface info */
+ u32 intf_open;
+
+ /* work queue for txq status */
+ struct cavium_wq txq_status_wq;
+
+ /* work queue for rxq oom status */
+ struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
+
+ /* work queue for link status */
+ struct cavium_wq link_status_wq;
+
+ /* work queue to regularly send local time to octeon firmware */
+ struct cavium_wq sync_octeon_time_wq;
+
+ int netdev_uc_count;
+ struct cavium_wk stats_wk;
+};
+
+#define LIO_SIZE (sizeof(struct lio))
+#define GET_LIO(netdev) ((struct lio *)netdev_priv(netdev))
+
+#define LIO_MAX_CORES 16
+
+/**
+ * \brief Enable or disable feature
+ * @param netdev pointer to network device
+ * @param cmd Command that just requires acknowledgment
+ * @param param1 Parameter to command
+ */
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
+
+int setup_rx_oom_poll_fn(struct net_device *netdev);
+
+void cleanup_rx_oom_poll_fn(struct net_device *netdev);
+
+/**
+ * \brief Link control command completion callback
+ * @param nctrl_ptr pointer to control packet structure
+ *
+ * This routine is called by the callback function when a ctrl pkt sent to
+ * core app completes. The nctrl_ptr contains a copy of the command type
+ * and data sent to the core app. This routine is only called if the ctrl
+ * pkt was sent successfully to the core app.
+ */
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
+
+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+ u32 num_iqs, u32 num_oqs);
+
+irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
+ void *dev);
+
+int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
+
+void lio_fetch_stats(struct work_struct *work);
+
+int lio_wait_for_clean_oq(struct octeon_device *oct);
+/**
+ * \brief Register ethtool operations
+ * @param netdev pointer to network device
+ */
+void liquidio_set_ethtool_ops(struct net_device *netdev);
+
+void lio_delete_glists(struct lio *lio);
+
+int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);
+
+int liquidio_get_speed(struct lio *lio);
+int liquidio_set_speed(struct lio *lio, int speed);
+int liquidio_get_fec(struct lio *lio);
+int liquidio_set_fec(struct lio *lio, int on_off);
+
+/**
+ * \brief Net device change_mtu
+ * @param netdev network device
+ */
+int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
+#define LIO_CHANGE_MTU_SUCCESS 1
+#define LIO_CHANGE_MTU_FAIL 2
+
+#define SKB_ADJ_MASK 0x3F
+#define SKB_ADJ (SKB_ADJ_MASK + 1)
+
+#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */
+#define LIO_RXBUFFER_SZ 2048
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct,
+ struct octeon_skb_page_info *pg_info)
+{
+ struct page *page;
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!page))
+ return NULL;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ __free_page(page);
+ pg_info->page = NULL;
+ return NULL;
+ }
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ /* Get DMA info */
+ pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* Mapping failed!! */
+ if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
+ __free_page(page);
+ dev_kfree_skb_any((struct sk_buff *)skb);
+ pg_info->page = NULL;
+ return NULL;
+ }
+
+ pg_info->page = page;
+ pg_info->page_offset = 0;
+ skb_pg_info->page = page;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = pg_info->dma;
+
+ return (void *)skb;
+}
+
+static inline void
+*recv_buffer_fast_alloc(u32 size)
+{
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ skb = dev_alloc_skb(size + SKB_ADJ);
+ if (unlikely(!skb))
+ return NULL;
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = NULL;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = 0;
+
+ return skb;
+}
+
+static inline int
+recv_buffer_recycle(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf;
+
+ if (!pg_info->page) {
+ dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (unlikely(page_count(pg_info->page) != 1) ||
+ unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ return -ENOMEM;
+ }
+
+ /* Flip to other half of the buffer */
+ if (pg_info->page_offset == 0)
+ pg_info->page_offset = LIO_RXBUFFER_SZ;
+ else
+ pg_info->page_offset = 0;
+ page_ref_inc(pg_info->page);
+
+ return 0;
+}
+
+static inline void
+*recv_buffer_reuse(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ return NULL;
+ }
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = pg_info->page;
+ skb_pg_info->page_offset = pg_info->page_offset;
+ skb_pg_info->dma = pg_info->dma;
+
+ return skb;
+}
+
+static inline void
+recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
+{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
+static inline void recv_buffer_free(void *buffer)
+{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+ struct octeon_skb_page_info *pg_info;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+
+ if (pg_info->page) {
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ }
+
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void
+recv_buffer_fast_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void tx_buffer_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+#define lio_dma_alloc(oct, size, dma_addr) \
+ dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
+#define lio_dma_free(oct, size, virt_addr, dma_addr) \
+ dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
+
+static inline
+void *get_rbd(struct sk_buff *skb)
+{
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ va = page_address(pg_info->page) + pg_info->page_offset;
+
+ return va;
+}
+
+static inline u64
+lio_map_ring(void *buf)
+{
+ dma_addr_t dma_addr;
+
+ struct sk_buff *skb = (struct sk_buff *)buf;
+ struct octeon_skb_page_info *pg_info;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (!pg_info->page) {
+ pr_err("%s: pg_info->page NULL\n", __func__);
+ WARN_ON(1);
+ }
+
+ /* Get DMA info */
+ dma_addr = pg_info->dma;
+ if (!pg_info->dma) {
+ pr_err("%s: ERROR it should be already available\n",
+ __func__);
+ WARN_ON(1);
+ }
+ dma_addr += pg_info->page_offset;
+
+ return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring(struct pci_dev *pci_dev,
+ u64 buf_ptr)
+
+{
+ dma_unmap_page(&pci_dev->dev,
+ buf_ptr, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+}
+
+static inline void *octeon_fast_packet_alloc(u32 size)
+{
+ return recv_buffer_fast_alloc(size);
+}
+
+static inline void octeon_fast_packet_next(struct octeon_droq *droq,
+ struct sk_buff *nicbuf,
+ int copy_len,
+ int idx)
+{
+ skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
+ copy_len);
+}
+
+/**
+ * \brief check interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to check
+ */
+static inline int ifstate_check(struct lio *lio, int state_flag)
+{
+ return atomic_read(&lio->ifstate) & state_flag;
+}
+
+/**
+ * \brief set interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to set
+ */
+static inline void ifstate_set(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
+}
+
+/**
+ * \brief clear interface state
+ * @param lio per-network private data
+ * @param state_flag flag state to clear
+ */
+static inline void ifstate_reset(struct lio *lio, int state_flag)
+{
+ atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
+}
+
+/**
+ * \brief wait for all pending requests to complete
+ * @param oct Pointer to Octeon device
+ *
+ * Called during shutdown sequence
+ */
+static inline int wait_for_pending_requests(struct octeon_device *oct)
+{
+ int i, pcount = 0;
+
+ for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
+ pcount = atomic_read(
+ &oct->response_list[OCTEON_ORDERED_SC_LIST]
+ .pending_req_count);
+ if (pcount)
+ schedule_timeout_uninterruptible(HZ / 10);
+ else
+ break;
+ }
+
+ if (pcount)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * \brief Stop Tx queues
+ * @param netdev network device
+ */
+static inline void stop_txqs(struct net_device *netdev)
+{
+ int i;
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ netif_stop_subqueue(netdev, i);
+}
+
+/**
+ * \brief Wake Tx queues
+ * @param netdev network device
+ */
+static inline void wake_txqs(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int i, qno;
+
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;
+
+ if (__netif_subqueue_stopped(netdev, i)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, i);
+ }
+ }
+}
+
+/**
+ * \brief Start Tx queues
+ * @param netdev network device
+ */
+static inline void start_txqs(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ int i;
+
+ if (lio->linfo.link.s.link_up) {
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ netif_start_subqueue(netdev, i);
+ }
+}
+
+static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
+{
+ return skb->queue_mapping % oct->num_iqs;
+}
+
+/**
+ * Remove the node at the head of the list. The list would be empty at
+ * the end of this call if there are no more nodes in the list.
+ */
+static inline struct list_head *lio_list_delete_head(struct list_head *root)
+{
+ struct list_head *node;
+
+ if (list_empty_careful(root))
+ node = NULL;
+ else
+ node = root->next;
+
+ if (node)
+ list_del(node);
+
+ return node;
+}
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
new file mode 100644
index 000000000..1a706f81b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -0,0 +1,198 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ u32 rdatasize)
+{
+ struct octeon_soft_command *sc;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, 0, rdatasize, 0);
+
+ if (!sc)
+ return NULL;
+
+ /* Copy existing command structure into the soft command */
+ memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B));
+
+ /* Add in the response related fields. Opcode and Param are already
+ * there.
+ */
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+		/* pki_ih3 + irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ }
+
+ irh->rflag = 1; /* a response is required */
+
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = rdatasize;
+
+ *sc->status_word = COMPLETION_WORD_INIT;
+
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct))
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ else
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+
+ sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
+
+ return sc;
+}
+
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ int xmit_more)
+{
+ int ring_doorbell = !xmit_more;
+
+ return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd,
+ ndata->buf, ndata->datasize,
+ ndata->reqtype);
+}
+
+static inline struct octeon_soft_command
+*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl)
+{
+ struct octeon_soft_command *sc = NULL;
+ u8 *data;
+ u32 rdatasize;
+ u32 uddsize = 0, datasize = 0;
+
+ uddsize = (u32)(nctrl->ncmd.s.more * 8);
+
+ datasize = OCTNET_CMD_SIZE + uddsize;
+ rdatasize = 16;
+
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct, datasize, rdatasize, 0);
+
+ if (!sc)
+ return NULL;
+
+ data = (u8 *)sc->virtdptr;
+
+ memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+
+ octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
+
+ if (uddsize) {
+ /* Endian-Swap for UDD should have been done by caller. */
+ memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
+ }
+
+ sc->iq_no = (u32)nctrl->iq_no;
+
+ octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
+ 0, 0, 0);
+
+ init_completion(&sc->complete);
+ sc->sc_status = OCTEON_REQUEST_PENDING;
+
+ return sc;
+}
+
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl)
+{
+ int retval;
+ struct octeon_soft_command *sc = NULL;
+
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ /* Allow only rx ctrl command to stop traffic on the chip
+ * during offline operations
+ */
+ if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
+ (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+ dev_err(&oct->pci_dev->dev,
+ "%s cmd:%d not processed since driver offline\n",
+ __func__, nctrl->ncmd.s.cmd);
+ return -1;
+ }
+
+ sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
+ if (!sc) {
+ dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
+ __func__);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+ return -1;
+ }
+
+ retval = octeon_send_soft_command(oct, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct, sc);
+ dev_err(&oct->pci_dev->dev, "%s pf_num:%d soft command:%d send failed status: %x\n",
+ __func__, oct->pf_num, nctrl->ncmd.s.cmd, retval);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+ return -1;
+ }
+
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+
+ if (nctrl->ncmd.s.cmdgroup == 0) {
+ switch (nctrl->ncmd.s.cmd) {
+ /* caller holds lock, can not sleep */
+ case OCTNET_CMD_CHANGE_DEVFLAGS:
+ case OCTNET_CMD_SET_MULTI_LIST:
+ case OCTNET_CMD_SET_UC_LIST:
+ WRITE_ONCE(sc->caller_is_done, true);
+ return retval;
+ }
+ }
+
+ retval = wait_for_sc_completion_timeout(oct, sc, 0);
+ if (retval)
+ return (retval);
+
+ nctrl->sc_status = sc->sc_status;
+ retval = nctrl->sc_status;
+ if (nctrl->cb_fn)
+ nctrl->cb_fn(nctrl);
+
+ WRITE_ONCE(sc->caller_is_done, true);
+
+ return retval;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
new file mode 100644
index 000000000..87dd6f89c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -0,0 +1,288 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+
+/*! \file octeon_nic.h
+ *  \brief Host NIC Driver: Routines to send network data &
+ * control packets to Octeon.
+ */
+
+#ifndef __OCTEON_NIC_H__
+#define __OCTEON_NIC_H__
+
+/* Maximum number of 8-byte words that can be sent in a NIC control message.
+ */
+#define MAX_NCTRL_UDD 32
+
+typedef void (*octnic_ctrl_pkt_cb_fn_t) (void *);
+
+/* Structure of control information passed by the NIC module to the OSI
+ * layer when sending control commands to Octeon device software.
+ */
+struct octnic_ctrl_pkt {
+ /** Command to be passed to the Octeon device software. */
+ union octnet_cmd ncmd;
+
+ /** Send buffer */
+ void *data;
+ u64 dmadata;
+
+ /** Response buffer */
+ void *rdata;
+ u64 dmardata;
+
+ /** Additional data that may be needed by some commands. */
+ u64 udd[MAX_NCTRL_UDD];
+
+ /** Input queue to use to send this command. */
+ u64 iq_no;
+
+ /** The network device that issued the control command. */
+ u64 netpndev;
+
+ /** Callback function called when the command has been fetched */
+ octnic_ctrl_pkt_cb_fn_t cb_fn;
+
+ u32 sc_status;
+};
+
+#define MAX_UDD_SIZE(nctrl) (sizeof((nctrl)->udd))
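+
+/* Illustrative sketch only (not part of the driver sources): a caller such as
+ * liquidio_set_feature() typically fills struct octnic_ctrl_pkt roughly as
+ * below before handing it to octnet_send_nic_ctrl_pkt(). The local variable
+ * names and field values here are hypothetical.
+ *
+ *	struct octnic_ctrl_pkt nctrl;
+ *
+ *	memset(&nctrl, 0, sizeof(nctrl));
+ *	nctrl.ncmd.u64 = 0;
+ *	nctrl.ncmd.s.cmd = cmd;
+ *	nctrl.ncmd.s.param1 = param1;
+ *	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ *	nctrl.netpndev = (u64)netdev;
+ *	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+ *
+ *	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ */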
+
+/** Structure of data information passed by the NIC module to the OSI
+ * layer when forwarding data to Octeon device software.
+ */
+struct octnic_data_pkt {
+ /** Pointer to information maintained by NIC module for this packet. The
+ * OSI layer passes this as-is to the driver.
+ */
+ void *buf;
+
+ /** Type of buffer passed in "buf" above. */
+ u32 reqtype;
+
+ /** Total data bytes to be transferred in this command. */
+ u32 datasize;
+
+ /** Command to be passed to the Octeon device software. */
+ union octeon_instr_64B cmd;
+
+ /** Input queue to use to send this command. */
+ u32 q_no;
+
+};
+
+/** Structure passed by NIC module to OSI layer to prepare a command to send
+ * network data to Octeon.
+ */
+union octnic_cmd_setup {
+ struct {
+ u32 iq_no:8;
+ u32 gather:1;
+ u32 timestamp:1;
+ u32 ip_csum:1;
+ u32 transport_csum:1;
+ u32 tnl_csum:1;
+ u32 rsvd:19;
+
+ union {
+ u32 datasize;
+ u32 gatherptrs;
+ } u;
+ } s;
+
+ u64 u64;
+
+};
+
+static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
+{
+ return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
+ >= (oct->instr_queue[q_no]->max_count - 2));
+}
+
+static inline void
+octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_irh *irh;
+ union octnic_packet_params packet_params;
+ int port;
+
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
+
+ ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2;
+
+	/* assume that rflag is cleared, so the front data will only have
+	 * irh and ossp[0], ossp[1] for a total of 32 bytes
+ */
+ ih2->fsz = LIO_PCICMD_O2;
+
+ ih2->tagtype = ORDERED_TAG;
+ ih2->grp = DEFAULT_POW_GRP;
+
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ ih2->tag = tag;
+ else
+ ih2->tag = LIO_DATA(port);
+
+ ih2->raw = 1;
+ ih2->qos = (port & 3) + 4; /* map qos based on interface */
+
+ if (!setup->s.gather) {
+ ih2->dlengsz = setup->s.u.datasize;
+ } else {
+ ih2->gather = 1;
+ ih2->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd2.irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
+
+static inline void
+octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ union octnic_packet_params packet_params;
+ int port;
+
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
+
+ ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+	/* assume that rflag is cleared, so the front data will only have
+	 * irh and ossp[0] and ossp[1] for a total of 24 bytes
+ */
+ ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+ /*PKI IH*/
+ ih3->fsz = LIO_PCICMD_O3;
+
+ if (!setup->s.gather) {
+ ih3->dlengsz = setup->s.u.datasize;
+ } else {
+ ih3->gather = 1;
+ ih3->dlengsz = setup->s.u.gatherptrs;
+ }
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->utt = 1;
+ pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ pki_ih3->tag = tag;
+ else
+ pki_ih3->tag = LIO_DATA(port);
+
+ pki_ih3->tagtype = ORDERED_TAG;
+ pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+	pki_ih3->pm = 0x7; /* 0x7 - meant for Parse nothing, uninterpreted */
+	pki_ih3->sl = 8;   /* sl will be sizeof(pki_ih3) */
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
+
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param oct - octeon device pointer
+ * @param cmd - pointer to the instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param tag - tag to use for the instruction (0 selects the default
+ *              per-port data tag)
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ if (OCTEON_CN6XXX(oct))
+ octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag);
+ else
+ octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag);
+}
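+
+/* Illustrative sketch only: a transmit path might build the instruction
+ * roughly as below before queueing it with octnet_send_nic_data_pkt().
+ * Names not declared in this file (q_idx, skb, oct) are hypothetical locals
+ * of the caller.
+ *
+ *	union octnic_cmd_setup cmdsetup;
+ *	struct octnic_data_pkt ndata;
+ *
+ *	cmdsetup.u64 = 0;
+ *	cmdsetup.s.iq_no = q_idx;
+ *	cmdsetup.s.transport_csum = 1;
+ *	cmdsetup.s.u.datasize = skb->len;
+ *	octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, 0);
+ */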
+
+/** Allocate a soft command with space for a response immediately following
+ * the command.
+ * @param oct - octeon device pointer
+ * @param cmd - pointer to the command structure, pre-filled for everything
+ * except the response.
+ * @param rdatasize - size in bytes of the response.
+ *
+ * @returns pointer to allocated buffer with command copied into it, and
+ * response space immediately following.
+ */
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ u32 rdatasize);
+
+/** Send a NIC data packet to the device
+ * @param oct - octeon device pointer
+ * @param ndata - control structure with queueing and buffer information
+ * @param xmit_more - non-zero if more packets will follow (the doorbell is
+ *                    not rung in that case)
+ *
+ * @returns IQ_SEND_FAILED if it failed to add to the input queue, IQ_SEND_STOP
+ * if the queue should be stopped, and IQ_SEND_OK if it was sent okay.
+ */
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+ struct octnic_data_pkt *ndata,
+ int xmit_more);
+
+/** Send a NIC control packet to the device
+ * @param oct - octeon device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ * @returns -1 if the control packet could not be queued, otherwise the
+ * completion status reported for the command.
+ */
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+ struct octnic_ctrl_pkt *nctrl);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
new file mode 100644
index 000000000..8e59c2825
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -0,0 +1,936 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_device.h"
+#include "cn23xx_pf_device.h"
+#include "cn23xx_vf_device.h"
+
+struct iq_post_status {
+ int status;
+ int index;
+};
+
+static void check_db_timeout(struct work_struct *work);
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
+
+static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
+
+static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
+{
+ struct octeon_instr_queue *iq =
+ (struct octeon_instr_queue *)oct->instr_queue[iq_no];
+ return iq->iqcmd_64B;
+}
+
+#define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no))
+
+/* Define this to return the request status compatible with old code */
+/*#define OCTEON_USE_OLD_REQ_STATUS*/
+
+/* Return 0 on success, 1 on failure */
+int octeon_init_instr_queue(struct octeon_device *oct,
+ union oct_txpciq txpciq,
+ u32 num_descs)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_iq_config *conf = NULL;
+ u32 iq_no = (u32)txpciq.s.q_no;
+ u32 q_size;
+ struct cavium_wq *db_wq;
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ if (OCTEON_CN6XXX(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx)));
+ else if (OCTEON_CN23XX_PF(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf)));
+ else if (OCTEON_CN23XX_VF(oct))
+ conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_vf)));
+
+ if (!conf) {
+ dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
+ oct->chip_id);
+ return 1;
+ }
+
+ q_size = (u32)conf->instr_type * num_descs;
+
+ iq = oct->instr_queue[iq_no];
+
+ iq->oct_dev = oct;
+
+ iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma);
+ if (!iq->base_addr) {
+ dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
+ iq_no);
+ return 1;
+ }
+
+ iq->max_count = num_descs;
+
+	/* Initialize a list to hold requests that have been posted to Octeon
+	 * but have yet to be fetched by Octeon
+ */
+ iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)),
+ numa_node);
+ if (!iq->request_list)
+ iq->request_list = vzalloc(array_size(num_descs, sizeof(*iq->request_list)));
+ if (!iq->request_list) {
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
+ iq_no);
+ return 1;
+ }
+
+ dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %pad count: %d\n",
+ iq_no, iq->base_addr, &iq->base_addr_dma, iq->max_count);
+
+ iq->txpciq.u64 = txpciq.u64;
+ iq->fill_threshold = (u32)conf->db_min;
+ iq->fill_cnt = 0;
+ iq->host_write_index = 0;
+ iq->octeon_read_index = 0;
+ iq->flush_index = 0;
+ iq->last_db_time = 0;
+ iq->do_auto_flush = 1;
+ iq->db_timeout = (u32)conf->db_timeout;
+ atomic_set(&iq->instr_pending, 0);
+ iq->pkts_processed = 0;
+
+ /* Initialize the spinlock for this instruction queue */
+ spin_lock_init(&iq->lock);
+ if (iq_no == 0) {
+ iq->allow_soft_cmds = true;
+ spin_lock_init(&iq->post_lock);
+ } else {
+ iq->allow_soft_cmds = false;
+ }
+
+ spin_lock_init(&iq->iq_flush_running_lock);
+
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+
+ oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
+ WQ_MEM_RECLAIM,
+ 0);
+ if (!oct->check_db_wq[iq_no].wq) {
+ vfree(iq->request_list);
+ iq->request_list = NULL;
+ lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+ dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
+ iq_no);
+ return 1;
+ }
+
+ db_wq = &oct->check_db_wq[iq_no];
+
+ INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
+ db_wq->wk.ctxptr = oct;
+ db_wq->wk.ctxul = iq_no;
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+
+ return 0;
+}
+
+int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
+{
+ u64 desc_size = 0, q_size;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
+ destroy_workqueue(oct->check_db_wq[iq_no].wq);
+
+ if (OCTEON_CN6XXX(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn6xxx));
+ else if (OCTEON_CN23XX_PF(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_pf));
+ else if (OCTEON_CN23XX_VF(oct))
+ desc_size =
+ CFG_GET_IQ_INSTR_TYPE(CHIP_CONF(oct, cn23xx_vf));
+
+ vfree(iq->request_list);
+
+ if (iq->base_addr) {
+ q_size = iq->max_count * desc_size;
+ lio_dma_free(oct, (u32)q_size, iq->base_addr,
+ iq->base_addr_dma);
+ oct->io_qmask.iq &= ~(1ULL << iq_no);
+ vfree(oct->instr_queue[iq_no]);
+ oct->instr_queue[iq_no] = NULL;
+ oct->num_iqs--;
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 0 on success, 1 on failure */
+int octeon_setup_iq(struct octeon_device *oct,
+ int ifidx,
+ int q_index,
+ union oct_txpciq txpciq,
+ u32 num_descs,
+ void *app_ctx)
+{
+ u32 iq_no = (u32)txpciq.s.q_no;
+ int numa_node = dev_to_node(&oct->pci_dev->dev);
+
+ if (oct->instr_queue[iq_no]) {
+ dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
+ iq_no);
+ oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ return 0;
+ }
+ oct->instr_queue[iq_no] =
+ vzalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+ if (!oct->instr_queue[iq_no])
+ oct->instr_queue[iq_no] =
+ vzalloc(sizeof(struct octeon_instr_queue));
+ if (!oct->instr_queue[iq_no])
+ return 1;
+
+
+ oct->instr_queue[iq_no]->q_index = q_index;
+ oct->instr_queue[iq_no]->app_ctx = app_ctx;
+ oct->instr_queue[iq_no]->ifidx = ifidx;
+
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
+ vfree(oct->instr_queue[iq_no]);
+ oct->instr_queue[iq_no] = NULL;
+ return 1;
+ }
+
+ oct->num_iqs++;
+ if (oct->fn_list.enable_io_queues(oct)) {
+ octeon_delete_instr_queue(oct, iq_no);
+ return 1;
+ }
+
+ return 0;
+}
+
+int lio_wait_for_instr_fetch(struct octeon_device *oct)
+{
+ int i, retry = 1000, pending, instr_cnt = 0;
+
+ do {
+ instr_cnt = 0;
+
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & BIT_ULL(i)))
+ continue;
+ pending =
+ atomic_read(&oct->instr_queue[i]->instr_pending);
+ if (pending)
+ __check_db_timeout(oct, i);
+ instr_cnt += pending;
+ }
+
+ if (instr_cnt == 0)
+ break;
+
+ schedule_timeout_uninterruptible(1);
+
+ } while (retry-- && instr_cnt);
+
+ return instr_cnt;
+}
+
+static inline void
+ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq)
+{
+ if (atomic_read(&oct->status) == OCT_DEV_RUNNING) {
+ writel(iq->fill_cnt, iq->doorbell_reg);
+ /* make sure doorbell write goes through */
+ iq->fill_cnt = 0;
+ iq->last_db_time = jiffies;
+ return;
+ }
+}
+
+void
+octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no)
+{
+ struct octeon_instr_queue *iq;
+
+ iq = oct->instr_queue[iq_no];
+ spin_lock(&iq->post_lock);
+ if (iq->fill_cnt)
+ ring_doorbell(oct, iq);
+ spin_unlock(&iq->post_lock);
+}
+
+static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
+ u8 *cmd)
+{
+ u8 *iqptr, cmdsize;
+
+ cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
+ iqptr = iq->base_addr + (cmdsize * iq->host_write_index);
+
+ memcpy(iqptr, cmd, cmdsize);
+}
+
+static inline struct iq_post_status
+__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
+{
+ struct iq_post_status st;
+
+ st.status = IQ_SEND_OK;
+
+ /* This ensures that the read index does not wrap around to the same
+	 * position if the queue gets full before Octeon can fetch any instr.
+ */
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) {
+ st.status = IQ_SEND_FAILED;
+ st.index = -1;
+ return st;
+ }
+
+ if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2))
+ st.status = IQ_SEND_STOP;
+
+ __copy_cmd_into_iq(iq, cmd);
+
+ /* "index" is returned, host_write_index is modified. */
+ st.index = iq->host_write_index;
+ iq->host_write_index = incr_index(iq->host_write_index, 1,
+ iq->max_count);
+ iq->fill_cnt++;
+
+ /* Flush the command into memory. We need to be sure the data is in
+ * memory before indicating that the instruction is pending.
+ */
+ wmb();
+
+ atomic_inc(&iq->instr_pending);
+
+ return st;
+}
+
+int
+octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
+ void (*fn)(void *))
+{
+ if (reqtype > REQTYPE_LAST) {
+ dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n",
+ __func__, reqtype);
+ return -EINVAL;
+ }
+
+ reqtype_free_fn[oct->octeon_id][reqtype] = fn;
+
+ return 0;
+}
+
+static inline void
+__add_to_request_list(struct octeon_instr_queue *iq,
+ int idx, void *buf, int reqtype)
+{
+ iq->request_list[idx].buf = buf;
+ iq->request_list[idx].reqtype = reqtype;
+}
+
+/* Can only run in process context */
+int
+lio_process_iq_request_list(struct octeon_device *oct,
+ struct octeon_instr_queue *iq, u32 napi_budget)
+{
+ struct cavium_wq *cwq = &oct->dma_comp_wq;
+ int reqtype;
+ void *buf;
+ u32 old = iq->flush_index;
+ u32 inst_count = 0;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct octeon_soft_command *sc;
+ unsigned long flags;
+
+ while (old != iq->octeon_read_index) {
+ reqtype = iq->request_list[old].reqtype;
+ buf = iq->request_list[old].buf;
+
+ if (reqtype == REQTYPE_NONE)
+ goto skip_this;
+
+ octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl,
+ &bytes_compl);
+
+ switch (reqtype) {
+ case REQTYPE_NORESP_NET:
+ case REQTYPE_NORESP_NET_SG:
+ case REQTYPE_RESP_NET_SG:
+ reqtype_free_fn[oct->octeon_id][reqtype](buf);
+ break;
+ case REQTYPE_RESP_NET:
+ case REQTYPE_SOFT_COMMAND:
+ sc = buf;
+ /* We're expecting a response from Octeon.
+ * It's up to lio_process_ordered_list() to
+ * process sc. Add sc to the ordered soft
+ * command response list because we expect
+ * a response from Octeon.
+ */
+ spin_lock_irqsave(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock, flags);
+ atomic_inc(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].pending_req_count);
+ list_add_tail(&sc->node, &oct->response_list
+ [OCTEON_ORDERED_SC_LIST].head);
+ spin_unlock_irqrestore(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
+ break;
+ default:
+ dev_err(&oct->pci_dev->dev,
+ "%s Unknown reqtype: %d buf: %p at idx %d\n",
+ __func__, reqtype, buf, old);
+ }
+
+ iq->request_list[old].buf = NULL;
+ iq->request_list[old].reqtype = 0;
+
+ skip_this:
+ inst_count++;
+ old = incr_index(old, 1, iq->max_count);
+
+ if ((napi_budget) && (inst_count >= napi_budget))
+ break;
+ }
+ if (bytes_compl)
+ octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
+ bytes_compl);
+ iq->flush_index = old;
+
+ if (atomic_read(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].pending_req_count))
+ queue_work(cwq->wq, &cwq->wk.work.work);
+
+ return inst_count;
+}
+
+/* Can only be called from process context */
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 napi_budget)
+{
+ u32 inst_processed = 0;
+ u32 tot_inst_processed = 0;
+ int tx_done = 1;
+
+ if (!spin_trylock(&iq->iq_flush_running_lock))
+ return tx_done;
+
+ spin_lock_bh(&iq->lock);
+
+ iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
+
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->octeon_read_index)
+ break;
+
+ if (napi_budget)
+ inst_processed =
+ lio_process_iq_request_list(oct, iq,
+ napi_budget -
+ tot_inst_processed);
+ else
+ inst_processed =
+ lio_process_iq_request_list(oct, iq, 0);
+
+ if (inst_processed) {
+ iq->pkts_processed += inst_processed;
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
+
+ tot_inst_processed += inst_processed;
+ } while (tot_inst_processed < napi_budget);
+
+ if (napi_budget && (tot_inst_processed >= napi_budget))
+ tx_done = 0;
+
+ iq->last_db_time = jiffies;
+
+ spin_unlock_bh(&iq->lock);
+
+ spin_unlock(&iq->iq_flush_running_lock);
+
+ return tx_done;
+}
+
+/* Process instruction queue after timeout.
+ * This routine gets called from a workqueue or when removing the module.
+ */
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
+{
+ struct octeon_instr_queue *iq;
+ u64 next_time;
+
+ if (!oct)
+ return;
+
+ iq = oct->instr_queue[iq_no];
+ if (!iq)
+ return;
+
+ /* return immediately, if no work pending */
+ if (!atomic_read(&iq->instr_pending))
+ return;
+ /* If jiffies - last_db_time < db_timeout do nothing */
+ next_time = iq->last_db_time + iq->db_timeout;
+ if (!time_after(jiffies, (unsigned long)next_time))
+ return;
+ iq->last_db_time = jiffies;
+
+ /* Flush the instruction queue */
+ octeon_flush_iq(oct, iq, 0);
+
+ lio_enable_irq(NULL, iq);
+}
+
+/* Called by the Poll thread at regular intervals to check the instruction
+ * queue for commands to be posted and for commands that were fetched by Octeon.
+ */
+static void check_db_timeout(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ u64 iq_no = wk->ctxul;
+ struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+ u32 delay = 10;
+
+ __check_db_timeout(oct, iq_no);
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
+}
+
+int
+octeon_send_command(struct octeon_device *oct, u32 iq_no,
+ u32 force_db, void *cmd, void *buf,
+ u32 datasize, u32 reqtype)
+{
+ int xmit_stopped;
+ struct iq_post_status st;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
+
+ /* Get the lock and prevent other tasks and tx interrupt handler from
+ * running.
+ */
+ if (iq->allow_soft_cmds)
+ spin_lock_bh(&iq->post_lock);
+
+ st = __post_command2(iq, cmd);
+
+ if (st.status != IQ_SEND_FAILED) {
+ xmit_stopped = octeon_report_sent_bytes_to_bql(buf, reqtype);
+ __add_to_request_list(iq, st.index, buf, reqtype);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
+
+ if (iq->fill_cnt >= MAX_OCTEON_FILL_COUNT || force_db ||
+ xmit_stopped || st.status == IQ_SEND_STOP)
+ ring_doorbell(oct, iq);
+ } else {
+ INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
+ }
+
+ if (iq->allow_soft_cmds)
+ spin_unlock_bh(&iq->post_lock);
+
+ /* This is only done here to expedite packets being flushed
+ * for cases where there are no IQ completion interrupts.
+ */
+
+ return st.status;
+}
+
+void
+octeon_prepare_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc,
+ u8 opcode,
+ u8 subcode,
+ u32 irh_ossp,
+ u64 ossp0,
+ u64 ossp1)
+{
+ struct octeon_config *oct_cfg;
+ struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_rdp *rdp;
+
+ WARN_ON(opcode > 15);
+ WARN_ON(subcode > 127);
+
+ oct_cfg = octeon_get_conf(oct);
+
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+
+ ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind;
+
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
+
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->uqpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg;
+ pki_ih3->utt = 1;
+ pki_ih3->tag = LIO_CONTROL;
+ pki_ih3->tagtype = ATOMIC_TAG;
+ pki_ih3->qpg =
+ oct->instr_queue[sc->iq_no]->txpciq.s.ctrl_qpg;
+
+ pki_ih3->pm = 0x7;
+ pki_ih3->sl = 8;
+
+ if (sc->datasize)
+ ih3->dlengsz = sc->datasize;
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd3.ossp[0] = ossp0;
+ sc->cmd.cmd3.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ /*PKI IH3*/
+ /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */
+ ih3->fsz = LIO_SOFTCMDRESP_IH3;
+ } else {
+ irh->rflag = 0;
+ /*PKI IH3*/
+ /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */
+ ih3->fsz = LIO_PCICMD_O3;
+ }
+
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ ih2->tagtype = ATOMIC_TAG;
+ ih2->tag = LIO_CONTROL;
+ ih2->raw = 1;
+ ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+
+ if (sc->datasize) {
+ ih2->dlengsz = sc->datasize;
+ ih2->rs = 1;
+ }
+
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ irh->opcode = opcode;
+ irh->subcode = subcode;
+
+ /* opcode/subcode specific parameters (ossp) */
+ irh->ossp = irh_ossp;
+ sc->cmd.cmd2.ossp[0] = ossp0;
+ sc->cmd.cmd2.ossp[1] = ossp1;
+
+ if (sc->rdatasize) {
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rdp->pcie_port = oct->pcie_port;
+ rdp->rlen = sc->rdatasize;
+
+ irh->rflag = 1;
+ /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ ih2->fsz = LIO_SOFTCMDRESP_IH2;
+ } else {
+ irh->rflag = 0;
+ /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2->fsz = LIO_PCICMD_O2;
+ }
+ }
+}
+
+int octeon_send_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ struct octeon_instr_queue *iq;
+ struct octeon_instr_ih2 *ih2;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_irh *irh;
+ u32 len;
+
+ iq = oct->instr_queue[sc->iq_no];
+ if (!iq->allow_soft_cmds) {
+ dev_err(&oct->pci_dev->dev, "Soft commands are not allowed on Queue %d\n",
+ sc->iq_no);
+ INCR_INSTRQUEUE_PKT_COUNT(oct, sc->iq_no, instr_dropped, 1);
+ return IQ_SEND_FAILED;
+ }
+
+ if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) {
+ ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3;
+ if (ih3->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd3.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd3.rptr = sc->dmarptr;
+ }
+ len = (u32)ih3->dlengsz;
+ } else {
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ if (ih2->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd2.dptr = sc->dmadptr;
+ }
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ if (irh->rflag) {
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
+ *sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+ }
+ len = (u32)ih2->dlengsz;
+ }
+
+ sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
+
+ return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+ len, REQTYPE_SOFT_COMMAND));
+}
+
+int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
+{
+ int i;
+ u64 dma_addr;
+ struct octeon_soft_command *sc;
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+ spin_lock_init(&oct->sc_buf_pool.lock);
+ atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0);
+
+ for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) {
+ sc = (struct octeon_soft_command *)
+ lio_dma_alloc(oct,
+ SOFT_COMMAND_BUFFER_SIZE,
+ (dma_addr_t *)&dma_addr);
+ if (!sc) {
+ octeon_free_sc_buffer_pool(oct);
+ return 1;
+ }
+
+ sc->dma_addr = dma_addr;
+ sc->size = SOFT_COMMAND_BUFFER_SIZE;
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+ }
+
+ return 0;
+}
+
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+ struct octeon_response_list *done_sc_list, *zombie_sc_list;
+ struct octeon_soft_command *sc;
+ struct list_head *tmp, *tmp2;
+ spinlock_t *sc_lists_lock; /* lock for response_list */
+
+ done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+ zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+ if (!atomic_read(&done_sc_list->pending_req_count))
+ return 0;
+
+ sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+ spin_lock_bh(sc_lists_lock);
+
+ list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+ sc = list_entry(tmp, struct octeon_soft_command, node);
+
+ if (READ_ONCE(sc->caller_is_done)) {
+ list_del(&sc->node);
+ atomic_dec(&done_sc_list->pending_req_count);
+
+ if (*sc->status_word == COMPLETION_WORD_INIT) {
+ /* timeout; move sc to zombie list */
+ list_add_tail(&sc->node, &zombie_sc_list->head);
+ atomic_inc(&zombie_sc_list->pending_req_count);
+ } else {
+ octeon_free_soft_command(oct, sc);
+ }
+ }
+ }
+
+ spin_unlock_bh(sc_lists_lock);
+
+ return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+ struct octeon_response_list *zombie_sc_list;
+ struct octeon_soft_command *sc;
+ struct list_head *tmp, *tmp2;
+ spinlock_t *sc_lists_lock; /* lock for response_list */
+
+ zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+ sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+ spin_lock_bh(sc_lists_lock);
+
+ list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+ list_del(tmp);
+ atomic_dec(&zombie_sc_list->pending_req_count);
+ sc = list_entry(tmp, struct octeon_soft_command, node);
+ octeon_free_soft_command(oct, sc);
+ }
+
+ spin_unlock_bh(sc_lists_lock);
+
+ return 0;
+}
+
+int octeon_free_sc_buffer_pool(struct octeon_device *oct)
+{
+ struct list_head *tmp, *tmp2;
+ struct octeon_soft_command *sc;
+
+ octeon_free_sc_zombie_list(oct);
+
+ spin_lock_bh(&oct->sc_buf_pool.lock);
+
+ list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
+ list_del(tmp);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ lio_dma_free(oct, sc->size, sc, sc->dma_addr);
+ }
+
+ INIT_LIST_HEAD(&oct->sc_buf_pool.head);
+
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
+
+ return 0;
+}
+
+struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
+ u32 datasize,
+ u32 rdatasize,
+ u32 ctxsize)
+{
+ u64 dma_addr;
+ u32 size;
+ u32 offset = sizeof(struct octeon_soft_command);
+ struct octeon_soft_command *sc = NULL;
+ struct list_head *tmp;
+
+ if (!rdatasize)
+ rdatasize = 16;
+
+ WARN_ON((offset + datasize + rdatasize + ctxsize) >
+ SOFT_COMMAND_BUFFER_SIZE);
+
+ spin_lock_bh(&oct->sc_buf_pool.lock);
+
+ if (list_empty(&oct->sc_buf_pool.head)) {
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
+ return NULL;
+ }
+
+ list_for_each(tmp, &oct->sc_buf_pool.head)
+ break;
+
+ list_del(tmp);
+
+ atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
+
+ sc = (struct octeon_soft_command *)tmp;
+
+ dma_addr = sc->dma_addr;
+ size = sc->size;
+
+ memset(sc, 0, sc->size);
+
+ sc->dma_addr = dma_addr;
+ sc->size = size;
+
+ if (ctxsize) {
+ sc->ctxptr = (u8 *)sc + offset;
+ sc->ctxsize = ctxsize;
+ }
+
+ /* Start data at 128 byte boundary */
+ offset = (offset + ctxsize + 127) & 0xffffff80;
+
+ if (datasize) {
+ sc->virtdptr = (u8 *)sc + offset;
+ sc->dmadptr = dma_addr + offset;
+ sc->datasize = datasize;
+ }
+
+ /* Start rdata at 128 byte boundary */
+ offset = (offset + datasize + 127) & 0xffffff80;
+
+ if (rdatasize) {
+ WARN_ON(rdatasize < 16);
+ sc->virtrptr = (u8 *)sc + offset;
+ sc->dmarptr = dma_addr + offset;
+ sc->rdatasize = rdatasize;
+ sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8);
+ }
+
+ return sc;
+}
+
+void octeon_free_soft_command(struct octeon_device *oct,
+ struct octeon_soft_command *sc)
+{
+ spin_lock_bh(&oct->sc_buf_pool.lock);
+
+ list_add_tail(&sc->node, &oct->sc_buf_pool.head);
+
+ atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
+
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
new file mode 100644
index 000000000..ac7747ccf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -0,0 +1,234 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_main.h"
+
+static void oct_poll_req_completion(struct work_struct *work);
+
+int octeon_setup_response_list(struct octeon_device *oct)
+{
+ int i, ret = 0;
+ struct cavium_wq *cwq;
+
+ for (i = 0; i < MAX_RESPONSE_LISTS; i++) {
+ INIT_LIST_HEAD(&oct->response_list[i].head);
+ spin_lock_init(&oct->response_list[i].lock);
+ atomic_set(&oct->response_list[i].pending_req_count, 0);
+ }
+ spin_lock_init(&oct->cmd_resp_wqlock);
+
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
+ if (!oct->dma_comp_wq.wq) {
+ dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
+ return -ENOMEM;
+ }
+
+ cwq = &oct->dma_comp_wq;
+ INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
+ cwq->wk.ctxptr = oct;
+ oct->cmd_resp_state = OCT_DRV_ONLINE;
+
+ return ret;
+}
+
+void octeon_delete_response_list(struct octeon_device *oct)
+{
+ cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
+ destroy_workqueue(oct->dma_comp_wq.wq);
+}
+
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit)
+{
+ struct octeon_response_list *ordered_sc_list;
+ struct octeon_soft_command *sc;
+ int request_complete = 0;
+ int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
+ u32 status;
+ u64 status64;
+
+ octeon_free_sc_done_list(octeon_dev);
+
+ ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
+
+ do {
+ spin_lock_bh(&ordered_sc_list->lock);
+
+ if (list_empty(&ordered_sc_list->head)) {
+ spin_unlock_bh(&ordered_sc_list->lock);
+ return 1;
+ }
+
+ sc = list_first_entry(&ordered_sc_list->head,
+ struct octeon_soft_command, node);
+
+ status = OCTEON_REQUEST_PENDING;
+
+ /* check if octeon has finished DMA'ing a response
+		 * to where rptr is pointing
+ */
+ status64 = *sc->status_word;
+
+ if (status64 != COMPLETION_WORD_INIT) {
+ /* This logic ensures that all 64b have been written.
+ * 1. check byte 0 for non-FF
+ * 2. if non-FF, then swap result from BE to host order
+ * 3. check byte 7 (swapped to 0) for non-FF
+ * 4. if non-FF, use the low 32-bit status code
+ * 5. if either byte 0 or byte 7 is FF, don't use status
+ */
+ if ((status64 & 0xff) != 0xff) {
+ octeon_swap_8B_data(&status64, 1);
+ if (((status64 & 0xff) != 0xff)) {
+ /* retrieve 16-bit firmware status */
+ status = (u32)(status64 & 0xffffULL);
+ if (status) {
+ status =
+ FIRMWARE_STATUS_CODE(status);
+ } else {
+ /* i.e. no error */
+ status = OCTEON_REQUEST_DONE;
+ }
+ }
+ }
+ } else if (unlikely(force_quit) || (sc->expiry_time &&
+ time_after(jiffies, (unsigned long)sc->expiry_time))) {
+ struct octeon_instr_irh *irh =
+ (struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+
+ dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__);
+ dev_err(&octeon_dev->pci_dev->dev,
+ "cmd %x/%x/%llx/%llx failed, ",
+ irh->opcode, irh->subcode,
+ sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]);
+ dev_err(&octeon_dev->pci_dev->dev,
+ "timeout (%ld, %ld)\n",
+ (long)jiffies, (long)sc->expiry_time);
+ status = OCTEON_REQUEST_TIMEOUT;
+ }
+
+ if (status != OCTEON_REQUEST_PENDING) {
+ sc->sc_status = status;
+
+ /* we have received a response or we have timed out */
+ /* remove node from linked list */
+ list_del(&sc->node);
+ atomic_dec(&octeon_dev->response_list
+ [OCTEON_ORDERED_SC_LIST].
+ pending_req_count);
+
+ if (!sc->callback) {
+ atomic_inc(&octeon_dev->response_list
+ [OCTEON_DONE_SC_LIST].
+ pending_req_count);
+ list_add_tail(&sc->node,
+ &octeon_dev->response_list
+ [OCTEON_DONE_SC_LIST].head);
+
+ if (unlikely(READ_ONCE(sc->caller_is_done))) {
+ /* caller does not wait for response
+ * from firmware
+ */
+ if (status != OCTEON_REQUEST_DONE) {
+ struct octeon_instr_irh *irh;
+
+ irh =
+ (struct octeon_instr_irh *)
+ &sc->cmd.cmd3.irh;
+ dev_dbg
+ (&octeon_dev->pci_dev->dev,
+ "%s: sc failed: opcode=%x, ",
+ __func__, irh->opcode);
+ dev_dbg
+ (&octeon_dev->pci_dev->dev,
+ "subcode=%x, ossp[0]=%llx, ",
+ irh->subcode,
+ sc->cmd.cmd3.ossp[0]);
+ dev_dbg
+ (&octeon_dev->pci_dev->dev,
+ "ossp[1]=%llx, status=%d\n",
+ sc->cmd.cmd3.ossp[1],
+ status);
+ }
+ } else {
+ complete(&sc->complete);
+ }
+
+ spin_unlock_bh(&ordered_sc_list->lock);
+ } else {
+ /* sc with callback function */
+ if (status == OCTEON_REQUEST_TIMEOUT) {
+ atomic_inc(&octeon_dev->response_list
+ [OCTEON_ZOMBIE_SC_LIST].
+ pending_req_count);
+ list_add_tail(&sc->node,
+ &octeon_dev->response_list
+ [OCTEON_ZOMBIE_SC_LIST].
+ head);
+ }
+
+ spin_unlock_bh(&ordered_sc_list->lock);
+
+ sc->callback(octeon_dev, status,
+ sc->callback_arg);
+ /* sc is freed by caller */
+ }
+
+ request_complete++;
+
+ } else {
+ /* no response yet */
+ request_complete = 0;
+ spin_unlock_bh
+ (&ordered_sc_list->lock);
+ }
+
+		/* If we hit the maximum number of ordered requests to process
+		 * in one pass, quit and let this function be invoked again the
+		 * next time the poll thread runs, to process the remaining
+		 * requests. Without this upper limit, the function could take
+		 * up the entire CPU.
+		 */
+ if (request_complete >= resp_to_process)
+ break;
+ } while (request_complete);
+
+ return 0;
+}
+
+static void oct_poll_req_completion(struct work_struct *work)
+{
+ struct cavium_wk *wk = (struct cavium_wk *)work;
+ struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+ struct cavium_wq *cwq = &oct->dma_comp_wq;
+
+ lio_process_ordered_list(oct, 0);
+
+ if (atomic_read(&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].pending_req_count))
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
new file mode 100644
index 000000000..ed4020d26
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -0,0 +1,143 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2016 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ **********************************************************************/
+
+/*! \file response_manager.h
+ * \brief Host Driver: Response queues for host instructions.
+ */
+
+#ifndef __RESPONSE_MANAGER_H__
+#define __RESPONSE_MANAGER_H__
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function could run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define MAX_ORD_REQS_TO_PROCESS 4096
+
+/** Head of a response list. There are several response lists in the
+ * system: one for each response order (unordered, ordered),
+ * and one for no-response entries on each instruction queue.
+ */
+struct octeon_response_list {
+ /** List structure to add delete pending entries to */
+ struct list_head head;
+
+ /** A lock for this response list */
+ spinlock_t lock;
+
+ atomic_t pending_req_count;
+};
+
+/** The type of response list.
+ */
+enum {
+ OCTEON_ORDERED_LIST = 0,
+ OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
+ OCTEON_UNORDERED_BLOCKING_LIST = 2,
+ OCTEON_ORDERED_SC_LIST = 3,
+ OCTEON_DONE_SC_LIST = 4,
+ OCTEON_ZOMBIE_SC_LIST = 5
+};
+
+/** Response Order values for an Octeon Request. */
+enum {
+ OCTEON_RESP_ORDERED = 0,
+ OCTEON_RESP_UNORDERED = 1,
+ OCTEON_RESP_NORESPONSE = 2
+};
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ * 31 16 15 0
+ * ---------------------------------
+ * | | |
+ * ---------------------------------
+ * Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
+ * are reserved to identify the group to which the error code belongs. The
+ * lower 16-bits, called Minor Error Number, carry the actual code.
+ *
+ * So error codes are (MAJOR_NUMBER << 16) | MINOR_NUMBER.
+ */
+
+/*------------ Error codes used by host driver -----------------*/
+#define DRIVER_MAJOR_ERROR_CODE 0x0000
+/*------ Error codes used by firmware (bits 15..0 set by firmware */
+#define FIRMWARE_MAJOR_ERROR_CODE 0x0001
+
+/** A value of 0x00000000 indicates no error i.e. success */
+#define DRIVER_ERROR_NONE 0x00000000
+
+#define DRIVER_ERROR_REQ_PENDING 0x00000001
+#define DRIVER_ERROR_REQ_TIMEOUT 0x00000003
+#define DRIVER_ERROR_REQ_EINTR 0x00000004
+#define DRIVER_ERROR_REQ_ENXIO 0x00000006
+#define DRIVER_ERROR_REQ_ENOMEM 0x0000000C
+#define DRIVER_ERROR_REQ_EINVAL 0x00000016
+#define DRIVER_ERROR_REQ_FAILED 0x000000ff
+
+/** Status for a request.
+ * If a request is not queued to Octeon by the driver, the driver returns
+ * an error condition that's described by one of the OCTEON_REQ_ERR_* values
+ * below. If the request is successfully queued, the driver will return
+ * an OCTEON_REQUEST_PENDING status. OCTEON_REQUEST_TIMEOUT and
+ * OCTEON_REQUEST_INTERRUPTED are only returned by the driver if the
+ * response for a request failed to arrive before a time-out period or if
+ * the request processing got interrupted due to a signal, respectively.
+ */
+enum {
+ OCTEON_REQUEST_DONE = (DRIVER_ERROR_NONE),
+ OCTEON_REQUEST_PENDING = (DRIVER_ERROR_REQ_PENDING),
+ OCTEON_REQUEST_TIMEOUT = (DRIVER_ERROR_REQ_TIMEOUT),
+ OCTEON_REQUEST_INTERRUPTED = (DRIVER_ERROR_REQ_EINTR),
+ OCTEON_REQUEST_NO_DEVICE = (0x00000021),
+ OCTEON_REQUEST_NOT_RUNNING,
+ OCTEON_REQUEST_INVALID_IQ,
+ OCTEON_REQUEST_INVALID_BUFCNT,
+ OCTEON_REQUEST_INVALID_RESP_ORDER,
+ OCTEON_REQUEST_NO_MEMORY,
+ OCTEON_REQUEST_INVALID_BUFSIZE,
+ OCTEON_REQUEST_NO_PENDING_ENTRY,
+ OCTEON_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
+
+};
+
+#define FIRMWARE_STATUS_CODE(status) \
+ ((FIRMWARE_MAJOR_ERROR_CODE << 16) | (status))
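+
+/* Example: a firmware minor status of 0x0003 is reported to callers as
+ * FIRMWARE_STATUS_CODE(0x0003) == (FIRMWARE_MAJOR_ERROR_CODE << 16) | 0x0003
+ * == 0x00010003.
+ */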
+
+/** Initialize the response lists. MAX_RESPONSE_LISTS lists are created.
+ * @param octeon_dev - the octeon device structure.
+ */
+int octeon_setup_response_list(struct octeon_device *octeon_dev);
+
+void octeon_delete_response_list(struct octeon_device *octeon_dev);
+
+/** Check the status of the first entry in the ordered list. If the instruction
+ * at that entry has finished processing or has timed out, the entry is cleaned.
+ * @param octeon_dev - the octeon device structure.
+ * @param force_quit - the request is forced to timeout if this is 1
+ * @return 1 if the ordered list is empty, 0 otherwise.
+ */
+int lio_process_ordered_list(struct octeon_device *octeon_dev,
+ u32 force_quit);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/octeon/Makefile b/drivers/net/ethernet/cavium/octeon/Makefile
new file mode 100644
index 000000000..4f5098f6b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/octeon/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Cavium network device drivers.
+#
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
new file mode 100644
index 000000000..edde0b8fa
--- /dev/null
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -0,0 +1,1557 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009-2012 Cavium, Inc
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/capability.h>
+#include <linux/net_tstamp.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/if_vlan.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-mixx-defs.h>
+#include <asm/octeon/cvmx-agl-defs.h>
+
+#define DRV_NAME "octeon_mgmt"
+#define DRV_DESCRIPTION \
+ "Cavium Networks Octeon MII (management) port Network Driver"
+
+#define OCTEON_MGMT_NAPI_WEIGHT 16
+
+/* Ring sizes that are powers of two allow for more efficient modulo
+ * operations.
+ */
+#define OCTEON_MGMT_RX_RING_SIZE 512
+#define OCTEON_MGMT_TX_RING_SIZE 128
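+
+/* Editorial sketch: with power-of-two ring sizes, the modulo used for the
+ * index updates below is equivalent to a mask, e.g. this hypothetical helper
+ * (not used by the driver):
+ */
+static inline unsigned int octeon_mgmt_ring_next(unsigned int index,
+						 unsigned int ring_size)
+{
+	/* Valid only when ring_size is a power of two. */
+	return (index + 1) & (ring_size - 1);
+}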
+
+/* Allow 8 bytes for vlan and FCS. */
+#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+union mgmt_port_ring_entry {
+ u64 d64;
+ struct {
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 reserved_62_63:2;
+ /* Length of the buffer/packet in bytes */
+ u64 len:14;
+ /* For TX, signals that the packet should be timestamped */
+ u64 tstamp:1;
+ /* The RX error code */
+ u64 code:7;
+ /* Physical address of the buffer */
+ u64 addr:40;
+#else
+ u64 addr:40;
+ u64 code:7;
+ u64 tstamp:1;
+ u64 len:14;
+ u64 reserved_62_63:2;
+#endif
+ } s;
+};
+
+#define MIX_ORING1 0x0
+#define MIX_ORING2 0x8
+#define MIX_IRING1 0x10
+#define MIX_IRING2 0x18
+#define MIX_CTL 0x20
+#define MIX_IRHWM 0x28
+#define MIX_IRCNT 0x30
+#define MIX_ORHWM 0x38
+#define MIX_ORCNT 0x40
+#define MIX_ISR 0x48
+#define MIX_INTENA 0x50
+#define MIX_REMCNT 0x58
+#define MIX_BIST 0x78
+
+#define AGL_GMX_PRT_CFG 0x10
+#define AGL_GMX_RX_FRM_CTL 0x18
+#define AGL_GMX_RX_FRM_MAX 0x30
+#define AGL_GMX_RX_JABBER 0x38
+#define AGL_GMX_RX_STATS_CTL 0x50
+
+#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
+#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
+#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0
+
+#define AGL_GMX_RX_ADR_CTL 0x100
+#define AGL_GMX_RX_ADR_CAM_EN 0x108
+#define AGL_GMX_RX_ADR_CAM0 0x180
+#define AGL_GMX_RX_ADR_CAM1 0x188
+#define AGL_GMX_RX_ADR_CAM2 0x190
+#define AGL_GMX_RX_ADR_CAM3 0x198
+#define AGL_GMX_RX_ADR_CAM4 0x1a0
+#define AGL_GMX_RX_ADR_CAM5 0x1a8
+
+#define AGL_GMX_TX_CLK 0x208
+#define AGL_GMX_TX_STATS_CTL 0x268
+#define AGL_GMX_TX_CTL 0x270
+#define AGL_GMX_TX_STAT0 0x280
+#define AGL_GMX_TX_STAT1 0x288
+#define AGL_GMX_TX_STAT2 0x290
+#define AGL_GMX_TX_STAT3 0x298
+#define AGL_GMX_TX_STAT4 0x2a0
+#define AGL_GMX_TX_STAT5 0x2a8
+#define AGL_GMX_TX_STAT6 0x2b0
+#define AGL_GMX_TX_STAT7 0x2b8
+#define AGL_GMX_TX_STAT8 0x2c0
+#define AGL_GMX_TX_STAT9 0x2c8
+
+struct octeon_mgmt {
+ struct net_device *netdev;
+ u64 mix;
+ u64 agl;
+ u64 agl_prt_ctl;
+ int port;
+ int irq;
+ bool has_rx_tstamp;
+ u64 *tx_ring;
+ dma_addr_t tx_ring_handle;
+ unsigned int tx_next;
+ unsigned int tx_next_clean;
+ unsigned int tx_current_fill;
+ /* The tx_list lock also protects the ring related variables */
+ struct sk_buff_head tx_list;
+
+ /* RX variables only touched in napi_poll. No locking necessary. */
+ u64 *rx_ring;
+ dma_addr_t rx_ring_handle;
+ unsigned int rx_next;
+ unsigned int rx_next_fill;
+ unsigned int rx_current_fill;
+ struct sk_buff_head rx_list;
+
+ spinlock_t lock;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ unsigned int last_speed;
+ struct device *dev;
+ struct napi_struct napi;
+ struct tasklet_struct tx_clean_tasklet;
+ struct device_node *phy_np;
+ resource_size_t mix_phys;
+ resource_size_t mix_size;
+ resource_size_t agl_phys;
+ resource_size_t agl_size;
+ resource_size_t agl_prt_ctl_phys;
+ resource_size_t agl_prt_ctl_size;
+};
+
+static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
+{
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
+ mix_intena.s.ithena = enable ? 1 : 0;
+ cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
+{
+ union cvmx_mixx_intena mix_intena;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
+ mix_intena.s.othena = enable ? 1 : 0;
+ cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 1);
+}
+
+static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_rx_irq(p, 0);
+}
+
+static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 1);
+}
+
+static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+{
+ octeon_mgmt_set_tx_irq(p, 0);
+}
+
+static unsigned int ring_max_fill(unsigned int ring_size)
+{
+ return ring_size - 8;
+}
+
+static unsigned int ring_size_to_bytes(unsigned int ring_size)
+{
+ return ring_size * sizeof(union mgmt_port_ring_entry);
+}
+
+static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
+ unsigned int size;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+
+ /* CN56XX pass 1 needs 8 bytes of padding. */
+ size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
+
+ skb = netdev_alloc_skb(netdev, size);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ __skb_queue_tail(&p->rx_list, skb);
+
+ re.d64 = 0;
+ re.s.len = size;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ size,
+ DMA_FROM_DEVICE);
+
+ /* Put it in the ring. */
+ p->rx_ring[p->rx_next_fill] = re.d64;
+ /* Make sure there is no reorder of filling the ring and ringing
+ * the bell
+ */
+ wmb();
+
+ dma_sync_single_for_device(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->rx_next_fill =
+ (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill++;
+ /* Ring the bell. */
+ cvmx_write_csr(p->mix + MIX_IRING2, 1);
+ }
+}
+
+static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
+{
+ union cvmx_mixx_orcnt mix_orcnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ int cleaned = 0;
+ unsigned long flags;
+
+ mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
+ while (mix_orcnt.s.orcnt) {
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
+
+ if (mix_orcnt.s.orcnt == 0) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ break;
+ }
+
+ dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ re.d64 = p->tx_ring[p->tx_next_clean];
+ p->tx_next_clean =
+ (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ skb = __skb_dequeue(&p->tx_list);
+
+ mix_orcnt.u64 = 0;
+ mix_orcnt.s.orcnt = 1;
+
+ /* Acknowledge to hardware that we have the buffer. */
+ cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
+ p->tx_current_fill--;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+
+ /* Read the hardware TX timestamp if one was recorded */
+ if (unlikely(re.s.tstamp)) {
+ struct skb_shared_hwtstamps ts;
+ u64 ns;
+
+ memset(&ts, 0, sizeof(ts));
+ /* Read the timestamp */
+ ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+ /* Remove the timestamp from the FIFO */
+ cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
+ /* Tell the kernel about the timestamp */
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ts);
+ }
+
+ dev_kfree_skb_any(skb);
+ cleaned++;
+
+ mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
+ }
+
+ if (cleaned && netif_queue_stopped(p->netdev))
+ netif_wake_queue(p->netdev);
+}
+
+static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
+{
+ struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
+ octeon_mgmt_clean_tx_buffers(p);
+ octeon_mgmt_enable_tx_irq(p);
+}
+
+static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ unsigned long flags;
+ u64 drop, bad;
+
+ /* These reads also clear the count registers. */
+ drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
+ bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
+
+ if (drop || bad) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.rx_errors += bad;
+ netdev->stats.rx_dropped += drop;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ unsigned long flags;
+
+ union cvmx_agl_gmx_txx_stat0 s0;
+ union cvmx_agl_gmx_txx_stat1 s1;
+
+ /* These reads also clear the count registers. */
+ s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
+ s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
+
+ if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
+ /* Do an atomic update. */
+ spin_lock_irqsave(&p->lock, flags);
+ netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
+ netdev->stats.collisions += s1.s.scol + s1.s.mcol;
+ spin_unlock_irqrestore(&p->lock, flags);
+ }
+}
+
+/*
+ * Dequeue a receive skb and its corresponding ring entry. The ring
+ * entry is returned, *pskb is updated to point to the skb.
+ */
+static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
+ struct sk_buff **pskb)
+{
+ union mgmt_port_ring_entry re;
+
+ dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ re.d64 = p->rx_ring[p->rx_next];
+ p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
+ p->rx_current_fill--;
+ *pskb = __skb_dequeue(&p->rx_list);
+
+ dma_unmap_single(p->dev, re.s.addr,
+ ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
+ DMA_FROM_DEVICE);
+
+ return re.d64;
+}
+
+
+static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
+{
+ struct net_device *netdev = p->netdev;
+ union cvmx_mixx_ircnt mix_ircnt;
+ union mgmt_port_ring_entry re;
+ struct sk_buff *skb;
+ struct sk_buff *skb2;
+ struct sk_buff *skb_new;
+ union mgmt_port_ring_entry re2;
+ int rc = 1;
+
+
+ re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
+ if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
+ /* A good packet, send it up. */
+ skb_put(skb, re.s.len);
+good:
+ /* Process the RX timestamp if it was recorded */
+ if (p->has_rx_tstamp) {
+ /* The first 8 bytes are the timestamp */
+ u64 ns = *(u64 *)skb->data;
+ struct skb_shared_hwtstamps *ts;
+ ts = skb_hwtstamps(skb);
+ ts->hwtstamp = ns_to_ktime(ns);
+ __skb_pull(skb, 8);
+ }
+ skb->protocol = eth_type_trans(skb, netdev);
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += skb->len;
+ netif_receive_skb(skb);
+ rc = 0;
+ } else if (re.s.code == RING_ENTRY_CODE_MORE) {
+ /* Packet split across skbs. This can happen if we
+ * increase the MTU. Buffers that are already in the
+ * rx ring can then end up being too small. As the rx
+ * ring is refilled, buffers sized for the new MTU
+ * will be used and we should go back to the normal
+ * non-split case.
+ */
+ skb_put(skb, re.s.len);
+ do {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ if (re2.s.code != RING_ENTRY_CODE_MORE
+ && re2.s.code != RING_ENTRY_CODE_DONE)
+ goto split_error;
+ skb_put(skb2, re2.s.len);
+ skb_new = skb_copy_expand(skb, 0, skb2->len,
+ GFP_ATOMIC);
+ if (!skb_new)
+ goto split_error;
+ if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
+ skb2->len))
+ goto split_error;
+ skb_put(skb_new, skb2->len);
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ skb = skb_new;
+ } while (re2.s.code == RING_ENTRY_CODE_MORE);
+ goto good;
+ } else {
+ /* Some other error, discard it. */
+ dev_kfree_skb_any(skb);
+ /* Error statistics are accumulated in
+ * octeon_mgmt_update_rx_stats.
+ */
+ }
+ goto done;
+split_error:
+ /* Discard the whole mess. */
+ dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb2);
+ while (re2.s.code == RING_ENTRY_CODE_MORE) {
+ re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+ dev_kfree_skb_any(skb2);
+ }
+ netdev->stats.rx_errors++;
+
+done:
+ /* Tell the hardware we processed a packet. */
+ mix_ircnt.u64 = 0;
+ mix_ircnt.s.ircnt = 1;
+ cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
+ return rc;
+}
+
+static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
+{
+ unsigned int work_done = 0;
+ union cvmx_mixx_ircnt mix_ircnt;
+ int rc;
+
+ mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
+ while (work_done < budget && mix_ircnt.s.ircnt) {
+
+ rc = octeon_mgmt_receive_one(p);
+ if (!rc)
+ work_done++;
+
+ /* Check for more packets. */
+ mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
+ }
+
+ octeon_mgmt_rx_fill_ring(p->netdev);
+
+ return work_done;
+}
+
+static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
+ struct net_device *netdev = p->netdev;
+ unsigned int work_done = 0;
+
+ work_done = octeon_mgmt_receive_packets(p, budget);
+
+ if (work_done < budget) {
+ /* We stopped because no more packets were available. */
+ napi_complete_done(napi, work_done);
+ octeon_mgmt_enable_rx_irq(p);
+ }
+ octeon_mgmt_update_rx_stats(netdev);
+
+ return work_done;
+}
+
+/* Reset the hardware to clean state. */
+static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
+{
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_mixx_bist mix_bist;
+ union cvmx_agl_gmx_bist agl_gmx_bist;
+
+ mix_ctl.u64 = 0;
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
+ } while (mix_ctl.s.busy);
+ mix_ctl.s.reset = 1;
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
+ cvmx_read_csr(p->mix + MIX_CTL);
+ octeon_io_clk_delay(64);
+
+ mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
+ if (mix_bist.u64)
+ dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
+ (unsigned long long)mix_bist.u64);
+
+ agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
+ if (agl_gmx_bist.u64)
+ dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
+ (unsigned long long)agl_gmx_bist.u64);
+}
+
+struct octeon_mgmt_cam_state {
+ u64 cam[6];
+ u64 cam_mask;
+ int cam_index;
+};
+
+static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
+ const unsigned char *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
+ cs->cam_mask |= (1ULL << cs->cam_index);
+ cs->cam_index++;
+}
+
+static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
+ union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
+ unsigned long flags;
+ unsigned int prev_packet_enable;
+ unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
+ unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
+ struct octeon_mgmt_cam_state cam_state;
+ struct netdev_hw_addr *ha;
+ int available_cam_entries;
+
+ memset(&cam_state, 0, sizeof(cam_state));
+
+ if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
+ cam_mode = 0;
+ available_cam_entries = 8;
+ } else {
+		/* One CAM entry for the primary address, leaving seven
+		 * for the secondary addresses.
+ */
+ available_cam_entries = 7 - netdev->uc.count;
+ }
+
+ if (netdev->flags & IFF_MULTICAST) {
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > available_cam_entries)
+ multicast_mode = 2; /* 2 - Accept all multicast. */
+ else
+ multicast_mode = 0; /* 0 - Use CAM. */
+ }
+
+ if (cam_mode == 1) {
+ /* Add primary address. */
+ octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
+ netdev_for_each_uc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
+ }
+ if (multicast_mode == 0) {
+ netdev_for_each_mc_addr(ha, netdev)
+ octeon_mgmt_cam_state_add(&cam_state, ha->addr);
+ }
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ /* Disable packet I/O. */
+ agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ prev_packet_enable = agl_gmx_prtx.s.en;
+ agl_gmx_prtx.s.en = 0;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
+
+ adr_ctl.u64 = 0;
+ adr_ctl.s.cam_mode = cam_mode;
+ adr_ctl.s.mcst = multicast_mode;
+ adr_ctl.s.bcst = 1; /* Allow broadcast */
+
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
+
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
+
+ /* Restore packet I/O. */
+ agl_gmx_prtx.s.en = prev_packet_enable;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
+
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
+{
+ int r = eth_mac_addr(netdev, addr);
+
+ if (r)
+ return r;
+
+ octeon_mgmt_set_rx_filtering(netdev);
+
+ return 0;
+}
+
+static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ netdev->mtu = new_mtu;
+
+ /* HW lifts the limit if the frame is VLAN tagged
+	 * (+4 bytes per tag, up to two tags)
+ */
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
+ /* Set the hardware to truncate packets larger than the MTU. The jabber
+ * register must be set to a multiple of 8 bytes, so round up. JABBER is
+ * an unconditional limit, so we need to account for two possible VLAN
+ * tags.
+ */
+ cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
+ (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
+
+ return 0;
+}
+
+static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
+{
+ struct net_device *netdev = dev_id;
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ union cvmx_mixx_isr mixx_isr;
+
+ mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
+ cvmx_read_csr(p->mix + MIX_ISR);
+
+ if (mixx_isr.s.irthresh) {
+ octeon_mgmt_disable_rx_irq(p);
+ napi_schedule(&p->napi);
+ }
+ if (mixx_isr.s.orthresh) {
+ octeon_mgmt_disable_tx_irq(p);
+ tasklet_schedule(&p->tx_clean_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
+ struct ifreq *rq, int cmd)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ union cvmx_mio_ptp_clock_cfg ptp;
+ union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ bool have_hw_timestamps = false;
+
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+	/* Check whether the hardware supports timestamps */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ /* Get the current state of the PTP clock */
+ ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
+ if (!ptp.s.ext_clk_en) {
+ /* The clock has not been configured to use an
+ * external source. Program it to use the main clock
+ * reference.
+ */
+ u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
+ if (!ptp.s.ptp_en)
+ cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
+ netdev_info(netdev,
+ "PTP Clock using sclk reference @ %lldHz\n",
+ (NSEC_PER_SEC << 32) / clock_comp);
+ } else {
+ /* The clock is already programmed to use a GPIO */
+ u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
+ netdev_info(netdev,
+ "PTP Clock using GPIO%d @ %lld Hz\n",
+ ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
+ }
+
+ /* Enable the clock if it wasn't done already */
+ if (!ptp.s.ptp_en) {
+ ptp.s.ptp_en = 1;
+ cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
+ }
+ have_hw_timestamps = true;
+ }
+
+ if (!have_hw_timestamps)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ p->has_rx_tstamp = false;
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 0;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ p->has_rx_tstamp = have_hw_timestamps;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ if (p->has_rx_tstamp) {
+ rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+ rxx_frm_ctl.s.ptp_mode = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+ }
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int octeon_mgmt_ioctl(struct net_device *netdev,
+ struct ifreq *rq, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
+ default:
+ return phy_do_ioctl(netdev, rq, cmd);
+ }
+}
+
+static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+ /* Disable GMX before we make any changes. */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ prtx_cfg.s.en = 0;
+ prtx_cfg.s.tx_en = 0;
+ prtx_cfg.s.rx_en = 0;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ int i;
+ for (i = 0; i < 10; i++) {
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
+ break;
+ mdelay(1);
+ }
+ }
+}
+
+static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
+{
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+ /* Restore the GMX enable state only if link is set */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+ prtx_cfg.s.tx_en = 1;
+ prtx_cfg.s.rx_en = 1;
+ prtx_cfg.s.en = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+}
+
+static void octeon_mgmt_update_link(struct octeon_mgmt *p)
+{
+ struct net_device *ndev = p->netdev;
+ struct phy_device *phydev = ndev->phydev;
+ union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+ if (!phydev->link)
+ prtx_cfg.s.duplex = 1;
+ else
+ prtx_cfg.s.duplex = phydev->duplex;
+
+ switch (phydev->speed) {
+ case 10:
+ prtx_cfg.s.speed = 0;
+ prtx_cfg.s.slottime = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.burst = 1;
+ prtx_cfg.s.speed_msb = 1;
+ }
+ break;
+ case 100:
+ prtx_cfg.s.speed = 0;
+ prtx_cfg.s.slottime = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.burst = 1;
+ prtx_cfg.s.speed_msb = 0;
+ }
+ break;
+ case 1000:
+		/* 1000 Mbps is only supported on 6XXX chips */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ prtx_cfg.s.speed = 1;
+ prtx_cfg.s.speed_msb = 0;
+ /* Only matters for half-duplex */
+ prtx_cfg.s.slottime = 1;
+ prtx_cfg.s.burst = phydev->duplex;
+ }
+ break;
+ case 0: /* No link */
+ default:
+ break;
+ }
+
+ /* Write the new GMX setting with the port still disabled. */
+ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+ /* Read GMX CFG again to make sure the config is completed. */
+ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+ union cvmx_agl_gmx_txx_clk agl_clk;
+ union cvmx_agl_prtx_ctl prtx_ctl;
+
+ prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
+ /* MII (both speeds) and RGMII 1000 speed. */
+ agl_clk.s.clk_cnt = 1;
+ if (prtx_ctl.s.mode == 0) { /* RGMII mode */
+ if (phydev->speed == 10)
+ agl_clk.s.clk_cnt = 50;
+ else if (phydev->speed == 100)
+ agl_clk.s.clk_cnt = 5;
+ }
+ cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
+ }
+}
+
+static void octeon_mgmt_adjust_link(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
+ unsigned long flags;
+ int link_changed = 0;
+
+ if (!phydev)
+ return;
+
+ spin_lock_irqsave(&p->lock, flags);
+
+
+ if (!phydev->link && p->last_link)
+ link_changed = -1;
+
+ if (phydev->link &&
+ (p->last_duplex != phydev->duplex ||
+ p->last_link != phydev->link ||
+ p->last_speed != phydev->speed)) {
+ octeon_mgmt_disable_link(p);
+ link_changed = 1;
+ octeon_mgmt_update_link(p);
+ octeon_mgmt_enable_link(p);
+ }
+
+ p->last_link = phydev->link;
+ p->last_speed = phydev->speed;
+ p->last_duplex = phydev->duplex;
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ if (link_changed != 0) {
+ if (link_changed > 0)
+ netdev_info(netdev, "Link is up - %d/%s\n",
+ phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
+ else
+ netdev_info(netdev, "Link is down\n");
+ }
+}
+
+static int octeon_mgmt_init_phy(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = NULL;
+
+ if (octeon_is_simulation() || p->phy_np == NULL) {
+ /* No PHYs in the simulator. */
+ netif_carrier_on(netdev);
+ return 0;
+ }
+
+ phydev = of_phy_connect(netdev, p->phy_np,
+ octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+
+ if (!phydev)
+ return -EPROBE_DEFER;
+
+ return 0;
+}
+
+static int octeon_mgmt_open(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ union cvmx_mixx_ctl mix_ctl;
+ union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
+ union cvmx_mixx_oring1 oring1;
+ union cvmx_mixx_iring1 iring1;
+ union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+ union cvmx_mixx_irhwm mix_irhwm;
+ union cvmx_mixx_orhwm mix_orhwm;
+ union cvmx_mixx_intena mix_intena;
+ struct sockaddr sa;
+
+ /* Allocate ring buffers. */
+ p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->tx_ring)
+ return -ENOMEM;
+ p->tx_ring_handle =
+ dma_map_single(p->dev, p->tx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ p->tx_next = 0;
+ p->tx_next_clean = 0;
+ p->tx_current_fill = 0;
+
+
+ p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ GFP_KERNEL);
+ if (!p->rx_ring)
+ goto err_nomem;
+ p->rx_ring_handle =
+ dma_map_single(p->dev, p->rx_ring,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ p->rx_next = 0;
+ p->rx_next_fill = 0;
+ p->rx_current_fill = 0;
+
+ octeon_mgmt_reset_hw(p);
+
+ mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
+
+ /* Bring it out of reset if needed. */
+ if (mix_ctl.s.reset) {
+ mix_ctl.s.reset = 0;
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
+ do {
+ mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
+ } while (mix_ctl.s.reset);
+ }
+
+ if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+ agl_gmx_inf_mode.u64 = 0;
+ agl_gmx_inf_mode.s.en = 1;
+ cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+ }
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+ || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+ /* Force compensation values, as they are not
+ * determined properly by HW
+ */
+ union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+ drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+ if (p->port) {
+ drv_ctl.s.byp_en1 = 1;
+ drv_ctl.s.nctl1 = 6;
+ drv_ctl.s.pctl1 = 6;
+ } else {
+ drv_ctl.s.byp_en = 1;
+ drv_ctl.s.nctl = 6;
+ drv_ctl.s.pctl = 6;
+ }
+ cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+ }
+
+ oring1.u64 = 0;
+ oring1.s.obase = p->tx_ring_handle >> 3;
+ oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
+ cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
+
+ iring1.u64 = 0;
+ iring1.s.ibase = p->rx_ring_handle >> 3;
+ iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
+ cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
+
+ memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
+ octeon_mgmt_set_mac_address(netdev, &sa);
+
+ octeon_mgmt_change_mtu(netdev, netdev->mtu);
+
+ /* Enable the port HW. Packets are not allowed until
+ * cvmx_mgmt_port_enable() is called.
+ */
+ mix_ctl.u64 = 0;
+ mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
+ mix_ctl.s.en = 1; /* Enable the port */
+ mix_ctl.s.nbtarb = 0; /* Arbitration mode */
+ /* MII CB-request FIFO programmable high watermark */
+ mix_ctl.s.mrq_hwm = 1;
+#ifdef __LITTLE_ENDIAN
+ mix_ctl.s.lendian = 1;
+#endif
+ cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
+
+ /* Read the PHY to find the mode of the interface. */
+ if (octeon_mgmt_init_phy(netdev)) {
+ dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
+ goto err_noirq;
+ }
+
+ /* Set the mode of the interface, RGMII/MII. */
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
+ union cvmx_agl_prtx_ctl agl_prtx_ctl;
+ int rgmii_mode =
+ (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ netdev->phydev->supported) |
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ netdev->phydev->supported)) != 0;
+
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+		/* MII clock counts are based on the 125 MHz
+		 * reference clock, which has an 8 ns period. So our delays
+		 * need to be multiplied by this factor.
+ */
+#define NS_PER_PHY_CLK 8
+
+ /* Take the DLL and clock tree out of reset */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.clkrst = 0;
+ if (rgmii_mode) {
+ agl_prtx_ctl.s.dllrst = 0;
+ agl_prtx_ctl.s.clktx_byp = 0;
+ }
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+ cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
+
+ /* Wait for the DLL to lock. External 125 MHz
+ * reference clock must be stable at this point.
+ */
+ ndelay(256 * NS_PER_PHY_CLK);
+
+ /* Enable the interface */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+ agl_prtx_ctl.s.enable = 1;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+ /* Read the value back to force the previous write */
+ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+
+ /* Enable the compensation controller */
+ agl_prtx_ctl.s.comp = 1;
+ agl_prtx_ctl.s.drv_byp = 0;
+ cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+ /* Force write out before wait. */
+ cvmx_read_csr(p->agl_prt_ctl);
+
+		/* Wait for the compensation state to lock. */
+ ndelay(1040 * NS_PER_PHY_CLK);
+
+		/* The default interframe gaps are too small. The
+		 * recommended workaround is:
+ *
+ * AGL_GMX_TX_IFG[IFG1]=14
+ * AGL_GMX_TX_IFG[IFG2]=10
+ */
+ cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
+ }
+
+ octeon_mgmt_rx_fill_ring(netdev);
+
+ /* Clear statistics. */
+ /* Clear on read. */
+ cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
+ cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
+
+ cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
+ cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
+ cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
+
+ /* Clear any pending interrupts */
+ cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
+
+ if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
+ netdev)) {
+ dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
+ goto err_noirq;
+ }
+
+ /* Interrupt every single RX packet */
+ mix_irhwm.u64 = 0;
+ mix_irhwm.s.irhwm = 0;
+ cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
+
+ /* Interrupt when we have 1 or more packets to clean. */
+ mix_orhwm.u64 = 0;
+ mix_orhwm.s.orhwm = 0;
+ cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
+
+ /* Enable receive and transmit interrupts */
+ mix_intena.u64 = 0;
+ mix_intena.s.ithena = 1;
+ mix_intena.s.othena = 1;
+ cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
+
+ /* Enable packet I/O. */
+
+ rxx_frm_ctl.u64 = 0;
+ rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
+ rxx_frm_ctl.s.pre_align = 1;
+ /* When set, disables the length check for non-min sized pkts
+ * with padding in the client data.
+ */
+ rxx_frm_ctl.s.pad_len = 1;
+ /* When set, disables the length check for VLAN pkts */
+ rxx_frm_ctl.s.vlan_len = 1;
+ /* When set, PREAMBLE checking is less strict */
+ rxx_frm_ctl.s.pre_free = 1;
+ /* Control Pause Frames can match station SMAC */
+ rxx_frm_ctl.s.ctl_smac = 0;
+ /* Control Pause Frames can match globally assign Multicast address */
+ rxx_frm_ctl.s.ctl_mcst = 1;
+ /* Forward pause information to TX block */
+ rxx_frm_ctl.s.ctl_bck = 1;
+ /* Drop Control Pause Frames */
+ rxx_frm_ctl.s.ctl_drp = 1;
+ /* Strip off the preamble */
+ rxx_frm_ctl.s.pre_strp = 1;
+ /* This port is configured to send PREAMBLE+SFD to begin every
+ * frame. GMX checks that the PREAMBLE is sent correctly.
+ */
+ rxx_frm_ctl.s.pre_chk = 1;
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+
+ /* Configure the port duplex, speed and enables */
+ octeon_mgmt_disable_link(p);
+ if (netdev->phydev)
+ octeon_mgmt_update_link(p);
+ octeon_mgmt_enable_link(p);
+
+ p->last_link = 0;
+ p->last_speed = 0;
+	/* The PHY is not present in the simulator. The carrier was enabled
+	 * while initializing the PHY for the simulator; leave it enabled.
+ */
+ if (netdev->phydev) {
+ netif_carrier_off(netdev);
+ phy_start(netdev->phydev);
+ }
+
+ netif_wake_queue(netdev);
+ napi_enable(&p->napi);
+
+ return 0;
+err_noirq:
+ octeon_mgmt_reset_hw(p);
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+err_nomem:
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+ return -ENOMEM;
+}
+
+static int octeon_mgmt_stop(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ napi_disable(&p->napi);
+ netif_stop_queue(netdev);
+
+ if (netdev->phydev) {
+ phy_stop(netdev->phydev);
+ phy_disconnect(netdev->phydev);
+ }
+
+ netif_carrier_off(netdev);
+
+ octeon_mgmt_reset_hw(p);
+
+ free_irq(p->irq, netdev);
+
+ /* dma_unmap is a nop on Octeon, so just free everything. */
+ skb_queue_purge(&p->tx_list);
+ skb_queue_purge(&p->rx_list);
+
+ dma_unmap_single(p->dev, p->rx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->rx_ring);
+
+ dma_unmap_single(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+ kfree(p->tx_ring);
+
+ return 0;
+}
+
+static netdev_tx_t
+octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+ union mgmt_port_ring_entry re;
+ unsigned long flags;
+ netdev_tx_t rv = NETDEV_TX_BUSY;
+
+ re.d64 = 0;
+ re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
+ re.s.len = skb->len;
+ re.s.addr = dma_map_single(p->dev, skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+
+ if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ netif_stop_queue(netdev);
+ spin_lock_irqsave(&p->tx_list.lock, flags);
+ }
+
+ if (unlikely(p->tx_current_fill >=
+ ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+ dma_unmap_single(p->dev, re.s.addr, re.s.len,
+ DMA_TO_DEVICE);
+ goto out;
+ }
+
+ __skb_queue_tail(&p->tx_list, skb);
+
+ /* Put it in the ring. */
+ p->tx_ring[p->tx_next] = re.d64;
+ p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
+ p->tx_current_fill++;
+
+ spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+ dma_sync_single_for_device(p->dev, p->tx_ring_handle,
+ ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+ DMA_BIDIRECTIONAL);
+
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += skb->len;
+
+ /* Ring the bell. */
+ cvmx_write_csr(p->mix + MIX_ORING2, 1);
+
+ netif_trans_update(netdev);
+ rv = NETDEV_TX_OK;
+out:
+ octeon_mgmt_update_tx_stats(netdev);
+ return rv;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void octeon_mgmt_poll_controller(struct net_device *netdev)
+{
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ octeon_mgmt_receive_packets(p, 16);
+ octeon_mgmt_update_rx_stats(netdev);
+}
+#endif
+
+static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+}
+
+static int octeon_mgmt_nway_reset(struct net_device *dev)
+{
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (dev->phydev)
+ return phy_start_aneg(dev->phydev);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
+ .get_drvinfo = octeon_mgmt_get_drvinfo,
+ .nway_reset = octeon_mgmt_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct net_device_ops octeon_mgmt_ops = {
+ .ndo_open = octeon_mgmt_open,
+ .ndo_stop = octeon_mgmt_stop,
+ .ndo_start_xmit = octeon_mgmt_xmit,
+ .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
+ .ndo_set_mac_address = octeon_mgmt_set_mac_address,
+ .ndo_eth_ioctl = octeon_mgmt_ioctl,
+ .ndo_change_mtu = octeon_mgmt_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = octeon_mgmt_poll_controller,
+#endif
+};
+
+static int octeon_mgmt_probe(struct platform_device *pdev)
+{
+ struct net_device *netdev;
+ struct octeon_mgmt *p;
+ const __be32 *data;
+ struct resource *res_mix;
+ struct resource *res_agl;
+ struct resource *res_agl_prt_ctl;
+ int len;
+ int result;
+
+ netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
+ if (netdev == NULL)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ platform_set_drvdata(pdev, netdev);
+ p = netdev_priv(netdev);
+ netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
+
+ p->netdev = netdev;
+ p->dev = &pdev->dev;
+ p->has_rx_tstamp = false;
+
+ data = of_get_property(pdev->dev.of_node, "cell-index", &len);
+ if (data && len == sizeof(*data)) {
+ p->port = be32_to_cpup(data);
+ } else {
+ dev_err(&pdev->dev, "no 'cell-index' property\n");
+ result = -ENXIO;
+ goto err;
+ }
+
+ snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
+
+ result = platform_get_irq(pdev, 0);
+ if (result < 0)
+ goto err;
+
+ p->irq = result;
+
+ res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mix == NULL) {
+ dev_err(&pdev->dev, "no 'reg' resource\n");
+ result = -ENXIO;
+ goto err;
+ }
+
+ res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_agl == NULL) {
+ dev_err(&pdev->dev, "no 'reg' resource\n");
+ result = -ENXIO;
+ goto err;
+ }
+
+ res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+ if (res_agl_prt_ctl == NULL) {
+ dev_err(&pdev->dev, "no 'reg' resource\n");
+ result = -ENXIO;
+ goto err;
+ }
+
+ p->mix_phys = res_mix->start;
+ p->mix_size = resource_size(res_mix);
+ p->agl_phys = res_agl->start;
+ p->agl_size = resource_size(res_agl);
+ p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
+ p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
+
+
+ if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
+ res_mix->name)) {
+ dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+ res_mix->name);
+ result = -ENXIO;
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
+ res_agl->name)) {
+ result = -ENXIO;
+ dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+ res_agl->name);
+ goto err;
+ }
+
+ if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
+ p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
+ result = -ENXIO;
+ dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+ res_agl_prt_ctl->name);
+ goto err;
+ }
+
+ p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
+ p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
+ p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
+ p->agl_prt_ctl_size);
+ if (!p->mix || !p->agl || !p->agl_prt_ctl) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ result = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&p->lock);
+
+ skb_queue_head_init(&p->tx_list);
+ skb_queue_head_init(&p->rx_list);
+ tasklet_setup(&p->tx_clean_tasklet,
+ octeon_mgmt_clean_tx_tasklet);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->netdev_ops = &octeon_mgmt_ops;
+ netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
+
+ netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
+ netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
+
+ result = of_get_ethdev_address(pdev->dev.of_node, netdev);
+ if (result)
+ eth_hw_addr_random(netdev);
+
+ p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+
+ result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (result)
+ goto err;
+
+ netif_carrier_off(netdev);
+ result = register_netdev(netdev);
+ if (result)
+ goto err;
+
+ return 0;
+
+err:
+ of_node_put(p->phy_np);
+ free_netdev(netdev);
+ return result;
+}
+
+static int octeon_mgmt_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct octeon_mgmt *p = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ of_node_put(p->phy_np);
+ free_netdev(netdev);
+ return 0;
+}
+
+static const struct of_device_id octeon_mgmt_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-mix",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
+
+static struct platform_driver octeon_mgmt_driver = {
+ .driver = {
+ .name = "octeon_mgmt",
+ .of_match_table = octeon_mgmt_match,
+ },
+ .probe = octeon_mgmt_probe,
+ .remove = octeon_mgmt_remove,
+};
+
+module_platform_driver(octeon_mgmt_driver);
+
+MODULE_SOFTDEP("pre: mdio-cavium");
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644
index 000000000..2fc6142d1
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_RGX) += thunder_xcv.o
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644
index 000000000..090d6b839
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -0,0 +1,638 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#ifndef NIC_H
+#define NIC_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include "thunder_bgx.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
+#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
+#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
+#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
+#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
+
+#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
+#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
+#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
+#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
+
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+#define PCI_MSIX_REG_BAR_NUM 4
+
+/* NIC SRIOV VF count */
+#define MAX_NUM_VFS_SUPPORTED 128
+#define DEFAULT_NUM_VF_ENABLED 8
+
+#define NIC_TNS_BYPASS_MODE 0
+#define NIC_TNS_MODE 1
+
+/* NIC priv flags */
+#define NIC_SRIOV_ENABLED BIT(0)
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS 64
+#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
+
+/* Max pkinds */
+#define NIC_MAX_PKIND 16
+
+/* Max when CPI_ALG is IP diffserv */
+#define NIC_MAX_CPI_PER_LMAC 64
+
+/* NIC VF Interrupts */
+#define NICVF_INTR_CQ 0
+#define NICVF_INTR_SQ 1
+#define NICVF_INTR_RBDR 2
+#define NICVF_INTR_PKT_DROP 3
+#define NICVF_INTR_TCP_TIMER 4
+#define NICVF_INTR_MBOX 5
+#define NICVF_INTR_QS_ERR 6
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
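+
+/* Editorial sketch (hypothetical helper, not part of the driver): a per-queue
+ * interrupt bit is the per-type shift plus the queue index, e.g. for
+ * completion queue 'cq_idx':
+ */
+static inline u64 nicvf_cq_intr_bit_example(u8 cq_idx)
+{
+	return BIT_ULL(NICVF_INTR_CQ_SHIFT + cq_idx);
+}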
+
+/* MSI-X interrupts */
+#define NIC_PF_MSIX_VECTORS 10
+#define NIC_VF_MSIX_VECTORS 20
+
+#define NIC_PF_INTR_ID_ECC0_SBE 0
+#define NIC_PF_INTR_ID_ECC0_DBE 1
+#define NIC_PF_INTR_ID_ECC1_SBE 2
+#define NIC_PF_INTR_ID_ECC1_DBE 3
+#define NIC_PF_INTR_ID_ECC2_SBE 4
+#define NIC_PF_INTR_ID_ECC2_DBE 5
+#define NIC_PF_INTR_ID_ECC3_SBE 6
+#define NIC_PF_INTR_ID_ECC3_DBE 7
+#define NIC_PF_INTR_ID_MBOX0 8
+#define NIC_PF_INTR_ID_MBOX1 9
+
+/* Minimum FIFO level before all packets for the CQ are dropped
+ *
+ * This value ensures that once a packet has been "accepted"
+ * for reception it will not get dropped due to the non-availability
+ * of a CQ descriptor. An erratum in HW mandates this value to be
+ * at least 0x100.
+ */
+#define NICPF_CQM_MIN_DROP_LEVEL 0x100
+
+/* Global timer for CQ timer threshold interrupts.
+ * Calculated for an SCLK of 700 MHz; the value written should be
+ * 1/16th of what is expected.
+ *
+ * 1 tick per 0.025 usec
+ */
+#define NICPF_CLK_PER_INT_TICK 1
+
+/* Time to wait before we decide that a SQ is stuck.
+ *
+ * Since both pkt rx and tx notifications are done with the same CQ,
+ * when packets are being received at a very high rate (e.g. L2 forwarding),
+ * freeing transmitted skbs will be delayed and the watchdog
+ * will kick in, resetting the interface. Hence this value is kept high.
+ */
+#define NICVF_TX_TIMEOUT (50 * HZ)
+
+struct nicvf_cq_poll {
+ struct nicvf *nicvf;
+ u8 cq_idx; /* Completion queue index */
+ struct napi_struct napi;
+};
+
+#define NIC_MAX_RSS_HASH_BITS 8
+#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
+
+struct nicvf_rss_info {
+ bool enable;
+#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
+#define RSS_IP_HASH_ENA BIT(1)
+#define RSS_TCP_HASH_ENA BIT(2)
+#define RSS_TCP_SYN_DIS BIT(3)
+#define RSS_UDP_HASH_ENA BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
+#define RSS_ROCE_ENA BIT(6)
+#define RSS_L3_BI_DIRECTION_ENA BIT(7)
+#define RSS_L4_BI_DIRECTION_ENA BIT(8)
+ u64 cfg;
+ u8 hash_bits;
+ u16 rss_size;
+ u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ u64 key[RSS_HASH_KEY_SIZE];
+} ____cacheline_aligned_in_smp;
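+
+/* Editorial sketch (hypothetical, not driver code): one common way to seed
+ * the RSS indirection table is to spread entries round-robin across the
+ * receive queues. Assumes rx_queues is non-zero and rss->rss_size does not
+ * exceed NIC_MAX_RSS_IDR_TBL_SIZE.
+ */
+static inline void nicvf_rss_seed_ind_tbl_example(struct nicvf_rss_info *rss,
+						  u8 rx_queues)
+{
+	u16 idx;
+
+	for (idx = 0; idx < rss->rss_size; idx++)
+		rss->ind_tbl[idx] = idx % rx_queues;
+}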
+
+struct nicvf_pfc {
+ u8 autoneg;
+ u8 fc_rx;
+ u8 fc_tx;
+};
+
+enum rx_stats_reg_offset {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_RED = 0x4,
+ RX_RED_OCTS = 0x5,
+ RX_ORUN = 0x6,
+ RX_ORUN_OCTS = 0x7,
+ RX_FCS = 0x8,
+ RX_L2ERR = 0x9,
+ RX_DRP_BCAST = 0xa,
+ RX_DRP_MCAST = 0xb,
+ RX_DRP_L3BCAST = 0xc,
+ RX_DRP_L3MCAST = 0xd,
+ RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+ TX_OCTS = 0x0,
+ TX_UCAST = 0x1,
+ TX_BCAST = 0x2,
+ TX_MCAST = 0x3,
+ TX_DROP = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+ u64 rx_bytes;
+ u64 rx_frames;
+ u64 rx_ucast_frames;
+ u64 rx_bcast_frames;
+ u64 rx_mcast_frames;
+ u64 rx_drops;
+ u64 rx_drop_red;
+ u64 rx_drop_red_bytes;
+ u64 rx_drop_overrun;
+ u64 rx_drop_overrun_bytes;
+ u64 rx_drop_bcast;
+ u64 rx_drop_mcast;
+ u64 rx_drop_l3_bcast;
+ u64 rx_drop_l3_mcast;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* CQE Rx errs */
+ u64 rx_bgx_truncated_pkts;
+ u64 rx_jabber_errs;
+ u64 rx_fcs_errs;
+ u64 rx_bgx_errs;
+ u64 rx_prel2_errs;
+ u64 rx_l2_hdr_malformed;
+ u64 rx_oversize;
+ u64 rx_undersize;
+ u64 rx_l2_len_mismatch;
+ u64 rx_l2_pclp;
+ u64 rx_ip_ver_errs;
+ u64 rx_ip_csum_errs;
+ u64 rx_ip_hdr_malformed;
+ u64 rx_ip_payload_malformed;
+ u64 rx_ip_ttl_errs;
+ u64 rx_l3_pclp;
+ u64 rx_l4_malformed;
+ u64 rx_l4_csum_errs;
+ u64 rx_udp_len_errs;
+ u64 rx_l4_port_errs;
+ u64 rx_tcp_flag_errs;
+ u64 rx_tcp_offset_errs;
+ u64 rx_l4_pclp;
+ u64 rx_truncated_pkts;
+
+ /* CQE Tx errs */
+ u64 tx_desc_fault;
+ u64 tx_hdr_cons_err;
+ u64 tx_subdesc_err;
+ u64 tx_max_size_exceeded;
+ u64 tx_imm_size_oflow;
+ u64 tx_data_seq_err;
+ u64 tx_mem_seq_err;
+ u64 tx_lock_viol;
+ u64 tx_data_fault;
+ u64 tx_tstmp_conflict;
+ u64 tx_tstmp_timeout;
+ u64 tx_mem_fault;
+ u64 tx_csum_overlap;
+ u64 tx_csum_overflow;
+
+ /* driver debug stats */
+ u64 tx_tso;
+ u64 tx_timeout;
+ u64 txq_stop;
+ u64 txq_wake;
+
+ u64 rcv_buffer_alloc_failures;
+ u64 page_alloc;
+
+ struct u64_stats_sync syncp;
+};
+
+struct cavium_ptp;
+
+struct xcast_addr_list {
+ int count;
+ u64 mc[];
+};
+
+struct nicvf_work {
+ struct work_struct work;
+ u8 mode;
+ struct xcast_addr_list *mc;
+};
+
+struct nicvf {
+ struct nicvf *pnicvf;
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ struct bpf_prog *xdp_prog;
+#define MAX_QUEUES_PER_QSET 8
+ struct queue_set *qs;
+ void *iommu_domain;
+ u8 vf_id;
+ u8 sqs_id;
+ bool sqs_mode;
+ bool hw_tso;
+ bool t88;
+
+ /* Receive buffer alloc */
+ u32 rb_page_offset;
+ u16 rb_pageref;
+ bool rb_alloc_fail;
+ bool rb_work_scheduled;
+ struct page *rb_page;
+ struct delayed_work rbdr_work;
+ struct tasklet_struct rbdr_task;
+
+ /* Secondary Qset */
+ u8 sqs_count;
+#define MAX_SQS_PER_VF_SINGLE_NODE 5
+#define MAX_SQS_PER_VF 11
+ struct nicvf *snicvf[MAX_SQS_PER_VF];
+
+ /* Queue count */
+ u8 rx_queues;
+ u8 tx_queues;
+ u8 xdp_tx_queues;
+ u8 max_queues;
+
+ u8 node;
+ u8 cpi_alg;
+ bool link_up;
+ u8 mac_type;
+ u8 duplex;
+ u32 speed;
+ bool tns_mode;
+ bool loopback_supported;
+ struct nicvf_rss_info rss_info;
+ struct nicvf_pfc pfc;
+ struct tasklet_struct qs_err_task;
+ struct work_struct reset_task;
+ struct nicvf_work rx_mode_work;
+ /* spinlock to protect workqueue arguments from concurrent access */
+ spinlock_t rx_mode_wq_lock;
+ /* workqueue for handling kernel ndo_set_rx_mode() calls */
+ struct workqueue_struct *nicvf_rx_mode_wq;
+ /* mutex to protect VF's mailbox contents from concurrent access */
+ struct mutex rx_mode_mtx;
+ struct delayed_work link_change_work;
+ /* PTP timestamp */
+ struct cavium_ptp *ptp_clock;
+ /* Inbound timestamping is on */
+ bool hw_rx_tstamp;
+ /* When the packet that requires timestamping is sent, hardware inserts
+ * two entries to the completion queue. First is the regular
+ * CQE_TYPE_SEND entry that signals that the packet was sent.
+ * The second is CQE_TYPE_SEND_PTP that contains the actual timestamp
+ * for that packet.
+ * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
+ * entry and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP
+ * entry.
+ * So `ptp_skb` is used to hold the pointer to the packet between
+ * the calls to CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
+ */
+ struct sk_buff *ptp_skb;
+ /* `tx_ptp_skbs` is set when the hardware is sending a packet that
+	 * requires timestamping. Cavium hardware cannot process more than one
+ * such packet at once so this is set each time the driver submits
+ * a packet that requires timestamping to the send queue and clears
+ * each time it receives the entry on the completion queue saying
+ * that such packet was sent.
+	 * So `tx_ptp_skbs` prevents the driver from submitting more than one
+ * packet that requires timestamping to the hardware for transmitting.
+ */
+ atomic_t tx_ptp_skbs;
+
+ /* Interrupt coalescing settings */
+ u32 cq_coalesce_usecs;
+ u32 msg_enable;
+
+ /* Stats */
+ struct nicvf_hw_stats hw_stats;
+ struct nicvf_drv_stats __percpu *drv_stats;
+ struct bgx_stats bgx_stats;
+
+ /* Napi */
+ struct nicvf_cq_poll *napi[8];
+
+ /* MSI-X */
+ u8 num_vec;
+ char irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
+ bool irq_allocated[NIC_VF_MSIX_VECTORS];
+ cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
+
+ /* VF <-> PF mailbox communication */
+ bool pf_acked;
+ bool pf_nacked;
+ bool set_mac_pending;
+} ____cacheline_aligned_in_smp;
+
+/* PF <--> VF Mailbox communication
+ * Eight 64bit registers are shared between PF and VF.
+ * Separate set for each VF.
+ * Writing '1' into last register mbx7 means end of message.
+ */
+
+/* PF <--> VF mailbox communication */
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define	NIC_MBOX_MSG_RQ_DROP_CFG	0x07	/* Configure receive queue drop levels */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
+#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
+#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
+#define	NIC_MBOX_MSG_SNICVF_PTR		0x15	/* Send secondary Qset nicvf ptr to PVF */
+#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
+#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
+#define NIC_MBOX_MSG_PFC 0x18 /* Pause frame control */
+#define NIC_MBOX_MSG_PTP_CFG 0x19 /* HW packet timestamp */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
+#define NIC_MBOX_MSG_RESET_XCAST 0xF2 /* Reset DCAM filtering mode */
+#define NIC_MBOX_MSG_ADD_MCAST 0xF3 /* Add MAC to DCAM filters */
+#define NIC_MBOX_MSG_SET_XCAST 0xF4 /* Set MCAST/BCAST RX mode */
+
+struct nic_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 node_id;
+ u8 tns_mode:1;
+ u8 sqs_mode:1;
+ u8 loopback_supported:1;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ u8 msg;
+ u8 num;
+ u8 sqs_count;
+ u64 cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 rq_num;
+ u64 cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 sq_num;
+ bool sqs_mode;
+ u64 cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rq_cnt;
+ u8 cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 hash_bits;
+ u8 tbl_len;
+ u8 tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
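+ /* Larger indirection tables are split across several messages using
+ * tbl_offset, with NIC_MBOX_MSG_RSS_CFG_CONT for the continuations.
+ */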
+ u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rx;
+ u8 idx;
+ u64 stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ u8 msg;
+ u8 mac_type;
+ u8 link_up;
+ u8 duplex;
+ u32 speed;
+};
+
+/* Get Extra Qset IDs */
+struct sqs_alloc {
+ u8 msg;
+ u8 vf_id;
+ u8 qs_count;
+};
+
+struct nicvf_ptr {
+ u8 msg;
+ u8 vf_id;
+ bool sqs_mode;
+ u8 sqs_id;
+ u64 nicvf;
+};
+
+/* Set interface in loopback mode */
+struct set_loopback {
+ u8 msg;
+ u8 vf_id;
+ bool enable;
+};
+
+/* Reset statistics counters */
+struct reset_stat_cfg {
+ u8 msg;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
+ u16 rx_stat_mask;
+ /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
+ u8 tx_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
+ */
+ u16 rq_stat_mask;
+ /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
+ * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
+ * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
+ * ..
+ * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
+ * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
+ */
+ u16 sq_stat_mask;
+};
+
+struct pfc {
+ u8 msg;
+ u8 get; /* Get or set PFC settings */
+ u8 autoneg;
+ u8 fc_rx;
+ u8 fc_tx;
+};
+
+struct set_ptp {
+ u8 msg;
+ bool enable;
+};
+
+struct xcast {
+ u8 msg;
+ u8 mode;
+ u64 mac:48;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+ struct { u8 msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_stats_msg bgx_stats;
+ struct bgx_link_status link_status;
+ struct sqs_alloc sqs_alloc;
+ struct nicvf_ptr nicvf;
+ struct set_loopback lbk;
+ struct reset_stat_cfg reset_stat;
+ struct pfc pfc;
+ struct set_ptp ptp;
+ struct xcast xcast;
+};
+
+#define NIC_NODE_ID_MASK 0x03
+#define NIC_NODE_ID_SHIFT 44
+
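+/* The NUMA node ID is encoded in bits 45:44 of the config register BAR address */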
+static inline int nic_get_node_id(struct pci_dev *pdev)
+{
+ u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
+ return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
+}
+
+static inline bool pass1_silicon(struct pci_dev *pdev)
+{
+ return (pdev->revision < 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
+}
+
+static inline bool pass2_silicon(struct pci_dev *pdev)
+{
+ return (pdev->revision >= 8) &&
+ (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+int nicvf_open(struct net_device *netdev);
+int nicvf_stop(struct net_device *netdev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_config_rss(struct nicvf *nic);
+void nicvf_set_rss_key(struct nicvf *nic);
+void nicvf_set_ethtool_ops(struct net_device *netdev);
+void nicvf_update_stats(struct nicvf *nic);
+void nicvf_update_lmac_stats(struct nicvf *nic);
+
+#endif /* NIC_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644
index 000000000..0ec65ec63
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -0,0 +1,1416 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/if_vlan.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "nicpf"
+#define DRV_VERSION "1.0"
+
+#define NIC_VF_PER_MBX_REG 64
+
+struct hw_info {
+ u8 bgx_cnt;
+ u8 chans_per_lmac;
+ u8 chans_per_bgx; /* Rx/Tx chans */
+ u8 chans_per_rgx;
+ u8 chans_per_lbk;
+ u16 cpi_cnt;
+ u16 rssi_cnt;
+ u16 rss_ind_tbl_size;
+ u16 tl4_cnt;
+ u16 tl3_cnt;
+ u8 tl2_cnt;
+ u8 tl1_cnt;
+ bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
+};
+
+struct nicpf {
+ struct pci_dev *pdev;
+ struct hw_info *hw;
+ u8 node;
+ unsigned int flags;
+ u8 num_vf_en; /* Number of VFs enabled */
+ bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
+ void __iomem *reg_base; /* Register start address */
+ u8 num_sqs_en; /* Secondary qsets enabled */
+ u64 nicvf[MAX_NUM_VFS_SUPPORTED];
+ u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
+ u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
+ bool sqs_used[MAX_NUM_VFS_SUPPORTED];
+ struct pkind_cfg pkind;
+#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
+#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
+#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
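+ /* Per-VF (BGX, LMAC) pair packed into one byte: BGX index in bits 7:4,
+ * LMAC index in bits 3:0.
+ */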
+ u8 *vf_lmac_map;
+ u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
+
+ /* MSI-X */
+ u8 num_vec;
+ unsigned int irq_allocated[NIC_PF_MSIX_VECTORS];
+ char irq_name[NIC_PF_MSIX_VECTORS][20];
+};
+
+/* Supported devices */
+static const struct pci_device_id nic_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nic_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+/* PF -> VF mailbox communication APIs */
+static void nic_enable_mbx_intr(struct nicpf *nic)
+{
+ int vf_cnt = pci_sriov_get_totalvfs(nic->pdev);
+
+#define INTR_MASK(vfs) ((vfs < 64) ? (BIT_ULL(vfs) - 1) : (~0ull))
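+/* e.g. INTR_MASK(8) == 0xff, i.e. one bit per VF; with 64 or more VFs the full
+ * 64-bit mask is used and any remainder is covered by the second register below.
+ */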
+
+ /* Clear it, to avoid spurious interrupts (if any) */
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT, INTR_MASK(vf_cnt));
+
+ /* Enable mailbox interrupt for all VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, INTR_MASK(vf_cnt));
+ /* One mailbox intr enable reg per 64 VFs */
+ if (vf_cnt > 64) {
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64),
+ INTR_MASK(vf_cnt - 64));
+ }
+}
+
+static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
+{
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+ return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
+/* Send a mailbox message to VF
+ * @vf: VF to which this message is to be sent
+ * @mbx: Message to be sent
+ */
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+ void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
+ u64 *msg = (u64 *)mbx;
+
+ /* On first-revision HW, the mbox interrupt is triggered
+ * when the PF writes to MBOX(1); on later revisions, when
+ * the PF writes to MBOX(0).
+ */
+ if (pass1_silicon(nic->pdev)) {
+ /* see the comment for nic_reg_write()/nic_reg_read()
+ * functions above
+ */
+ writeq_relaxed(msg[0], mbx_addr);
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ } else {
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ writeq_relaxed(msg[0], mbx_addr);
+ }
+}
+
+/* Responds to VF's READY message with VF's
+ * ID, node, MAC address, etc.
+ * @vf: VF which sent the READY message
+ */
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ int bgx_idx, lmac;
+ const char *mac;
+
+ mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+ mbx.nic_cfg.vf_id = vf;
+
+ mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+ if (vf < nic->num_vf_en) {
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+ }
+ mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en);
+ mbx.nic_cfg.node_id = nic->node;
+
+ mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
+
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which the ACK is to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_ACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message when the PF is not able to
+ * complete the requested action
+ * @vf: VF to which the NACK is to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_NACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Flush all in-flight receive packets to memory and
+ * bring down an active RQ
+ */
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+ u16 timeout = ~0x00;
+
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+ /* Wait till sync cycle is finished */
+ while (timeout) {
+ if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+ break;
+ timeout--;
+ }
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+ if (!timeout) {
+ dev_err(&nic->pdev->dev, "Receive queue software sync failed");
+ return 1;
+ }
+ return 0;
+}
+
+/* Get BGX Rx/Tx stats and respond to VF's request */
+static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
+{
+ int bgx_idx, lmac;
+ union nic_mbx mbx = {};
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = bgx->vf_id;
+ mbx.bgx_stats.rx = bgx->rx;
+ mbx.bgx_stats.idx = bgx->idx;
+ if (bgx->rx)
+ mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ else
+ mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
+}
+
+/* Update hardware min/max frame size */
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+ int bgx, lmac, lmac_cnt;
+ u64 lmac_credits;
+
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
+ return 1;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac += bgx * MAX_LMAC_PER_BGX;
+
+ new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+ /* Update corresponding LMAC credits */
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+ lmac_credits &= ~(0xFFFFFULL << 12);
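+ /* Credit field starts at bit 12 and is in 16-byte units: the LMAC's share
+ * of the 48KB BGX Tx buffer minus one max-sized frame.
+ */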
+ lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+ /* Enforce MTU in HW
+ * This config is supported only from 88xx pass 2.0 onwards.
+ */
+ if (!pass1_silicon(nic->pdev))
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
+ return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+ int lmac, max_lmac;
+ u16 sdevid;
+ u64 lmac_cfg;
+
+ /* There is an issue in HW wherein, while sending GSO sized
+ * pkts as part of TSO, if the pkt len falls below this size
+ * the NIC will zero-pad the packet and also update the IP total length.
+ * Hence set this value to less than the min pkt size of MAC+IP+TCP
+ * headers; BGX will do the padding to transmit a 64 byte pkt.
+ */
+ if (size > 52)
+ size = 52;
+
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ /* 81xx's RGX has only one LMAC */
+ if (sdevid == PCI_SUBSYS_DEVID_81XX_NIC_PF)
+ max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
+ else
+ max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+ for (lmac = 0; lmac < max_lmac; lmac++) {
+ lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+ lmac_cfg &= ~(0xF << 2);
+ lmac_cfg |= ((size / 4) << 2);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+ }
+}
+
+/* Check the number of LMACs present and set the VF-to-LMAC mapping.
+ * The mapping is used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+ unsigned int bgx_map = bgx_get_map(nic->node);
+ int bgx, next_bgx_lmac = 0;
+ int lmac, lmac_cnt = 0;
+ u64 lmac_credit;
+
+ nic->num_vf_en = 0;
+
+ for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
+ if (!(bgx_map & (1 << bgx)))
+ continue;
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ for (lmac = 0; lmac < lmac_cnt; lmac++)
+ nic->vf_lmac_map[next_bgx_lmac++] =
+ NIC_SET_VF_LMAC_MAP(bgx, lmac);
+ nic->num_vf_en += lmac_cnt;
+
+ /* Program LMAC credits */
+ lmac_credit = (1ull << 1); /* channel credit enable */
+ lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+ /* 48KB BGX Tx buffer size, each unit is of size 16bytes */
+ lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+ NIC_HW_MAX_FRS) / 16) << 12);
+ lmac = bgx * MAX_LMAC_PER_BGX;
+ for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+ lmac_credit);
+
+ /* On CN81XX there are only 8 VFs but the maximum possible number
+ * of interfaces is 9.
+ */
+ if (nic->num_vf_en >= pci_sriov_get_totalvfs(nic->pdev)) {
+ nic->num_vf_en = pci_sriov_get_totalvfs(nic->pdev);
+ break;
+ }
+ }
+}
+
+static void nic_get_hw_info(struct nicpf *nic)
+{
+ u16 sdevid;
+ struct hw_info *hw = nic->hw;
+
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_88XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN88XX;
+ hw->chans_per_lmac = 16;
+ hw->chans_per_bgx = 128;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 4096;
+ hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 2;
+ hw->tl1_per_bgx = true;
+ break;
+ case PCI_SUBSYS_DEVID_81XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN81XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_rgx = 8;
+ hw->chans_per_lbk = 24;
+ hw->cpi_cnt = 512;
+ hw->rssi_cnt = 256;
+ hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 64;
+ hw->tl2_cnt = 16;
+ hw->tl1_cnt = 10;
+ hw->tl1_per_bgx = false;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_NIC_PF:
+ hw->bgx_cnt = MAX_BGX_PER_CN83XX;
+ hw->chans_per_lmac = 8;
+ hw->chans_per_bgx = 32;
+ hw->chans_per_lbk = 64;
+ hw->cpi_cnt = 2048;
+ hw->rssi_cnt = 1024;
+ hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
+ hw->tl3_cnt = 256;
+ hw->tl2_cnt = 64;
+ hw->tl1_cnt = 18;
+ hw->tl1_per_bgx = false;
+ break;
+ }
+ hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->pdev);
+}
+
+#define BGX0_BLOCK 8
+#define BGX1_BLOCK 9
+
+static void nic_init_hw(struct nicpf *nic)
+{
+ int i;
+ u64 cqm_cfg;
+
+ /* Enable NIC HW block */
+ nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+ /* Enable backpressure */
+ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+
+ /* TNS and TNS bypass modes are present only on 88xx
+ * Also offset of this CSR has changed in 81xx and 83xx.
+ */
+ if (nic->pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF) {
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX0_BLOCK | (1ULL << 16));
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) |
+ BGX1_BLOCK | (1ULL << 16));
+ } else {
+ /* Configure timestamp generation timeout to 10us */
+ for (i = 0; i < nic->hw->bgx_cnt; i++)
+ nic_reg_write(nic, NIC_PF_INTFX_SEND_CFG | (i << 3),
+ (1ULL << 16));
+ }
+
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
+ (1ULL << 63) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+ (1ULL << 63) | BGX1_BLOCK);
+
+ /* PKIND configuration */
+ nic->pkind.minlen = 0;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+ nic->pkind.lenerr_en = 1;
+ nic->pkind.rx_hdr = 0;
+ nic->pkind.hdr_sl = 0;
+
+ for (i = 0; i < NIC_MAX_PKIND; i++)
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
+ *(u64 *)&nic->pkind);
+
+ nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+ /* Timer config */
+ nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+
+ /* Enable VLAN ethertype matching and stripping */
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
+ (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
+
+ /* Check if HW expected value is higher (could be in future chips) */
+ cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
+ if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
+ nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+ struct hw_info *hw = nic->hw;
+ u32 vnic, bgx, lmac, chan;
+ u32 padd, cpi_count = 0;
+ u64 cpi_base, cpi, rssi_base, rssi;
+ u8 qset, rq_idx = 0;
+
+ vnic = cfg->vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
+ rssi_base = vnic * hw->rss_ind_tbl_size;
+
+ /* Rx channel configuration */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+ (1ull << 63) | (vnic << 0));
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+ ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+ if (cfg->cpi_alg == CPI_ALG_NONE)
+ cpi_count = 1;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+ cpi_count = 8;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+ cpi_count = 16;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+ cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+ /* RSS Qset, Qidx mapping */
+ qset = cfg->vf_id;
+ rssi = rssi_base;
+ for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | rq_idx);
+ rq_idx++;
+ }
+
+ rssi = 0;
+ cpi = cpi_base;
+ for (; cpi < (cpi_base + cpi_count); cpi++) {
+ /* Determine port to channel adder */
+ if (cfg->cpi_alg != CPI_ALG_DIFF)
+ padd = cpi % cpi_count;
+ else
+ padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+ /* Leave RSS_SIZE as '0' to disable RSS */
+ if (pass1_silicon(nic->pdev)) {
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) |
+ (rssi_base + rssi));
+ } else {
+ /* Set MPI_ALG to '0' to disable MCAM parsing */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (padd << 16));
+ /* MPI index is same as CPI if MPI_ALG is not enabled */
+ nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (rssi_base + rssi));
+ }
+
+ if ((rssi + 1) >= cfg->rq_cnt)
+ continue;
+
+ if (cfg->cpi_alg == CPI_ALG_VLAN)
+ rssi++;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+ rssi = ((cpi - cpi_base) & 0xe) >> 1;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF)
+ rssi = ((cpi - cpi_base) & 0x38) >> 3;
+ }
+ nic->cpi_base[cfg->vf_id] = cpi_base;
+ nic->rssi_base[cfg->vf_id] = rssi_base;
+}
+
+/* Responds to VF with its RSS indirection table size */
+static void nic_send_rss_size(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.ind_tbl_size = nic->hw->rss_ind_tbl_size;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Receive side scaling configuration
+ * Configures:
+ * - RSS index
+ * - indirection table, i.e. hash::RQ mapping
+ * - number of hash bits to consider
+ */
+static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
+{
+ u8 qset, idx = 0;
+ u64 cpi_cfg, cpi_base, rssi_base, rssi;
+ u64 idx_addr;
+
+ rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;
+
+ rssi = rssi_base;
+
+ for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+ u8 svf = cfg->ind_tbl[idx] >> 3;
+
+ if (svf)
+ qset = nic->vf_sqs[cfg->vf_id][svf - 1];
+ else
+ qset = cfg->vf_id;
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
+ idx++;
+ }
+
+ cpi_base = nic->cpi_base[cfg->vf_id];
+ if (pass1_silicon(nic->pdev))
+ idx_addr = NIC_PF_CPI_0_2047_CFG;
+ else
+ idx_addr = NIC_PF_MPI_0_2047_CFG;
+ cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
+ cpi_cfg &= ~(0xFULL << 20);
+ cpi_cfg |= (cfg->hash_bits << 20);
+ nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
+}
+
+/* 4-level transmit side scheduler configuration
+ * for TNS bypass mode
+ *
+ * Sample configuration for SQ0 on 88xx
+ * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
+ * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
+ struct sq_cfg_msg *sq)
+{
+ struct hw_info *hw = nic->hw;
+ u32 bgx, lmac, chan;
+ u32 tl2, tl3, tl4;
+ u32 rr_quantum;
+ u8 sq_idx = sq->sq_num;
+ u8 pqs_vnic;
+ int svf;
+
+ if (sq->sqs_mode)
+ pqs_vnic = nic->pqs_vf[vnic];
+ else
+ pqs_vnic = vnic;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
+
+ /* 24 bytes for FCS, IPG and preamble */
+ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+ /* For 88xx 0-511 TL4 transmits via BGX0 and
+ * 512-1023 TL4s transmit via BGX1.
+ */
+ if (hw->tl1_per_bgx) {
+ tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
+ if (!sq->sqs_mode) {
+ tl4 += (lmac * MAX_QUEUES_PER_QSET);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
+ tl4 += (lmac * MAX_QUEUES_PER_QSET * MAX_SQS_PER_VF);
+ tl4 += (svf * MAX_QUEUES_PER_QSET);
+ }
+ } else {
+ tl4 = (vnic * MAX_QUEUES_PER_QSET);
+ }
+ tl4 += sq_idx;
+
+ tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
+ nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+ ((u64)vnic << NIC_QS_ID_SHIFT) |
+ ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+ nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+ ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+
+ /* On 88xx, channels 0-127 are for BGX0 and
+ * channels 128-255 are for BGX1.
+ *
+ * On 81xx/83xx the TL3_CHAN reg should be configured with the channel
+ * within the LMAC, i.e. 0-7, and not the actual channel number as on 88xx.
+ */
+ chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
+ if (hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ else
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
+
+ /* Enable backpressure on the channel */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
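+ /* Each group of four TL3s feeds one TL2 */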
+ tl2 = tl3 >> 2;
+ nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+ nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+ /* No priorities as of now */
+ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+
+ /* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest to TL1 '1',
+ * on 81xx/83xx a TL2 needs to be configured to transmit to one of the
+ * possible LMACs.
+ *
+ * This register doesn't exist on 88xx.
+ */
+ if (!hw->tl1_per_bgx)
+ nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
+ lmac + (bgx * MAX_LMAC_PER_BGX));
+}
+
+/* Send primary nicvf pointer to secondary QS's VF */
+static void nic_send_pnicvf(struct nicpf *nic, int sqs)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+ mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
+ nic_send_msg_to_vf(nic, sqs, &mbx);
+}
+
+/* Send SQS's nicvf pointer to primary QS's VF */
+static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
+{
+ union nic_mbx mbx = {};
+ int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+ mbx.nicvf.sqs_id = nicvf->sqs_id;
+ mbx.nicvf.nicvf = nic->nicvf[sqs_id];
+ nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
+}
+
+/* Find next available Qset that can be assigned as a
+ * secondary Qset to a VF.
+ */
+static int nic_nxt_avail_sqs(struct nicpf *nic)
+{
+ int sqs;
+
+ for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
+ if (nic->sqs_used[sqs])
+ continue;
+ nic->sqs_used[sqs] = true;
+ return sqs + nic->num_vf_en;
+ }
+ return -1;
+}
+
+/* Allocate additional Qsets for requested VF */
+static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
+{
+ union nic_mbx mbx = {};
+ int idx, alloc_qs = 0;
+ int sqs_id;
+
+ if (!nic->num_sqs_en)
+ goto send_mbox;
+
+ for (idx = 0; idx < sqs->qs_count; idx++) {
+ sqs_id = nic_nxt_avail_sqs(nic);
+ if (sqs_id < 0)
+ break;
+ nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
+ nic->pqs_vf[sqs_id] = sqs->vf_id;
+ alloc_qs++;
+ }
+
+send_mbox:
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.vf_id = sqs->vf_id;
+ mbx.sqs_alloc.qs_count = alloc_qs;
+ nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
+}
+
+static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
+{
+ int bgx_idx, lmac_idx;
+
+ if (lbk->vf_id >= nic->num_vf_en)
+ return -1;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+ lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
+
+ bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
+
+ /* Enable moving average calculation.
+ * Keep the LVL/AVG delay at the HW enforced minimum so that not too many
+ * packets sneak in between average calculations.
+ */
+ nic_reg_write(nic, NIC_PF_CQ_AVG_CFG,
+ (BIT_ULL(20) | 0x2ull << 14 | 0x1));
+ nic_reg_write(nic, NIC_PF_RRM_AVG_CFG,
+ (BIT_ULL(20) | 0x3ull << 14 | 0x1));
+
+ return 0;
+}
+
+/* Reset statistics counters */
+static int nic_reset_stat_counters(struct nicpf *nic,
+ int vf, struct reset_stat_cfg *cfg)
+{
+ int i, stat, qnum;
+ u64 reg_addr;
+
+ for (i = 0; i < RX_STATS_ENUM_LAST; i++) {
+ if (cfg->rx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_RX_STAT_0_13 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i < TX_STATS_ENUM_LAST; i++) {
+ if (cfg->tx_stat_mask & BIT(i)) {
+ reg_addr = NIC_PF_VNIC_0_127_TX_STAT_0_4 |
+ (vf << NIC_QS_ID_SHIFT) |
+ (i << 3);
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ for (i = 0; i <= 15; i++) {
+ qnum = i >> 1;
+ stat = i & 1 ? 1 : 0;
+ reg_addr = (vf << NIC_QS_ID_SHIFT) |
+ (qnum << NIC_Q_NUM_SHIFT) | (stat << 3);
+ if (cfg->rq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ if (cfg->sq_stat_mask & BIT(i)) {
+ reg_addr |= NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1;
+ nic_reg_write(nic, reg_addr, 0);
+ }
+ }
+
+ return 0;
+}
+
+static void nic_enable_tunnel_parsing(struct nicpf *nic, int vf)
+{
+ u64 prot_def = (IPV6_PROT << 32) | (IPV4_PROT << 16) | ET_PROT;
+ u64 vxlan_prot_def = (IPV6_PROT_DEF << 32) |
+ (IPV4_PROT_DEF) << 16 | ET_PROT_DEF;
+
+ /* Configure tunnel parsing parameters */
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_DEF,
+ (1ULL << 63 | UDP_GENEVE_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_GENEVE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_NVGRE_PROT_DEF,
+ ((7ULL << 61) | prot_def));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_DEF_0_1,
+ ((1ULL << 63) | UDP_VXLAN_PORT_NUM));
+ nic_reg_write(nic, NIC_PF_RX_VXLAN_PROT_DEF,
+ ((0xfULL << 60) | vxlan_prot_def));
+}
+
+static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
+{
+ int bgx, lmac;
+
+ nic->vf_enabled[vf] = enable;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
+}
+
+static void nic_pause_frame(struct nicpf *nic, int vf, struct pfc *cfg)
+{
+ int bgx, lmac;
+ struct pfc pfc;
+ union nic_mbx mbx = {};
+
+ if (vf >= nic->num_vf_en)
+ return;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ if (cfg->get) {
+ bgx_lmac_get_pfc(nic->node, bgx, lmac, &pfc);
+ mbx.pfc.msg = NIC_MBOX_MSG_PFC;
+ mbx.pfc.autoneg = pfc.autoneg;
+ mbx.pfc.fc_rx = pfc.fc_rx;
+ mbx.pfc.fc_tx = pfc.fc_tx;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+ } else {
+ bgx_lmac_set_pfc(nic->node, bgx, lmac, cfg);
+ nic_mbx_send_ack(nic, vf);
+ }
+}
+
+/* Enable or disable HW timestamping by BGX for pkts received on a LMAC */
+static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp)
+{
+ struct pkind_cfg *pkind;
+ u8 lmac, bgx_idx;
+ u64 pkind_val, pkind_idx;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ pkind_idx = lmac + bgx_idx * MAX_LMAC_PER_BGX;
+ pkind_val = nic_reg_read(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3));
+ pkind = (struct pkind_cfg *)&pkind_val;
+
+ if (ptp->enable && !pkind->hdr_sl) {
+ /* Skip length to exclude the 8 byte timestamp while parsing the pkt.
+ * If not configured, this will result in L2 errors.
+ */
+ pkind->hdr_sl = 4;
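+ /* hdr_sl is in units of 2 bytes, so 4 covers the 8 byte timestamp */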
+ /* Adjust max packet length allowed */
+ pkind->maxlen += (pkind->hdr_sl * 2);
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, true);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_ENDPARSE << 16) | ETH_P_1588);
+ } else if (!ptp->enable && pkind->hdr_sl) {
+ pkind->maxlen -= (pkind->hdr_sl * 2);
+ pkind->hdr_sl = 0;
+ bgx_config_timestamping(nic->node, bgx_idx, lmac, false);
+ nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7 | (1 << 3),
+ (ETYPE_ALG_SKIP << 16) | ETH_P_8021Q);
+ }
+
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val);
+}
+
+/* Get BGX LMAC link status and update the corresponding VF
+ * if there is a change. Valid only if the internal L2 switch
+ * is not present; otherwise the VF link is always treated as up.
+ */
+static void nic_link_status_get(struct nicpf *nic, u8 vf)
+{
+ union nic_mbx mbx = {};
+ struct bgx_link_status link;
+ u8 bgx, lmac;
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Send a mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ mbx.link_status.mac_type = link.mac_type;
+
+ /* reply with link status */
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Interrupt handler to handle mailbox messages from VFs */
+static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ u64 reg_addr;
+ u64 cfg;
+ int bgx, lmac;
+ int i;
+ int ret = 0;
+
+ mbx_addr = nic_get_mbx_addr(vf);
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nic_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg 0x%02x from VF%d\n",
+ __func__, mbx.msg.msg, vf);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic_mbx_send_ready(nic, vf);
+ return;
+ case NIC_MBOX_MSG_QS_CFG:
+ reg_addr = NIC_PF_QSET_0_127_CFG |
+ (mbx.qs.num << NIC_QS_ID_SHIFT);
+ cfg = mbx.qs.cfg;
+ /* Check if it's a secondary Qset */
+ if (vf >= nic->num_vf_en) {
+ cfg = cfg & (~0x7FULL);
+ /* Assign this Qset to primary Qset's VF */
+ cfg |= nic->pqs_vf[vf];
+ }
+ nic_reg_write(nic, reg_addr, cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ /* Enable CQE_RX2_S extension in the CQE_RX descriptor.
+ * It is appended by default on 81xx/83xx chips; for
+ * consistency, enable the same on 88xx pass2, where it
+ * was introduced.
+ */
+ if (pass2_silicon(nic->pdev))
+ nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
+ if (!pass1_silicon(nic->pdev))
+ nic_enable_tunnel_parsing(nic, vf);
+ break;
+ case NIC_MBOX_MSG_RQ_BP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_SW_SYNC:
+ ret = nic_rcv_queue_sw_sync(nic);
+ break;
+ case NIC_MBOX_MSG_RQ_DROP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_SQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+ (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+ nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
+ break;
+ case NIC_MBOX_MSG_SET_MAC:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ lmac = mbx.mac.vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+ break;
+ case NIC_MBOX_MSG_SET_MAX_FRS:
+ ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+ mbx.frs.vf_id);
+ break;
+ case NIC_MBOX_MSG_CPI_CFG:
+ nic_config_cpi(nic, &mbx.cpi_cfg);
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic_send_rss_size(nic, vf);
+ return;
+ case NIC_MBOX_MSG_RSS_CFG:
+ case NIC_MBOX_MSG_RSS_CFG_CONT:
+ nic_config_rss(nic, &mbx.rss_cfg);
+ break;
+ case NIC_MBOX_MSG_CFG_DONE:
+ /* Last message of VF config msg sequence */
+ nic_enable_vf(nic, vf, true);
+ break;
+ case NIC_MBOX_MSG_SHUTDOWN:
+ /* First msg in VF teardown sequence */
+ if (vf >= nic->num_vf_en)
+ nic->sqs_used[vf - nic->num_vf_en] = false;
+ nic->pqs_vf[vf] = 0;
+ nic_enable_vf(nic, vf, false);
+ break;
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ nic_alloc_sqs(nic, &mbx.sqs_alloc);
+ return;
+ case NIC_MBOX_MSG_NICVF_PTR:
+ nic->nicvf[vf] = mbx.nicvf.nicvf;
+ break;
+ case NIC_MBOX_MSG_PNICVF_PTR:
+ nic_send_pnicvf(nic, vf);
+ return;
+ case NIC_MBOX_MSG_SNICVF_PTR:
+ nic_send_snicvf(nic, &mbx.nicvf);
+ return;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nic_get_bgx_stats(nic, &mbx.bgx_stats);
+ return;
+ case NIC_MBOX_MSG_LOOPBACK:
+ ret = nic_config_loopback(nic, &mbx.lbk);
+ break;
+ case NIC_MBOX_MSG_RESET_STAT_COUNTER:
+ ret = nic_reset_stat_counters(nic, vf, &mbx.reset_stat);
+ break;
+ case NIC_MBOX_MSG_PFC:
+ nic_pause_frame(nic, vf, &mbx.pfc);
+ return;
+ case NIC_MBOX_MSG_PTP_CFG:
+ nic_config_timestamp(nic, vf, &mbx.ptp);
+ break;
+ case NIC_MBOX_MSG_RESET_XCAST:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ bgx_reset_xcast_mode(nic->node, bgx, lmac,
+ vf < NIC_VF_PER_MBX_REG ? vf :
+ vf - NIC_VF_PER_MBX_REG);
+ break;
+
+ case NIC_MBOX_MSG_ADD_MCAST:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ bgx_set_dmac_cam_filter(nic->node, bgx, lmac,
+ mbx.xcast.mac,
+ vf < NIC_VF_PER_MBX_REG ? vf :
+ vf - NIC_VF_PER_MBX_REG);
+ break;
+
+ case NIC_MBOX_MSG_SET_XCAST:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode);
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ if (vf >= nic->num_vf_en) {
+ ret = -1; /* NACK */
+ break;
+ }
+ nic_link_status_get(nic, vf);
+ return;
+ default:
+ dev_err(&nic->pdev->dev,
+ "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+ break;
+ }
+
+ if (!ret) {
+ nic_mbx_send_ack(nic, vf);
+ } else if (mbx.msg.msg != NIC_MBOX_MSG_READY) {
+ dev_err(&nic->pdev->dev, "NACK for MBOX 0x%02x from VF %d\n",
+ mbx.msg.msg, vf);
+ nic_mbx_send_nack(nic, vf);
+ }
+}
+
+static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+ int mbx;
+ u64 intr;
+ u8 vf;
+
+ if (irq == nic->irq_allocated[NIC_PF_INTR_ID_MBOX0])
+ mbx = 0;
+ else
+ mbx = 1;
+
+ intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
+ dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+ for (vf = 0; vf < NIC_VF_PER_MBX_REG; vf++) {
+ if (intr & (1ULL << vf)) {
+ dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
+ vf + (mbx * NIC_VF_PER_MBX_REG));
+
+ nic_handle_mbx_intr(nic, vf +
+ (mbx * NIC_VF_PER_MBX_REG));
+ nic_clear_mbx_intr(nic, vf, mbx);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void nic_free_all_interrupts(struct nicpf *nic)
+{
+ int irq;
+
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->irq_allocated[irq], nic);
+ nic->irq_allocated[irq] = 0;
+ }
+}
+
+static int nic_register_interrupts(struct nicpf *nic)
+{
+ int i, ret, irq;
+
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
+
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(&nic->pdev->dev,
+ "Request for #%d msix vectors failed, returned %d\n",
+ nic->num_vec, ret);
+ return ret;
+ }
+
+ /* Register mailbox interrupt handler */
+ for (i = NIC_PF_INTR_ID_MBOX0; i < nic->num_vec; i++) {
+ sprintf(nic->irq_name[i],
+ "NICPF Mbox%d", (i - NIC_PF_INTR_ID_MBOX0));
+
+ irq = pci_irq_vector(nic->pdev, i);
+ ret = request_irq(irq, nic_mbx_intr_handler, 0,
+ nic->irq_name[i], nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[i] = irq;
+ }
+
+ /* Enable mailbox interrupt */
+ nic_enable_mbx_intr(nic);
+ return 0;
+
+fail:
+ dev_err(&nic->pdev->dev, "Request irq failed\n");
+ nic_free_all_interrupts(nic);
+ pci_free_irq_vectors(nic->pdev);
+ nic->num_vec = 0;
+ return ret;
+}
+
+static void nic_unregister_interrupts(struct nicpf *nic)
+{
+ nic_free_all_interrupts(nic);
+ pci_free_irq_vectors(nic->pdev);
+ nic->num_vec = 0;
+}
+
+static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
+{
+ int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
+ u16 total_vf;
+
+ /* Secondary Qsets are needed only if the CPU count is
+ * more than MAX_QUEUES_PER_QSET.
+ */
+ if (num_online_cpus() <= MAX_QUEUES_PER_QSET)
+ return 0;
+
+ /* Check if it's a multi-node environment */
+ if (nr_node_ids > 1)
+ sqs_per_vf = MAX_SQS_PER_VF;
+
+ pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
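+ /* Secondary Qsets come out of the VFs left over after the primary ones,
+ * capped at sqs_per_vf extra Qsets per primary VF.
+ */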
+ return min(total_vf - vf_en, vf_en * sqs_per_vf);
+}
+
+static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
+{
+ int pos = 0;
+ int vf_en;
+ int err;
+ u16 total_vf_cnt;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+ if (total_vf_cnt < nic->num_vf_en)
+ nic->num_vf_en = total_vf_cnt;
+
+ if (!total_vf_cnt)
+ return 0;
+
+ vf_en = nic->num_vf_en;
+ nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
+ vf_en += nic->num_sqs_en;
+
+ err = pci_enable_sriov(pdev, vf_en);
+ if (err) {
+ dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+ vf_en);
+ nic->num_vf_en = 0;
+ return err;
+ }
+
+ dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
+ vf_en);
+
+ nic->flags |= NIC_SRIOV_ENABLED;
+ return 0;
+}
+
+static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct nicpf *nic;
+ u8 max_lmac;
+ int err;
+
+ BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
+
+ nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
+ if (!nic)
+ return -ENOMEM;
+
+ nic->hw = devm_kzalloc(dev, sizeof(struct hw_info), GFP_KERNEL);
+ if (!nic->hw)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, nic);
+
+ nic->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ pci_set_drvdata(pdev, NULL);
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ /* MAP PF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ nic->node = nic_get_node_id(pdev);
+
+ /* Get HW capability info */
+ nic_get_hw_info(nic);
+
+ /* Allocate memory for LMAC tracking elements */
+ err = -ENOMEM;
+ max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
+
+ nic->vf_lmac_map = devm_kmalloc_array(dev, max_lmac, sizeof(u8),
+ GFP_KERNEL);
+ if (!nic->vf_lmac_map)
+ goto err_release_regions;
+
+ /* Initialize hardware */
+ nic_init_hw(nic);
+
+ nic_set_lmac_vf_mapping(nic);
+
+ /* Register interrupts */
+ err = nic_register_interrupts(nic);
+ if (err)
+ goto err_release_regions;
+
+ /* Configure SRIOV */
+ err = nic_sriov_init(pdev, nic);
+ if (err)
+ goto err_unregister_interrupts;
+
+ return 0;
+
+err_unregister_interrupts:
+ nic_unregister_interrupts(nic);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void nic_remove(struct pci_dev *pdev)
+{
+ struct nicpf *nic = pci_get_drvdata(pdev);
+
+ if (!nic)
+ return;
+
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+
+ nic_unregister_interrupts(nic);
+ pci_release_regions(pdev);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver nic_driver = {
+ .name = DRV_NAME,
+ .id_table = nic_id_table,
+ .probe = nic_probe,
+ .remove = nic_remove,
+};
+
+static int __init nic_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nic_driver);
+}
+
+static void __exit nic_cleanup_module(void)
+{
+ pci_unregister_driver(&nic_driver);
+}
+
+module_init(nic_init_module);
+module_exit(nic_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
new file mode 100644
index 000000000..b3bd24feb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define NIC_PF_REG_COUNT 29573
+#define NIC_VF_REG_COUNT 249
+
+/* Physical function register offsets */
+#define NIC_PF_CFG (0x0000)
+#define NIC_PF_STATUS (0x0010)
+#define NIC_PF_INTR_TIMER_CFG (0x0030)
+#define NIC_PF_BIST_STATUS (0x0040)
+#define NIC_PF_SOFT_RESET (0x0050)
+#define NIC_PF_TCP_TIMER (0x0060)
+#define NIC_PF_BP_CFG (0x0080)
+#define NIC_PF_RRM_CFG (0x0088)
+#define NIC_PF_CQM_CFG (0x00A0)
+#define NIC_PF_CNM_CF (0x00A8)
+#define NIC_PF_CNM_STATUS (0x00B0)
+#define NIC_PF_CQ_AVG_CFG (0x00C0)
+#define NIC_PF_RRM_AVG_CFG (0x00C8)
+#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
+#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
+#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
+#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
+#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
+#define NIC_PF_MAILBOX_INT (0x0410)
+#define NIC_PF_MAILBOX_INT_W1S (0x0430)
+#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
+#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
+#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_RX_GENEVE_DEF (0x0580)
+#define UDP_GENEVE_PORT_NUM 0x17C1ULL
+#define NIC_PF_RX_GENEVE_PROT_DEF (0x0588)
+#define IPV6_PROT 0x86DDULL
+#define IPV4_PROT 0x800ULL
+#define ET_PROT 0x6558ULL
+#define NIC_PF_RX_NVGRE_PROT_DEF (0x0598)
+#define NIC_PF_RX_VXLAN_DEF_0_1 (0x05A0)
+#define UDP_VXLAN_PORT_NUM 0x12B5
+#define NIC_PF_RX_VXLAN_PROT_DEF (0x05B0)
+#define IPV6_PROT_DEF 0x2ULL
+#define IPV4_PROT_DEF 0x1ULL
+#define ET_PROT_DEF 0x3ULL
+#define NIC_PF_RX_CFG (0x05D0)
+#define NIC_PF_PKIND_0_15_CFG (0x0600)
+#define NIC_PF_ECC0_FLIP0 (0x1000)
+#define NIC_PF_ECC1_FLIP0 (0x1008)
+#define NIC_PF_ECC2_FLIP0 (0x1010)
+#define NIC_PF_ECC3_FLIP0 (0x1018)
+#define NIC_PF_ECC0_FLIP1 (0x1080)
+#define NIC_PF_ECC1_FLIP1 (0x1088)
+#define NIC_PF_ECC2_FLIP1 (0x1090)
+#define NIC_PF_ECC3_FLIP1 (0x1098)
+#define NIC_PF_ECC0_CDIS (0x1100)
+#define NIC_PF_ECC1_CDIS (0x1108)
+#define NIC_PF_ECC2_CDIS (0x1110)
+#define NIC_PF_ECC3_CDIS (0x1118)
+#define NIC_PF_BIST0_STATUS (0x1280)
+#define NIC_PF_BIST1_STATUS (0x1288)
+#define NIC_PF_BIST2_STATUS (0x1290)
+#define NIC_PF_BIST3_STATUS (0x1298)
+#define NIC_PF_ECC0_SBE_INT (0x2000)
+#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
+#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
+#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
+#define NIC_PF_ECC0_DBE_INT (0x2100)
+#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
+#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
+#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
+#define NIC_PF_ECC1_SBE_INT (0x2200)
+#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
+#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
+#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
+#define NIC_PF_ECC1_DBE_INT (0x2300)
+#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
+#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
+#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
+#define NIC_PF_ECC2_SBE_INT (0x2400)
+#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
+#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
+#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
+#define NIC_PF_ECC2_DBE_INT (0x2500)
+#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
+#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
+#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
+#define NIC_PF_ECC3_SBE_INT (0x2600)
+#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
+#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
+#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
+#define NIC_PF_ECC3_DBE_INT (0x2700)
+#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
+#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
+#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_INTFX_SEND_CFG (0x4000)
+#define NIC_PF_MCAM_0_191_ENA (0x100000)
+#define NIC_PF_MCAM_0_191_M_0_5_DATA (0x110000)
+#define NIC_PF_MCAM_CTRL (0x120000)
+#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_MPI_0_2047_CFG (0x210000)
+#define NIC_PF_RSSI_0_4097_RQ (0x220000)
+#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_CFG2 (0x240100)
+#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
+#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
+#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
+#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
+#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
+#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
+#define NIC_PF_SW_SYNC_RX (0x490000)
+#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
+#define NIC_PF_TL2_0_63_CFG (0x500000)
+#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_LMAC (0x540000)
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
+#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
+#define NIC_PF_TL3_0_255_CFG (0x600000)
+#define NIC_PF_TL3_0_255_CHAN (0x620000)
+#define NIC_PF_TL3_0_255_PIR (0x640000)
+#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
+#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
+#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
+#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
+#define NIC_PF_TL4_0_1023_CFG (0x800000)
+#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
+#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
+#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
+#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
+#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
+#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
+#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
+#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
+#define NIC_PF_QSET_0_127_CFG (0x20010000)
+#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
+#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
+#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
+#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
+#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
+
+#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
+#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
+#define NIC_PF_MSIX_PBA_0 (0x0F0000)
+
+/* Virtual function register offsets */
+#define NIC_VNIC_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_QSET_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
+
+#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
+#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
+#define NIC_VF_MSIX_PBA (0x0F0000)
+
+/* Offsets within registers */
+#define NIC_MSIX_VEC_SHIFT 4
+#define NIC_Q_NUM_SHIFT 18
+#define NIC_QS_ID_SHIFT 21
+#define NIC_VF_NUM_SHIFT 21
+
+/* Port kind configuration register */
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63:22;
+ u64 hdr_sl:5; /* Header skip length */
+ u64 rx_hdr:3; /* TNS Receive header present */
+ u64 lenerr_en:1;/* L2 length error check enable */
+ u64 reserved_32_32:1;
+ u64 maxlen:16; /* Max frame size */
+ u64 minlen:16; /* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 minlen:16;
+ u64 maxlen:16;
+ u64 reserved_32_32:1;
+ u64 lenerr_en:1;
+ u64 rx_hdr:3;
+ u64 hdr_sl:5;
+ u64 reserved_42_63:22;
+#endif
+};
+
+#endif /* NIC_REG_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644
index 000000000..e5c71f907
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+/* ETHTOOL Support for VNIC_VF Device */
+
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/net_tstamp.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
+
+#define DRV_NAME "nicvf"
+
+struct nicvf_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
+
+static const struct nicvf_stat nicvf_hw_stats[] = {
+ NICVF_HW_STAT(rx_bytes),
+ NICVF_HW_STAT(rx_frames),
+ NICVF_HW_STAT(rx_ucast_frames),
+ NICVF_HW_STAT(rx_bcast_frames),
+ NICVF_HW_STAT(rx_mcast_frames),
+ NICVF_HW_STAT(rx_drops),
+ NICVF_HW_STAT(rx_drop_red),
+ NICVF_HW_STAT(rx_drop_red_bytes),
+ NICVF_HW_STAT(rx_drop_overrun),
+ NICVF_HW_STAT(rx_drop_overrun_bytes),
+ NICVF_HW_STAT(rx_drop_bcast),
+ NICVF_HW_STAT(rx_drop_mcast),
+ NICVF_HW_STAT(rx_drop_l3_bcast),
+ NICVF_HW_STAT(rx_drop_l3_mcast),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(tx_bytes),
+ NICVF_HW_STAT(tx_frames),
+ NICVF_HW_STAT(tx_ucast_frames),
+ NICVF_HW_STAT(tx_bcast_frames),
+ NICVF_HW_STAT(tx_mcast_frames),
+ NICVF_HW_STAT(tx_drops),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+ NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+ NICVF_DRV_STAT(rx_jabber_errs),
+ NICVF_DRV_STAT(rx_fcs_errs),
+ NICVF_DRV_STAT(rx_bgx_errs),
+ NICVF_DRV_STAT(rx_prel2_errs),
+ NICVF_DRV_STAT(rx_l2_hdr_malformed),
+ NICVF_DRV_STAT(rx_oversize),
+ NICVF_DRV_STAT(rx_undersize),
+ NICVF_DRV_STAT(rx_l2_len_mismatch),
+ NICVF_DRV_STAT(rx_l2_pclp),
+ NICVF_DRV_STAT(rx_ip_ver_errs),
+ NICVF_DRV_STAT(rx_ip_csum_errs),
+ NICVF_DRV_STAT(rx_ip_hdr_malformed),
+ NICVF_DRV_STAT(rx_ip_payload_malformed),
+ NICVF_DRV_STAT(rx_ip_ttl_errs),
+ NICVF_DRV_STAT(rx_l3_pclp),
+ NICVF_DRV_STAT(rx_l4_malformed),
+ NICVF_DRV_STAT(rx_l4_csum_errs),
+ NICVF_DRV_STAT(rx_udp_len_errs),
+ NICVF_DRV_STAT(rx_l4_port_errs),
+ NICVF_DRV_STAT(rx_tcp_flag_errs),
+ NICVF_DRV_STAT(rx_tcp_offset_errs),
+ NICVF_DRV_STAT(rx_l4_pclp),
+ NICVF_DRV_STAT(rx_truncated_pkts),
+
+ NICVF_DRV_STAT(tx_desc_fault),
+ NICVF_DRV_STAT(tx_hdr_cons_err),
+ NICVF_DRV_STAT(tx_subdesc_err),
+ NICVF_DRV_STAT(tx_max_size_exceeded),
+ NICVF_DRV_STAT(tx_imm_size_oflow),
+ NICVF_DRV_STAT(tx_data_seq_err),
+ NICVF_DRV_STAT(tx_mem_seq_err),
+ NICVF_DRV_STAT(tx_lock_viol),
+ NICVF_DRV_STAT(tx_data_fault),
+ NICVF_DRV_STAT(tx_tstmp_conflict),
+ NICVF_DRV_STAT(tx_tstmp_timeout),
+ NICVF_DRV_STAT(tx_mem_fault),
+ NICVF_DRV_STAT(tx_csum_overlap),
+ NICVF_DRV_STAT(tx_csum_overflow),
+
+ NICVF_DRV_STAT(tx_tso),
+ NICVF_DRV_STAT(tx_timeout),
+ NICVF_DRV_STAT(txq_stop),
+ NICVF_DRV_STAT(txq_wake),
+ NICVF_DRV_STAT(rcv_buffer_alloc_failures),
+ NICVF_DRV_STAT(page_alloc),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ u32 supported, advertising;
+
+ supported = 0;
+ advertising = 0;
+
+ if (!nic->link_up) {
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
+ return 0;
+ }
+
+ switch (nic->speed) {
+ case SPEED_1000:
+ cmd->base.port = PORT_MII | PORT_TP;
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ supported |= SUPPORTED_MII | SUPPORTED_TP;
+ supported |= SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_10baseT_Half;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half;
+ break;
+ case SPEED_10000:
+ if (nic->mac_type == BGX_MODE_RXAUI) {
+ cmd->base.port = PORT_TP;
+ supported |= SUPPORTED_TP;
+ } else {
+ cmd->base.port = PORT_FIBRE;
+ supported |= SUPPORTED_FIBRE;
+ }
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_10000baseT_Full;
+ break;
+ case SPEED_40000:
+ cmd->base.port = PORT_FIBRE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ supported |= SUPPORTED_FIBRE;
+ supported |= SUPPORTED_40000baseCR4_Full;
+ break;
+ }
+ cmd->base.duplex = nic->duplex;
+ cmd->base.speed = nic->speed;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ return 0;
+}
+
+static u32 nicvf_get_link(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->link_up;
+}
+
+static void nicvf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ nic->msg_enable = lvl;
+}
+
+static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
+{
+ int stats, qidx;
+ int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(*data, "rxq%d: %s", qidx + start_qidx,
+ nicvf_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(*data, "txq%d: %s", qidx + start_qidx,
+ nicvf_queue_stats[stats].name);
+ *data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
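+/* The string layout below (HW stats, per-CPU driver stats, per-queue stats
+ * for the primary and each secondary Qset, then BGX Rx/Tx stats) must stay
+ * in sync with nicvf_get_sset_count() and nicvf_get_ethtool_stats().
+ */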
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stats;
+ int sqs;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+ memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+ memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ nicvf_get_qset_strings(nic, &data, 0);
+
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ if (!nic->snicvf[sqs])
+ continue;
+ nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
+ }
+
+ for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int qstats_count;
+ int sqs;
+
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ qstats_count = nicvf_n_queue_stats *
+ (nic->qs->rq_cnt + nic->qs->sq_cnt);
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ struct nicvf *snic;
+
+ snic = nic->snicvf[sqs];
+ if (!snic)
+ continue;
+ qstats_count += nicvf_n_queue_stats *
+ (snic->qs->rq_cnt + snic->qs->sq_cnt);
+ }
+
+ return nicvf_n_hw_stats + nicvf_n_drv_stats +
+ qstats_count +
+ BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_qset_stats(struct nicvf *nic,
+ struct ethtool_stats *stats, u64 **data)
+{
+ int stat, qidx;
+
+ if (!nic)
+ return;
+
+ for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
+ nicvf_update_rq_stats(nic, qidx);
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
+ nicvf_update_sq_stats(nic, qidx);
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+}
+
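+/* Fill the stats buffer in the same order as nicvf_get_strings(): HW stats,
+ * driver stats summed over all possible CPUs, per-queue stats for every
+ * Qset, then the BGX LMAC Rx/Tx counters fetched from the PF.
+ */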
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stat, tmp_stats;
+ int sqs, cpu;
+
+ nicvf_update_stats(nic);
+
+ /* Update LMAC stats */
+ nicvf_update_lmac_stats(nic);
+
+ for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+ *(data++) = ((u64 *)&nic->hw_stats)
+ [nicvf_hw_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+ tmp_stats = 0;
+ for_each_possible_cpu(cpu)
+ tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+ [nicvf_drv_stats[stat].index];
+ *(data++) = tmp_stats;
+ }
+
+ nicvf_get_qset_stats(nic, stats, &data);
+
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ if (!nic->snicvf[sqs])
+ continue;
+ nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
+ }
+
+ for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.rx_stats[stat];
+ for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.tx_stats[stat];
+}
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+ return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
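+/* Dump VF CSRs in a fixed order: VNIC config, mailbox, interrupt and RSS
+ * registers, Tx/Rx statistics, then the per-queue CQ/RQ/SQ/RBDR registers.
+ * The number of entries written must not exceed the NIC_VF_REG_COUNT words
+ * reported by nicvf_get_regs_len() above.
+ */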
+static void nicvf_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *reg)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ u64 *p = (u64 *)reg;
+ u64 reg_offset;
+ int mbox, key, stat, q;
+ int i = 0;
+
+ regs->version = 0;
+ memset(p, 0, sizeof(u64) * NIC_VF_REG_COUNT);
+
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+ /* Mailbox registers */
+ for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+ /* Tx/Rx statistics */
+ for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+ for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+ /* All completion queue's registers */
+ for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+ }
+
+ /* All receive queue's registers */
+ for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+ /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
+ * produces bus errors when read
+ */
+ p[i++] = 0;
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS0, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS1, q);
+ reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+}
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+ return 0;
+}
+
+static void nicvf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+
+ ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
+ ring->rx_pending = qs->cq_len;
+ ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+ ring->tx_pending = qs->sq_len;
+}
+
+static int nicvf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ u32 rx_count, tx_count;
+
+ /* Due to HW errata this is not supported on T88 pass 1.x silicon */
+ if (pass1_silicon(nic->pdev))
+ return -EINVAL;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ tx_count = clamp_t(u32, ring->tx_pending,
+ MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
+ rx_count = clamp_t(u32, ring->rx_pending,
+ MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
+
+ if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
+ return 0;
+
+ /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
+ qs->sq_len = rounddown_pow_of_two(tx_count);
+ qs->cq_len = rounddown_pow_of_two(rx_count);
+
+ if (netif_running(netdev)) {
+ nicvf_stop(netdev);
+ nicvf_open(netdev);
+ }
+
+ return 0;
+}
+
+static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ info->data = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ fallthrough;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nicvf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nic->rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return nicvf_get_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int nicvf_set_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ if (!rss->enable)
+ netdev_err(nic->netdev,
+ "RSS is disabled, hash cannot be set\n");
+
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ info->flow_type, info->data);
+
+ if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_UDP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_UDP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_L4ETC);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = RSS_HASH_IP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
+ return 0;
+}
+
+static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return nicvf_set_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_SIZE * sizeof(u64);
+}
+
+static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ return nic->rss_info.rss_size;
+}
+
+static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, const u8 hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!rss->enable) {
+ netdev_err(nic->netdev,
+ "RSS is disabled, cannot change settings\n");
+ return -EIO;
+ }
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ nicvf_set_rss_key(nic);
+ }
+
+ nicvf_config_rss(nic);
+ return 0;
+}
+
+/* Get no of queues device supports and current queue count */
+static void nicvf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ memset(channel, 0, sizeof(*channel));
+
+ channel->max_rx = nic->max_queues;
+ channel->max_tx = nic->max_queues;
+
+ channel->rx_count = nic->rx_queues;
+ channel->tx_count = nic->tx_queues;
+}
+
+/* Set no of Tx, Rx queues to be used */
+static int nicvf_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int err = 0;
+ bool if_up = netif_running(dev);
+ u8 cqcount, txq_count;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+ if (channel->rx_count > nic->max_queues)
+ return -EINVAL;
+ if (channel->tx_count > nic->max_queues)
+ return -EINVAL;
+
+ if (nic->xdp_prog &&
+ ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
+ netdev_err(nic->netdev,
+ "XDP mode, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -EINVAL;
+ }
+
+ if (if_up)
+ nicvf_stop(dev);
+
+ nic->rx_queues = channel->rx_count;
+ nic->tx_queues = channel->tx_count;
+ if (!nic->xdp_prog)
+ nic->xdp_tx_queues = 0;
+ else
+ nic->xdp_tx_queues = channel->rx_count;
+
+ txq_count = nic->xdp_tx_queues + nic->tx_queues;
+ cqcount = max(nic->rx_queues, txq_count);
+
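+ /* If more completion queues are needed than a single Qset provides,
+ * compute how many additional (secondary) Qsets must be requested
+ * from the PF.
+ */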
+ if (cqcount > MAX_CMP_QUEUES_PER_QS) {
+ nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
+
+ nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
+ nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
+ if (err)
+ return err;
+
+ if (if_up)
+ nicvf_open(dev);
+
+ netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
+ nic->tx_queues, nic->rx_queues);
+
+ return err;
+}
+
+static void nicvf_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ union nic_mbx mbx = {};
+
+ /* Supported only for 10G/40G interfaces */
+ if ((nic->mac_type == BGX_MODE_SGMII) ||
+ (nic->mac_type == BGX_MODE_QSGMII) ||
+ (nic->mac_type == BGX_MODE_RGMII))
+ return;
+
+ mbx.pfc.msg = NIC_MBOX_MSG_PFC;
+ mbx.pfc.get = 1;
+ if (!nicvf_send_msg_to_pf(nic, &mbx)) {
+ pause->autoneg = nic->pfc.autoneg;
+ pause->rx_pause = nic->pfc.fc_rx;
+ pause->tx_pause = nic->pfc.fc_tx;
+ }
+}
+
+static int nicvf_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ union nic_mbx mbx = {};
+
+ /* Supported only for 10G/40G interfaces */
+ if ((nic->mac_type == BGX_MODE_SGMII) ||
+ (nic->mac_type == BGX_MODE_QSGMII) ||
+ (nic->mac_type == BGX_MODE_RGMII))
+ return -EOPNOTSUPP;
+
+ if (pause->autoneg)
+ return -EOPNOTSUPP;
+
+ mbx.pfc.msg = NIC_MBOX_MSG_PFC;
+ mbx.pfc.get = 0;
+ mbx.pfc.fc_rx = pause->rx_pause;
+ mbx.pfc.fc_tx = pause->tx_pause;
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return -EAGAIN;
+
+ nic->pfc.fc_rx = pause->rx_pause;
+ nic->pfc.fc_tx = pause->tx_pause;
+
+ return 0;
+}
+
+static int nicvf_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops nicvf_ethtool_ops = {
+ .get_link = nicvf_get_link,
+ .get_drvinfo = nicvf_get_drvinfo,
+ .get_msglevel = nicvf_get_msglevel,
+ .set_msglevel = nicvf_set_msglevel,
+ .get_strings = nicvf_get_strings,
+ .get_sset_count = nicvf_get_sset_count,
+ .get_ethtool_stats = nicvf_get_ethtool_stats,
+ .get_regs_len = nicvf_get_regs_len,
+ .get_regs = nicvf_get_regs,
+ .get_coalesce = nicvf_get_coalesce,
+ .get_ringparam = nicvf_get_ringparam,
+ .set_ringparam = nicvf_set_ringparam,
+ .get_rxnfc = nicvf_get_rxnfc,
+ .set_rxnfc = nicvf_set_rxnfc,
+ .get_rxfh_key_size = nicvf_get_rxfh_key_size,
+ .get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
+ .get_rxfh = nicvf_get_rxfh,
+ .set_rxfh = nicvf_set_rxfh,
+ .get_channels = nicvf_get_channels,
+ .set_channels = nicvf_set_channels,
+ .get_pauseparam = nicvf_get_pauseparam,
+ .set_pauseparam = nicvf_set_pauseparam,
+ .get_ts_info = nicvf_get_ts_info,
+ .get_link_ksettings = nicvf_get_link_ksettings,
+};
+
+void nicvf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &nicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
new file mode 100644
index 000000000..f2f95493e
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -0,0 +1,2323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+#include <linux/iommu.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/filter.h>
+#include <linux/net_tstamp.h>
+#include <linux/workqueue.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+#include "../common/cavium_ptp.h"
+
+#define DRV_NAME "nicvf"
+#define DRV_VERSION "1.0"
+
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_81XX_NIC_VF) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_83XX_NIC_VF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, 0444);
+MODULE_PARM_DESC(cpi_alg,
+ "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
+{
+ if (nic->sqs_mode)
+ return qidx + ((nic->sqs_id + 1) * MAX_CMP_QUEUES_PER_QS);
+ else
+ return qidx;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+/* VF -> PF mailbox communication */
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+ u64 *msg = (u64 *)mbx;
+
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
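+/* Send a mailbox message to the PF and wait for it to be ACKed or NACKed;
+ * pf_acked/pf_nacked are set from nicvf_handle_mbx_intr() in the mailbox
+ * interrupt path.
+ */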
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+ unsigned long timeout;
+ int ret = 0;
+
+ mutex_lock(&nic->rx_mode_mtx);
+
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+
+ nicvf_write_to_mbx(nic, mbx);
+
+ timeout = jiffies + msecs_to_jiffies(NIC_MBOX_MSG_TIMEOUT);
+ /* Wait for previous message to be acked, timeout 2sec */
+ while (!nic->pf_acked) {
+ if (nic->pf_nacked) {
+ netdev_err(nic->netdev,
+ "PF NACK to mbox msg 0x%02x from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
+ ret = -EINVAL;
+ break;
+ }
+ usleep_range(8000, 10000);
+ if (nic->pf_acked)
+ break;
+ if (time_after(jiffies, timeout)) {
+ netdev_err(nic->netdev,
+ "PF didn't ACK to mbox msg 0x%02x from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
+ ret = -EBUSY;
+ break;
+ }
+ }
+ mutex_unlock(&nic->rx_mode_mtx);
+ return ret;
+}
+
+/* Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated to.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_READY;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to READY msg\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void nicvf_send_cfg_done(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to CFG DONE msg\n");
+ }
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+ if (bgx->rx)
+ nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+ else
+ nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->pf_acked = true;
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ if (!nic->set_mac_pending)
+ eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr);
+ nic->sqs_mode = mbx.nic_cfg.sqs_mode;
+ nic->loopback_supported = mbx.nic_cfg.loopback_supported;
+ nic->link_up = false;
+ nic->duplex = 0;
+ nic->speed = 0;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->pf_acked = true;
+ if (nic->link_up != mbx.link_status.link_up) {
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ nic->mac_type = mbx.link_status.mac_type;
+ if (nic->link_up) {
+ netdev_info(nic->netdev,
+ "Link is Up %d Mbps %s duplex\n",
+ nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
+ netif_carrier_on(nic->netdev);
+ netif_tx_start_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "Link is Down\n");
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
+ }
+ break;
+ case NIC_MBOX_MSG_ALLOC_SQS:
+ nic->sqs_count = mbx.sqs_alloc.qs_count;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_SNICVF_PTR:
+ /* Primary VF: make note of secondary VF's pointer
+ * to be used during packet transmission.
+ */
+ nic->snicvf[mbx.nicvf.sqs_id] =
+ (struct nicvf *)mbx.nicvf.nicvf;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_PNICVF_PTR:
+ /* Secondary VF/Qset: make note of primary VF's pointer
+ * to be used during packet reception, to hand over packets
+ * to the primary VF's netdev.
+ */
+ nic->pnicvf = (struct nicvf *)mbx.nicvf.nicvf;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_PFC:
+ nic->pfc.autoneg = mbx.pfc.autoneg;
+ nic->pfc.fc_rx = mbx.pfc.fc_rx;
+ nic->pfc.fc_tx = mbx.pfc.fc_tx;
+ nic->pf_acked = true;
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+ break;
+ }
+ nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
+{
+ union nic_mbx mbx = {};
+
+ mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+ ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_rss_size(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.vf_id = nic->vf_id;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
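+/* Push the RSS indirection table to the PF in chunks of
+ * RSS_IND_TBL_LEN_PER_MBX_MSG entries per mailbox message.
+ */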
+void nicvf_config_rss(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int ind_tbl_len = rss->rss_size;
+ int i, nextq = 0;
+
+ mbx.rss_cfg.vf_id = nic->vf_id;
+ mbx.rss_cfg.hash_bits = rss->hash_bits;
+ while (ind_tbl_len) {
+ mbx.rss_cfg.tbl_offset = nextq;
+ mbx.rss_cfg.tbl_len = min(ind_tbl_len,
+ RSS_IND_TBL_LEN_PER_MBX_MSG);
+ mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+
+ for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
+ mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ ind_tbl_len -= mbx.rss_cfg.tbl_len;
+ }
+}
+
+void nicvf_set_rss_key(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
+ int idx;
+
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ nicvf_reg_write(nic, key_addr, rss->key[idx]);
+ key_addr += sizeof(u64);
+ }
+}
+
+static int nicvf_rss_init(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ nicvf_get_rss_size(nic);
+
+ if (cpi_alg != CPI_ALG_NONE) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return 0;
+ }
+
+ rss->enable = true;
+
+ netdev_rss_key_fill(rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+ nicvf_set_rss_key(nic);
+
+ rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
+
+ rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
+
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
+ nic->rx_queues);
+ nicvf_config_rss(nic);
+ return 1;
+}
+
+/* Request PF to allocate additional Qsets */
+static void nicvf_request_sqs(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ int sqs;
+ int sqs_count = nic->sqs_count;
+ int rx_queues = 0, tx_queues = 0;
+
+ /* Only primary VF should request */
+ if (nic->sqs_mode || !nic->sqs_count)
+ return;
+
+ mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
+ mbx.sqs_alloc.vf_id = nic->vf_id;
+ mbx.sqs_alloc.qs_count = nic->sqs_count;
+ if (nicvf_send_msg_to_pf(nic, &mbx)) {
+ /* No response from PF */
+ nic->sqs_count = 0;
+ return;
+ }
+
+ /* Return if no Secondary Qsets available */
+ if (!nic->sqs_count)
+ return;
+
+ if (nic->rx_queues > MAX_RCV_QUEUES_PER_QS)
+ rx_queues = nic->rx_queues - MAX_RCV_QUEUES_PER_QS;
+
+ tx_queues = nic->tx_queues + nic->xdp_tx_queues;
+ if (tx_queues > MAX_SND_QUEUES_PER_QS)
+ tx_queues = tx_queues - MAX_SND_QUEUES_PER_QS;
+
+ /* Set no of Rx/Tx queues in each of the SQsets */
+ for (sqs = 0; sqs < nic->sqs_count; sqs++) {
+ mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
+ mbx.nicvf.vf_id = nic->vf_id;
+ mbx.nicvf.sqs_id = sqs;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ nic->snicvf[sqs]->sqs_id = sqs;
+ if (rx_queues > MAX_RCV_QUEUES_PER_QS) {
+ nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;
+ rx_queues -= MAX_RCV_QUEUES_PER_QS;
+ } else {
+ nic->snicvf[sqs]->qs->rq_cnt = rx_queues;
+ rx_queues = 0;
+ }
+
+ if (tx_queues > MAX_SND_QUEUES_PER_QS) {
+ nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;
+ tx_queues -= MAX_SND_QUEUES_PER_QS;
+ } else {
+ nic->snicvf[sqs]->qs->sq_cnt = tx_queues;
+ tx_queues = 0;
+ }
+
+ nic->snicvf[sqs]->qs->cq_cnt =
+ max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);
+
+ /* Initialize secondary Qset's queues and its interrupts */
+ nicvf_open(nic->snicvf[sqs]->netdev);
+ }
+
+ /* Update stack with actual Rx/Tx queue count allocated */
+ if (sqs_count != nic->sqs_count)
+ nicvf_set_real_num_queues(nic->netdev,
+ nic->tx_queues, nic->rx_queues);
+}
+
+/* Send this Qset's nicvf pointer to PF.
+ * PF in turn sends primary VF's nicvf struct to secondary Qsets/VFs
+ * so that packets received by these Qsets can use primary VF's netdev
+ */
+static void nicvf_send_vf_struct(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_NICVF_PTR;
+ mbx.nicvf.sqs_mode = nic->sqs_mode;
+ mbx.nicvf.nicvf = (u64)nic;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_primary_vf_struct(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err = 0;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+ "Failed to set no of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+ "Failed to set no of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+ int err;
+
+ /* Enable Qset */
+ nicvf_qset_config(nic, true);
+
+ /* Initialize queues and HW for data transfer */
+ err = nicvf_config_data_transfer(nic, true);
+ if (err) {
+ netdev_err(nic->netdev,
+ "Failed to alloc/config VF's QSet resources\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
+ struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
+ struct rcv_queue *rq, struct sk_buff **skb)
+{
+ unsigned char *hard_start, *data;
+ struct xdp_buff xdp;
+ struct page *page;
+ u32 action;
+ u16 len, offset = 0;
+ u64 dma_addr, cpu_addr;
+ void *orig_data;
+
+ /* Retrieve packet buffer's DMA address and length */
+ len = *((u16 *)((void *)cqe_rx + (3 * sizeof(u64))));
+ dma_addr = *((u64 *)((void *)cqe_rx + (7 * sizeof(u64))));
+
+ cpu_addr = nicvf_iova_to_phys(nic, dma_addr);
+ if (!cpu_addr)
+ return false;
+ cpu_addr = (u64)phys_to_virt(cpu_addr);
+ page = virt_to_page((void *)cpu_addr);
+
+ xdp_init_buff(&xdp, RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ &rq->xdp_rxq);
+ hard_start = page_address(page);
+ data = (unsigned char *)cpu_addr;
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start, len, false);
+ orig_data = xdp.data;
+
+ action = bpf_prog_run_xdp(prog, &xdp);
+
+ len = xdp.data_end - xdp.data;
+ /* Check if XDP program has changed headers */
+ if (orig_data != xdp.data) {
+ offset = orig_data - xdp.data;
+ dma_addr -= offset;
+ }
+
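+ /* Act on the XDP verdict: XDP_PASS builds an SKB around the buffer and
+ * lets the caller hand it to the stack, XDP_TX queues the buffer on the
+ * XDP Tx queue, anything else drops the buffer and releases the page.
+ */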
+ switch (action) {
+ case XDP_PASS:
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+
+ /* Build SKB and pass on packet to network stack */
+ *skb = build_skb(xdp.data,
+ RCV_FRAG_LEN - cqe_rx->align_pad + offset);
+ if (!*skb)
+ put_page(page);
+ else
+ skb_put(*skb, len);
+ return false;
+ case XDP_TX:
+ nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
+ return true;
+ default:
+ bpf_warn_invalid_xdp_action(nic->netdev, prog, action);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(nic->netdev, prog, action);
+ fallthrough;
+ case XDP_DROP:
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ put_page(page);
+ return true;
+ }
+ return false;
+}
+
+static void nicvf_snd_ptp_handler(struct net_device *netdev,
+ struct cqe_send_t *cqe_tx)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct skb_shared_hwtstamps ts;
+ u64 ns;
+
+ nic = nic->pnicvf;
+
+ /* Sync for 'ptp_skb' */
+ smp_rmb();
+
+ /* New timestamp request can be queued now */
+ atomic_set(&nic->tx_ptp_skbs, 0);
+
+ /* Check for timestamp requested skb */
+ if (!nic->ptp_skb)
+ return;
+
+ /* Check if the timestamp request timed out; the timeout is set to 10us */
+ if (cqe_tx->send_status == CQ_TX_ERROP_TSTMP_TIMEOUT ||
+ cqe_tx->send_status == CQ_TX_ERROP_TSTMP_CONFLICT)
+ goto no_tstamp;
+
+ /* Get the timestamp */
+ memset(&ts, 0, sizeof(ts));
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock, cqe_tx->ptp_timestamp);
+ ts.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(nic->ptp_skb, &ts);
+
+no_tstamp:
+ /* Free the original skb */
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ /* Sync 'ptp_skb' */
+ smp_wmb();
+}
+
+static void nicvf_snd_pkt_handler(struct net_device *netdev,
+ struct cqe_send_t *cqe_tx,
+ int budget, int *subdesc_cnt,
+ unsigned int *tx_pkts, unsigned int *tx_bytes)
+{
+ struct sk_buff *skb = NULL;
+ struct page *page;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct snd_queue *sq;
+ struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
+
+ sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+ return;
+
+ /* Check for errors */
+ if (cqe_tx->send_status)
+ nicvf_check_cqe_tx_errs(nic->pnicvf, cqe_tx);
+
+ /* Is this a XDP designated Tx queue */
+ if (sq->is_xdp) {
+ page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr];
+ /* Check if it's recycled page or else unmap DMA mapping */
+ if (page && (page_ref_count(page) == 1))
+ nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+ hdr->subdesc_cnt);
+
+ /* Release page reference for recycling */
+ if (page)
+ put_page(page);
+ sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL;
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
+ return;
+ }
+
+ skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
+ if (skb) {
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and free them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
+ *subdesc_cnt += tso_sqe->subdesc_cnt + 1;
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+ hdr->subdesc_cnt);
+ }
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
+ prefetch(skb);
+ (*tx_pkts)++;
+ *tx_bytes += skb->len;
+ /* If timestamp is requested for this skb, don't free it */
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ !nic->pnicvf->ptp_skb)
+ nic->pnicvf->ptp_skb = skb;
+ else
+ napi_consume_skb(skb, budget);
+ sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
+ } else {
+ /* In case of SW TSO on 88xx, only last segment will have
+ * a SKB attached, so just free SQEs here.
+ */
+ if (!nic->hw_tso)
+ *subdesc_cnt += hdr->subdesc_cnt + 1;
+ }
+}
+
+static inline void nicvf_set_rxhash(struct net_device *netdev,
+ struct cqe_rx_t *cqe_rx,
+ struct sk_buff *skb)
+{
+ u8 hash_type;
+ u32 hash;
+
+ if (!(netdev->features & NETIF_F_RXHASH))
+ return;
+
+ switch (cqe_rx->rss_alg) {
+ case RSS_ALG_TCP_IP:
+ case RSS_ALG_UDP_IP:
+ hash_type = PKT_HASH_TYPE_L4;
+ hash = cqe_rx->rss_tag;
+ break;
+ case RSS_ALG_IP:
+ hash_type = PKT_HASH_TYPE_L3;
+ hash = cqe_rx->rss_tag;
+ break;
+ default:
+ hash_type = PKT_HASH_TYPE_NONE;
+ hash = 0;
+ }
+
+ skb_set_hash(skb, hash, hash_type);
+}
+
+static inline void nicvf_set_rxtstamp(struct nicvf *nic, struct sk_buff *skb)
+{
+ u64 ns;
+
+ if (!nic->ptp_clock || !nic->hw_rx_tstamp)
+ return;
+
+ /* The first 8 bytes is the timestamp */
+ ns = cavium_ptp_tstamp2time(nic->ptp_clock,
+ be64_to_cpu(*(__be64 *)skb->data));
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
+
+ __skb_pull(skb, 8);
+}
+
+static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+ struct napi_struct *napi,
+ struct cqe_rx_t *cqe_rx,
+ struct snd_queue *sq, struct rcv_queue *rq)
+{
+ struct sk_buff *skb = NULL;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
+ int err = 0;
+ int rq_idx;
+
+ rq_idx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
+
+ if (nic->sqs_mode) {
+ /* Use primary VF's 'nicvf' struct */
+ nic = nic->pnicvf;
+ netdev = nic->netdev;
+ }
+
+ /* Check for errors */
+ if (cqe_rx->err_level || cqe_rx->err_opcode) {
+ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+ }
+
+ /* For XDP, ignore pkts spanning multiple pages */
+ if (nic->xdp_prog && (cqe_rx->rb_cnt == 1)) {
+ /* Packet consumed by XDP */
+ if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb))
+ return;
+ } else {
+ skb = nicvf_get_rcv_skb(snic, cqe_rx,
+ nic->xdp_prog ? true : false);
+ }
+
+ if (!skb)
+ return;
+
+ if (netif_msg_pktdata(nic)) {
+ netdev_info(nic->netdev, "skb 0x%p, len=%d\n", skb, skb->len);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+ }
+
+ /* If error packet, drop it here */
+ if (err) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ nicvf_set_rxtstamp(nic, skb);
+ nicvf_set_rxhash(netdev, cqe_rx, skb);
+
+ skb_record_rx_queue(skb, rq_idx);
+ if (netdev->hw_features & NETIF_F_RXCSUM) {
+ /* HW by default verifies TCP/UDP/SCTP checksums */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* Check for stripped VLAN */
+ if (cqe_rx->vlan_found && cqe_rx->vlan_stripped)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ ntohs((__force __be16)cqe_rx->vlan_tci));
+
+ if (napi && (netdev->features & NETIF_F_GRO))
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+ struct napi_struct *napi, int budget)
+{
+ int processed_cqe, work_done = 0, tx_done = 0;
+ int cqe_count, cqe_head;
+ int subdesc_cnt = 0;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct cmp_queue *cq = &qs->cq[cq_idx];
+ struct cqe_rx_t *cq_desc;
+ struct netdev_queue *txq;
+ struct snd_queue *sq = &qs->sq[cq_idx];
+ struct rcv_queue *rq = &qs->rq[cq_idx];
+ unsigned int tx_pkts = 0, tx_bytes = 0, txq_idx;
+
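+ /* Process valid CQEs in batches: Rx CQEs are handed to the stack (or
+ * XDP), send completions free Tx descriptors, and the doorbell is rung
+ * so the HW can reuse the processed entries.
+ */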
+ spin_lock_bh(&cq->lock);
+loop:
+ processed_cqe = 0;
+ /* Get no of valid CQ entries to process */
+ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
+ cqe_count &= CQ_CQE_COUNT;
+ if (!cqe_count)
+ goto done;
+
+ /* Get head of the valid CQ entries */
+ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
+ cqe_head &= 0xFFFF;
+
+ while (processed_cqe < cqe_count) {
+ /* Get the CQ descriptor */
+ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+ cqe_head++;
+ cqe_head &= (cq->dmem.q_len - 1);
+ /* Initiate prefetch for next descriptor */
+ prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+ if ((work_done >= budget) && napi &&
+ (cq_desc->cqe_type != CQE_TYPE_SEND)) {
+ break;
+ }
+
+ switch (cq_desc->cqe_type) {
+ case CQE_TYPE_RX:
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq);
+ work_done++;
+ break;
+ case CQE_TYPE_SEND:
+ nicvf_snd_pkt_handler(netdev, (void *)cq_desc,
+ budget, &subdesc_cnt,
+ &tx_pkts, &tx_bytes);
+ tx_done++;
+ break;
+ case CQE_TYPE_SEND_PTP:
+ nicvf_snd_ptp_handler(netdev, (void *)cq_desc);
+ break;
+ case CQE_TYPE_INVALID:
+ case CQE_TYPE_RX_SPLIT:
+ case CQE_TYPE_RX_TCP:
+ /* Ignore for now */
+ break;
+ }
+ processed_cqe++;
+ }
+
+ /* Ring doorbell to inform H/W to reuse processed CQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+ cq_idx, processed_cqe);
+
+ if ((work_done < budget) && napi)
+ goto loop;
+
+done:
+ /* Update SQ's descriptor free count */
+ if (subdesc_cnt)
+ nicvf_put_sq_desc(sq, subdesc_cnt);
+
+ txq_idx = nicvf_netdev_qidx(nic, cq_idx);
+ /* Handle XDP TX queues */
+ if (nic->pnicvf->xdp_prog) {
+ if (txq_idx < nic->pnicvf->xdp_tx_queues) {
+ nicvf_xdp_sq_doorbell(nic, sq, cq_idx);
+ goto out;
+ }
+ nic = nic->pnicvf;
+ txq_idx -= nic->pnicvf->xdp_tx_queues;
+ }
+
+ /* Wake up the TXQ if it was stopped earlier due to the SQ being full */
+ if (tx_done ||
+ (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) {
+ netdev = nic->pnicvf->netdev;
+ txq = netdev_get_tx_queue(netdev, txq_idx);
+ if (tx_pkts)
+ netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+
+ /* To read updated queue and carrier status */
+ smp_mb();
+ if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
+ netif_tx_wake_queue(txq);
+ nic = nic->pnicvf;
+ this_cpu_inc(nic->drv_stats->txq_wake);
+ netif_warn(nic, tx_err, netdev,
+ "Transmit queue wakeup SQ%d\n", txq_idx);
+ }
+ }
+
+out:
+ spin_unlock_bh(&cq->lock);
+ return work_done;
+}
+
+static int nicvf_poll(struct napi_struct *napi, int budget)
+{
+ u64 cq_head;
+ int work_done = 0;
+ struct net_device *netdev = napi->dev;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_cq_poll *cq;
+
+ cq = container_of(napi, struct nicvf_cq_poll, napi);
+ work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
+
+ if (work_done < budget) {
+ /* Slow packet rate, exit polling */
+ napi_complete_done(napi, work_done);
+ /* Re-enable interrupts */
+ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx, cq_head);
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ }
+ return work_done;
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+static void nicvf_handle_qs_err(struct tasklet_struct *t)
+{
+ struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
+ struct queue_set *qs = nic->qs;
+ int qidx;
+ u64 status;
+
+ netif_tx_disable(nic->netdev);
+
+ /* Check if it is CQ err */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+ qidx);
+ if (!(status & CQ_ERR_MASK))
+ continue;
+ /* Process already queued CQEs and reconfig CQ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_sq_disable(nic, qidx);
+ nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
+ nicvf_cmp_queue_config(nic, qs, qidx, true);
+ nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
+ nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+ }
+
+ netif_tx_start_all_queues(nic->netdev);
+ /* Re-enable Qset error interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+}
+
+static void nicvf_dump_intr_status(struct nicvf *nic)
+{
+ netif_info(nic, intr, nic->netdev, "interrupt status 0x%llx\n",
+ nicvf_reg_read(nic, NIC_VF_INT));
+}
+
+static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ u64 intr;
+
+ nicvf_dump_intr_status(nic);
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ /* Check for spurious interrupt */
+ if (!(intr & NICVF_INTR_MBOX_MASK))
+ return IRQ_HANDLED;
+
+ nicvf_handle_mbx_intr(nic);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
+{
+ struct nicvf_cq_poll *cq_poll = (struct nicvf_cq_poll *)cq_irq;
+ struct nicvf *nic = cq_poll->nicvf;
+ int qidx = cq_poll->cq_idx;
+
+ nicvf_dump_intr_status(nic);
+
+ /* Disable interrupts */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Schedule NAPI */
+ napi_schedule_irqoff(&cq_poll->napi);
+
+ /* Clear interrupt */
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_rbdr_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ u8 qidx;
+
+ nicvf_dump_intr_status(nic);
+
+ /* Disable RBDR interrupt and schedule softirq */
+ for (qidx = 0; qidx < nic->qs->rbdr_cnt; qidx++) {
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
+ continue;
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ tasklet_hi_schedule(&nic->rbdr_task);
+ /* Clear interrupt */
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_qs_err_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+
+ nicvf_dump_intr_status(nic);
+
+ /* Disable Qset err interrupt and schedule softirq */
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ tasklet_hi_schedule(&nic->qs_err_task);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ return IRQ_HANDLED;
+}
+
+static void nicvf_set_irq_affinity(struct nicvf *nic)
+{
+ int vec, cpu;
+
+ for (vec = 0; vec < nic->num_vec; vec++) {
+ if (!nic->irq_allocated[vec])
+ continue;
+
+ if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
+ return;
+ /* CQ interrupts */
+ if (vec < NICVF_INTR_ID_SQ)
+ /* Leave CPU0 for RBDR and other interrupts */
+ cpu = nicvf_netdev_qidx(nic, vec) + 1;
+ else
+ cpu = 0;
+
+ cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
+ nic->affinity_mask[vec]);
+ irq_set_affinity_hint(pci_irq_vector(nic->pdev, vec),
+ nic->affinity_mask[vec]);
+ }
+}
+
+static int nicvf_register_interrupts(struct nicvf *nic)
+{
+ int irq, ret = 0;
+
+ for_each_cq_irq(irq)
+ sprintf(nic->irq_name[irq], "%s-rxtx-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq));
+
+ for_each_sq_irq(irq)
+ sprintf(nic->irq_name[irq], "%s-sq-%d",
+ nic->pnicvf->netdev->name,
+ nicvf_netdev_qidx(nic, irq - NICVF_INTR_ID_SQ));
+
+ for_each_rbdr_irq(irq)
+ sprintf(nic->irq_name[irq], "%s-rbdr-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
+
+ /* Register CQ interrupts */
+ for (irq = 0; irq < nic->qs->cq_cnt; irq++) {
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic->napi[irq]);
+ if (ret)
+ goto err;
+ nic->irq_allocated[irq] = true;
+ }
+
+ /* Register RBDR interrupt */
+ for (irq = NICVF_INTR_ID_RBDR;
+ irq < (NICVF_INTR_ID_RBDR + nic->qs->rbdr_cnt); irq++) {
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_rbdr_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ goto err;
+ nic->irq_allocated[irq] = true;
+ }
+
+ /* Register QS error interrupt */
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR], "%s-qset-err-%d",
+ nic->pnicvf->netdev->name,
+ nic->sqs_mode ? (nic->sqs_id + 1) : 0);
+ irq = NICVF_INTR_ID_QS_ERR;
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_qs_err_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ goto err;
+
+ nic->irq_allocated[irq] = true;
+
+ /* Set IRQ affinities */
+ nicvf_set_irq_affinity(nic);
+
+err:
+ if (ret)
+ netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
+
+ return ret;
+}
+
+static void nicvf_unregister_interrupts(struct nicvf *nic)
+{
+ struct pci_dev *pdev = nic->pdev;
+ int irq;
+
+ /* Free registered interrupts */
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (!nic->irq_allocated[irq])
+ continue;
+
+ irq_set_affinity_hint(pci_irq_vector(pdev, irq), NULL);
+ free_cpumask_var(nic->affinity_mask[irq]);
+
+ if (irq < NICVF_INTR_ID_SQ)
+ free_irq(pci_irq_vector(pdev, irq), nic->napi[irq]);
+ else
+ free_irq(pci_irq_vector(pdev, irq), nic);
+
+ nic->irq_allocated[irq] = false;
+ }
+
+ /* Disable MSI-X */
+ pci_free_irq_vectors(pdev);
+ nic->num_vec = 0;
+}
+
+/* Initialize MSIX vectors and register MISC interrupt.
+ * Send READY message to PF to check if it's alive
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+ int ret = 0;
+ int irq = NICVF_INTR_ID_MISC;
+
+ /* Return if mailbox interrupt is already registered */
+ if (nic->pdev->msix_enabled)
+ return 0;
+
+ /* Enable MSI-X */
+ nic->num_vec = pci_msix_vec_count(nic->pdev);
+ ret = pci_alloc_irq_vectors(nic->pdev, nic->num_vec, nic->num_vec,
+ PCI_IRQ_MSIX);
+ if (ret < 0) {
+ netdev_err(nic->netdev,
+ "Req for #%d msix vectors failed\n", nic->num_vec);
+ return ret;
+ }
+
+ sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
+ /* Register Misc interrupt */
+ ret = request_irq(pci_irq_vector(nic->pdev, irq),
+ nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+ if (ret)
+ return ret;
+ nic->irq_allocated[irq] = true;
+
+ /* Enable mailbox interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ /* Check if VF is able to communicate with PF */
+ if (!nicvf_check_pf_ready(nic)) {
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int qid = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+ struct nicvf *snic;
+ struct snd_queue *sq;
+ int tmp;
+
+ /* Check for minimum packet length */
+ if (skb->len <= ETH_HLEN) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* In XDP case, initial HW tx queues are used for XDP,
+ * but stack's queue mapping starts at '0', so skip the
+ * Tx queues attached to Rx queues for XDP.
+ */
+ if (nic->xdp_prog)
+ qid += nic->xdp_tx_queues;
+
+ snic = nic;
+ /* Get secondary Qset's SQ structure */
+ if (qid >= MAX_SND_QUEUES_PER_QS) {
+ tmp = qid / MAX_SND_QUEUES_PER_QS;
+ snic = (struct nicvf *)nic->snicvf[tmp - 1];
+ if (!snic) {
+ netdev_warn(nic->netdev,
+ "Secondary Qset#%d's ptr not initialized\n",
+ tmp - 1);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ qid = qid % MAX_SND_QUEUES_PER_QS;
+ }
+
+ sq = &snic->qs->sq[qid];
+ if (!netif_tx_queue_stopped(txq) &&
+ !nicvf_sq_append_skb(snic, sq, skb, qid)) {
+ netif_tx_stop_queue(txq);
+
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb();
+
+ /* Check again, in case another CPU freed descriptors */
+ if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) {
+ netif_tx_wake_queue(txq);
+ } else {
+ this_cpu_inc(nic->drv_stats->txq_stop);
+ netif_warn(nic, tx_err, netdev,
+ "Transmit ring full, stopping SQ%d\n", qid);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static inline void nicvf_free_cq_poll(struct nicvf *nic)
+{
+ struct nicvf_cq_poll *cq_poll;
+ int qidx;
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ nic->napi[qidx] = NULL;
+ kfree(cq_poll);
+ }
+}
+
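+/* Bring the interface down: notify the PF, tear down secondary Qsets first,
+ * quiesce interrupts and NAPI, then free queue resources and IRQ vectors.
+ */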
+int nicvf_stop(struct net_device *netdev)
+{
+ int irq, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+ union nic_mbx mbx = {};
+
+ /* wait till all queued set_rx_mode tasks complete */
+ if (nic->nicvf_rx_mode_wq) {
+ cancel_delayed_work_sync(&nic->link_change_work);
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+ }
+
+ mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ nic->link_up = false;
+
+ /* Teardown secondary qsets first */
+ if (!nic->sqs_mode) {
+ for (qidx = 0; qidx < nic->sqs_count; qidx++) {
+ if (!nic->snicvf[qidx])
+ continue;
+ nicvf_stop(nic->snicvf[qidx]->netdev);
+ nic->snicvf[qidx] = NULL;
+ }
+ }
+
+ /* Disable RBDR & QS error interrupts */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Wait for pending IRQ handlers to finish */
+ for (irq = 0; irq < nic->num_vec; irq++)
+ synchronize_irq(pci_irq_vector(nic->pdev, irq));
+
+ tasklet_kill(&nic->rbdr_task);
+ tasklet_kill(&nic->qs_err_task);
+ if (nic->rb_work_scheduled)
+ cancel_delayed_work_sync(&nic->rbdr_work);
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ napi_synchronize(&cq_poll->napi);
+ /* The CQ interrupt is re-enabled when NAPI completes (see nicvf_poll),
+ * so disable it now
+ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ }
+
+ netif_tx_disable(netdev);
+
+ for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
+
+ /* Free resources */
+ nicvf_config_data_transfer(nic, false);
+
+ /* Disable HW Qset */
+ nicvf_qset_config(nic, false);
+
+ /* disable mailbox interrupt */
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ nicvf_unregister_interrupts(nic);
+
+ nicvf_free_cq_poll(nic);
+
+ /* Free any pending SKB saved to receive timestamp */
+ if (nic->ptp_skb) {
+ dev_kfree_skb_any(nic->ptp_skb);
+ nic->ptp_skb = NULL;
+ }
+
+ /* Clear multiqset info */
+ nic->pnicvf = nic;
+
+ return 0;
+}
+
+static int nicvf_config_hw_rx_tstamp(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+
+ mbx.ptp.msg = NIC_MBOX_MSG_PTP_CFG;
+ mbx.ptp.enable = enable;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
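+/* Poll the PF for link state changes; this work re-queues itself on the
+ * rx_mode workqueue every 2 seconds and is cancelled from nicvf_stop().
+ */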
+static void nicvf_link_status_check_task(struct work_struct *work_arg)
+{
+ struct nicvf *nic = container_of(work_arg,
+ struct nicvf,
+ link_change_work.work);
+ union nic_mbx mbx = {};
+ mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 2 * HZ);
+}
+
+int nicvf_open(struct net_device *netdev)
+{
+ int cpu, err, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+	/* wait till all queued set_rx_mode tasks complete, if any */
+ if (nic->nicvf_rx_mode_wq)
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+
+ netif_carrier_off(netdev);
+
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ return err;
+
+ /* Register NAPI handler for processing CQEs */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+ if (!cq_poll) {
+ err = -ENOMEM;
+ goto napi_del;
+ }
+ cq_poll->cq_idx = qidx;
+ cq_poll->nicvf = nic;
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
+ napi_enable(&cq_poll->napi);
+ nic->napi[qidx] = cq_poll;
+ }
+
+	/* Check if we got a MAC address from the PF, else generate a random MAC */
+ if (!nic->sqs_mode && is_zero_ether_addr(netdev->dev_addr)) {
+ eth_hw_addr_random(netdev);
+ nicvf_hw_set_mac_addr(nic, netdev);
+ }
+
+ if (nic->set_mac_pending) {
+ nic->set_mac_pending = false;
+ nicvf_hw_set_mac_addr(nic, netdev);
+ }
+
+ /* Init tasklet for handling Qset err interrupt */
+ tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
+
+ /* Init RBDR tasklet which will refill RBDR */
+ tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
+ INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+	/* Configure CPI algorithm */
+ nic->cpi_alg = cpi_alg;
+ if (!nic->sqs_mode)
+ nicvf_config_cpi(nic);
+
+ nicvf_request_sqs(nic);
+ if (nic->sqs_mode)
+ nicvf_get_primary_vf_struct(nic);
+
+ /* Configure PTP timestamp */
+ if (nic->ptp_clock)
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+ atomic_set(&nic->tx_ptp_skbs, 0);
+ nic->ptp_skb = NULL;
+
+ /* Configure receive side scaling and MTU */
+ if (!nic->sqs_mode) {
+ nicvf_rss_init(nic);
+ err = nicvf_update_hw_max_frs(nic, netdev->mtu);
+ if (err)
+ goto cleanup;
+
+ /* Clear percpu stats */
+ for_each_possible_cpu(cpu)
+ memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+ sizeof(struct nicvf_drv_stats));
+ }
+
+ err = nicvf_register_interrupts(nic);
+ if (err)
+ goto cleanup;
+
+ /* Initialize the queues */
+ err = nicvf_init_resources(nic);
+ if (err)
+ goto cleanup;
+
+ /* Make sure queue initialization is written */
+ wmb();
+
+ nicvf_reg_write(nic, NIC_VF_INT, -1);
+ /* Enable Qset err interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Enable completion queue interrupt */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Enable RBDR threshold interrupt */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+ /* Send VF config done msg to PF */
+ nicvf_send_cfg_done(nic);
+
+ if (nic->nicvf_rx_mode_wq) {
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
+ }
+
+ return 0;
+cleanup:
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+ tasklet_kill(&nic->qs_err_task);
+ tasklet_kill(&nic->rbdr_task);
+napi_del:
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ }
+ nicvf_free_cq_poll(nic);
+ return err;
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int orig_mtu = netdev->mtu;
+
+	/* For now, support only the usual MTU-sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
+
+ netdev->mtu = new_mtu;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (nicvf_update_hw_max_frs(nic, new_mtu)) {
+ netdev->mtu = orig_mtu;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ if (nic->pdev->msix_enabled) {
+ if (nicvf_hw_set_mac_addr(nic, netdev))
+ return -EBUSY;
+ } else {
+ nic->set_mac_pending = true;
+ }
+
+ return 0;
+}
+
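+/* Fetch BGX (LMAC) Rx and Tx statistics from the PF, requesting one
+ * counter index per mailbox message.
+ */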
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+ int stat = 0;
+ union nic_mbx mbx = {};
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = nic->vf_id;
+ /* Rx stats */
+ mbx.bgx_stats.rx = 1;
+ while (stat < BGX_RX_STATS_COUNT) {
+ mbx.bgx_stats.idx = stat;
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return;
+ stat++;
+ }
+
+ stat = 0;
+
+ /* Tx stats */
+ mbx.bgx_stats.rx = 0;
+ while (stat < BGX_TX_STATS_COUNT) {
+ mbx.bgx_stats.idx = stat;
+ if (nicvf_send_msg_to_pf(nic, &mbx))
+ return;
+ stat++;
+ }
+}
+
+void nicvf_update_stats(struct nicvf *nic)
+{
+ int qidx, cpu;
+ u64 tmp_stats = 0;
+ struct nicvf_hw_stats *stats = &nic->hw_stats;
+ struct nicvf_drv_stats *drv_stats;
+ struct queue_set *qs = nic->qs;
+
+#define GET_RX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define GET_TX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
+
+ stats->rx_bytes = GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames = GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames = GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames = GET_RX_STATS(RX_MCAST);
+ stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
+ stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
+ stats->rx_drop_red = GET_RX_STATS(RX_RED);
+ stats->rx_drop_red_bytes = GET_RX_STATS(RX_RED_OCTS);
+ stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_overrun_bytes = GET_RX_STATS(RX_ORUN_OCTS);
+ stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
+ stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
+ stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
+ stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
+
+ stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
+ stats->tx_drops = GET_TX_STATS(TX_DROP);
+
+	/* On T88 pass 2.0, the dummy SQE added for TSO notification
+	 * via CQE has 'dont_send' set. Hence HW drops the pkt pointed
+	 * to by the dummy SQE, which results in the tx_drops counter
+	 * being incremented. Subtracting tx_drops from the tx_tso
+	 * counter gives the exact tx_drops count.
+ */
+ if (nic->t88 && nic->hw_tso) {
+ for_each_possible_cpu(cpu) {
+ drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+ tmp_stats += drv_stats->tx_tso;
+ }
+ stats->tx_drops = tmp_stats - stats->tx_drops;
+ }
+ stats->tx_frames = stats->tx_ucast_frames +
+ stats->tx_bcast_frames +
+ stats->tx_mcast_frames;
+ stats->rx_frames = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
+ stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
+
+ /* Update RQ and SQ stats */
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_update_rq_stats(nic, qidx);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_update_sq_stats(nic, qidx);
+}
+
+static void nicvf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
+
+ nicvf_update_stats(nic);
+
+ stats->rx_bytes = hw_stats->rx_bytes;
+ stats->rx_packets = hw_stats->rx_frames;
+ stats->rx_dropped = hw_stats->rx_drops;
+ stats->multicast = hw_stats->rx_mcast_frames;
+
+ stats->tx_bytes = hw_stats->tx_bytes;
+ stats->tx_packets = hw_stats->tx_frames;
+ stats->tx_dropped = hw_stats->tx_drops;
+}
+
+static void nicvf_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ netif_warn(nic, tx_err, dev, "Transmit timed out, resetting\n");
+
+ this_cpu_inc(nic->drv_stats->tx_timeout);
+ schedule_work(&nic->reset_task);
+}
+
+static void nicvf_reset_task(struct work_struct *work)
+{
+ struct nicvf *nic;
+
+ nic = container_of(work, struct nicvf, reset_task);
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ nicvf_stop(nic->netdev);
+ nicvf_open(nic->netdev);
+ netif_trans_update(nic->netdev);
+}
+
+static int nicvf_config_loopback(struct nicvf *nic,
+ netdev_features_t features)
+{
+ union nic_mbx mbx = {};
+
+ mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK;
+ mbx.lbk.vf_id = nic->vf_id;
+ mbx.lbk.enable = (features & NETIF_F_LOOPBACK) != 0;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static netdev_features_t nicvf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if ((features & NETIF_F_LOOPBACK) &&
+ netif_running(netdev) && !nic->loopback_supported)
+ features &= ~NETIF_F_LOOPBACK;
+
+ return features;
+}
+
+static int nicvf_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ netdev_features_t changed = features ^ netdev->features;
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_RX)
+ nicvf_config_vlan_stripping(nic, features);
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
+ return nicvf_config_loopback(nic, features);
+
+ return 0;
+}
+
+static void nicvf_set_xdp_queues(struct nicvf *nic, bool bpf_attached)
+{
+ u8 cq_count, txq_count;
+
+ /* Set XDP Tx queue count same as Rx queue count */
+ if (!bpf_attached)
+ nic->xdp_tx_queues = 0;
+ else
+ nic->xdp_tx_queues = nic->rx_queues;
+
+	/* If queue count > MAX_CMP_QUEUES_PER_QS, then additional Qsets
+	 * need to be allocated; check how many.
+ */
+ txq_count = nic->xdp_tx_queues + nic->tx_queues;
+ cq_count = max(nic->rx_queues, txq_count);
+ if (cq_count > MAX_CMP_QUEUES_PER_QS) {
+ nic->sqs_count = roundup(cq_count, MAX_CMP_QUEUES_PER_QS);
+ nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
+ } else {
+ nic->sqs_count = 0;
+ }
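+	/* Example: if cq_count above comes to three times
+	 * MAX_CMP_QUEUES_PER_QS, roundup() leaves it unchanged and the
+	 * division yields 3, i.e. two secondary Qsets are needed in
+	 * addition to the primary one.
+	 */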
+
+ /* Set primary Qset's resources */
+ nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
+ nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
+ nic->qs->cq_cnt = max_t(u8, nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ /* Update stack */
+ nicvf_set_real_num_queues(nic->netdev, nic->tx_queues, nic->rx_queues);
+}
+
+static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
+{
+ struct net_device *dev = nic->netdev;
+ bool if_up = netif_running(nic->netdev);
+ struct bpf_prog *old_prog;
+ bool bpf_attached = false;
+ int ret = 0;
+
+	/* For now, support only the usual MTU-sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (prog && dev->mtu > MAX_XDP_MTU) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ dev->mtu);
+ return -EOPNOTSUPP;
+ }
+
+	/* All SQs attached to CQs (i.e. the same count as RQs) are treated
+	 * as XDP Tx queues, and additional Tx queues are allocated for the
+	 * network stack to send packets out.
+	 *
+	 * The number of stack Tx queues is either the same as the number of
+	 * Rx queues or whatever is left within the maximum queue count.
+	 */
+ if ((nic->rx_queues + nic->tx_queues) > nic->max_queues) {
+ netdev_warn(dev,
+ "Failed to attach BPF prog, RXQs + TXQs > Max %d\n",
+ nic->max_queues);
+ return -ENOMEM;
+ }
+
+ if (if_up)
+ nicvf_stop(nic->netdev);
+
+ old_prog = xchg(&nic->xdp_prog, prog);
+ /* Detach old prog, if any */
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (nic->xdp_prog) {
+ /* Attach BPF program */
+ bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1);
+ bpf_attached = true;
+ }
+
+ /* Calculate Tx queues needed for XDP and network stack */
+ nicvf_set_xdp_queues(nic, bpf_attached);
+
+ if (if_up) {
+ /* Reinitialize interface, clean slate */
+ nicvf_open(nic->netdev);
+ netif_trans_update(nic->netdev);
+ }
+
+ return ret;
+}
+
+static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ /* To avoid checks while retrieving buffer address from CQE_RX,
+	 * do not support XDP for T88 pass1.x silicons, which are not
+	 * widely in use anyway.
+ */
+ if (pass1_silicon(nic->pdev))
+ return -EOPNOTSUPP;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return nicvf_xdp_setup(nic, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!nic->ptp_clock)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ nic->hw_rx_tstamp = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ nic->hw_rx_tstamp = true;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (netif_running(netdev))
+ nicvf_config_hw_rx_tstamp(nic, nic->hw_rx_tstamp);
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int nicvf_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return nicvf_config_hwtstamp(netdev, req);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs,
+ struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ int idx;
+
+	/* From inside the VM code flow we have only 128 bits of memory
+	 * available to send a message to the host's PF, so send all mc
+	 * addrs one by one, starting with a flush command in case the
+	 * kernel requests specific MAC filtering.
+	 */
+
+ /* flush DMAC filters and reset RX mode */
+ mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST;
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
+
+ if (mode & BGX_XCAST_MCAST_FILTER) {
+		/* Once filtering is enabled, we need to signal the PF to add
+		 * its own LMAC to the filter so that it accepts packets
+		 * destined to it.
+		 */
+ mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
+ mbx.xcast.mac = 0;
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
+ }
+
+ /* check if we have any specific MACs to be added to PF DMAC filter */
+ if (mc_addrs) {
+ /* now go through kernel list of MACs and add them one by one */
+ for (idx = 0; idx < mc_addrs->count; idx++) {
+ mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
+ mbx.xcast.mac = mc_addrs->mc[idx];
+ if (nicvf_send_msg_to_pf(nic, &mbx) < 0)
+ goto free_mc;
+ }
+ }
+
+ /* and finally set rx mode for PF accordingly */
+ mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST;
+ mbx.xcast.mode = mode;
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+free_mc:
+ kfree(mc_addrs);
+}
+
+static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
+{
+ struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work,
+ work);
+ struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
+ u8 mode;
+ struct xcast_addr_list *mc;
+
+	/* Save message data locally to prevent it from being
+	 * overwritten by the next ndo_set_rx_mode() call.
+	 */
+ spin_lock_bh(&nic->rx_mode_wq_lock);
+ mode = vf_work->mode;
+ mc = vf_work->mc;
+ vf_work->mc = NULL;
+ spin_unlock_bh(&nic->rx_mode_wq_lock);
+
+ __nicvf_set_rx_mode_task(mode, mc, nic);
+}
+
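+/* ndo_set_rx_mode() can be called in atomic context, while the mailbox
+ * exchange with the PF may sleep, so the requested mode and multicast
+ * list are saved under the lock and handed off to the ordered
+ * nicvf_rx_mode_wq workqueue.
+ */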
+static void nicvf_set_rx_mode(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct netdev_hw_addr *ha;
+ struct xcast_addr_list *mc_list = NULL;
+ u8 mode = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT;
+ } else {
+ if (netdev->flags & IFF_BROADCAST)
+ mode |= BGX_XCAST_BCAST_ACCEPT;
+
+ if (netdev->flags & IFF_ALLMULTI) {
+ mode |= BGX_XCAST_MCAST_ACCEPT;
+ } else if (netdev->flags & IFF_MULTICAST) {
+ mode |= BGX_XCAST_MCAST_FILTER;
+ /* here we need to copy mc addrs */
+ if (netdev_mc_count(netdev)) {
+ mc_list = kmalloc(struct_size(mc_list, mc,
+ netdev_mc_count(netdev)),
+ GFP_ATOMIC);
+ if (unlikely(!mc_list))
+ return;
+ mc_list->count = 0;
+ netdev_hw_addr_list_for_each(ha, &netdev->mc) {
+ mc_list->mc[mc_list->count] =
+ ether_addr_to_u64(ha->addr);
+ mc_list->count++;
+ }
+ }
+ }
+ }
+ spin_lock(&nic->rx_mode_wq_lock);
+ kfree(nic->rx_mode_work.mc);
+ nic->rx_mode_work.mc = mc_list;
+ nic->rx_mode_work.mode = mode;
+ queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work);
+ spin_unlock(&nic->rx_mode_wq_lock);
+}
+
+static const struct net_device_ops nicvf_netdev_ops = {
+ .ndo_open = nicvf_open,
+ .ndo_stop = nicvf_stop,
+ .ndo_start_xmit = nicvf_xmit,
+ .ndo_change_mtu = nicvf_change_mtu,
+ .ndo_set_mac_address = nicvf_set_mac_address,
+ .ndo_get_stats64 = nicvf_get_stats64,
+ .ndo_tx_timeout = nicvf_tx_timeout,
+ .ndo_fix_features = nicvf_fix_features,
+ .ndo_set_features = nicvf_set_features,
+ .ndo_bpf = nicvf_xdp,
+ .ndo_eth_ioctl = nicvf_ioctl,
+ .ndo_set_rx_mode = nicvf_set_rx_mode,
+};
+
+static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct nicvf *nic;
+ int err, qcount;
+ u16 sdevid;
+ struct cavium_ptp *ptp_clock;
+
+ ptp_clock = cavium_ptp_get();
+ if (IS_ERR(ptp_clock)) {
+ if (PTR_ERR(ptp_clock) == -ENODEV)
+			/* In a virtualized environment we proceed without ptp */
+ ptp_clock = NULL;
+ else
+ return PTR_ERR(ptp_clock);
+ }
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ qcount = netif_get_num_default_rss_queues();
+
+ /* Restrict multiqset support only for host bound VFs */
+ if (pdev->is_virtfn) {
+ /* Set max number of queues per VF */
+ qcount = min_t(int, num_online_cpus(),
+ (MAX_SQS_PER_VF + 1) * MAX_CMP_QUEUES_PER_QS);
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct nicvf), qcount, qcount);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ nic = netdev_priv(netdev);
+ nic->netdev = netdev;
+ nic->pdev = pdev;
+ nic->pnicvf = nic;
+ nic->max_queues = qcount;
+	/* If the number of CPUs is too low, there won't be any queues
+	 * left for XDP_TX, hence double it.
+ */
+ if (!nic->t88)
+ nic->max_queues *= 2;
+ nic->ptp_clock = ptp_clock;
+
+ /* Initialize mutex that serializes usage of VF's mailbox */
+ mutex_init(&nic->rx_mode_mtx);
+
+ /* MAP VF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+ if (!nic->drv_stats) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = nicvf_set_qset_resources(nic);
+ if (err)
+ goto err_free_netdev;
+
+ /* Check if PF is alive and get MAC address for this VF */
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ goto err_free_netdev;
+
+ nicvf_send_vf_struct(nic);
+
+ if (!pass1_silicon(nic->pdev))
+ nic->hw_tso = true;
+
+ /* Get iommu domain for iova to physical addr conversion */
+ nic->iommu_domain = iommu_get_domain_for_dev(dev);
+
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if (sdevid == 0xA134)
+ nic->t88 = true;
+
+ /* Check if this VF is in QS only mode */
+ if (nic->sqs_mode)
+ return 0;
+
+ err = nicvf_set_real_num_queues(netdev, nic->tx_queues, nic->rx_queues);
+ if (err)
+ goto err_unregister_interrupts;
+
+ netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_CTAG_RX);
+
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->features |= netdev->hw_features;
+ netdev->hw_features |= NETIF_F_LOOPBACK;
+
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+
+ netdev->netdev_ops = &nicvf_netdev_ops;
+ netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
+
+ /* MTU range: 64 - 9200 */
+ netdev->min_mtu = NIC_HW_MIN_FRS;
+ netdev->max_mtu = NIC_HW_MAX_FRS;
+
+ INIT_WORK(&nic->reset_task, nicvf_reset_task);
+
+ nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d",
+ WQ_MEM_RECLAIM,
+ nic->vf_id);
+ if (!nic->nicvf_rx_mode_wq) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to allocate work queue\n");
+ goto err_unregister_interrupts;
+ }
+
+ INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task);
+ spin_lock_init(&nic->rx_mode_wq_lock);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_destroy_workqueue;
+ }
+
+ nic->msg_enable = debug;
+
+ nicvf_set_ethtool_ops(netdev);
+
+ return 0;
+
+err_destroy_workqueue:
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
+err_unregister_interrupts:
+ nicvf_unregister_interrupts(nic);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void nicvf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nicvf *nic;
+ struct net_device *pnetdev;
+
+ if (!netdev)
+ return;
+
+ nic = netdev_priv(netdev);
+ pnetdev = nic->pnicvf->netdev;
+
+ /* Check if this Qset is assigned to different VF.
+ * If yes, clean primary and all secondary Qsets.
+ */
+ if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED))
+ unregister_netdev(pnetdev);
+ if (nic->nicvf_rx_mode_wq) {
+ destroy_workqueue(nic->nicvf_rx_mode_wq);
+ nic->nicvf_rx_mode_wq = NULL;
+ }
+ nicvf_unregister_interrupts(nic);
+ pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
+ cavium_ptp_put(nic->ptp_clock);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static void nicvf_shutdown(struct pci_dev *pdev)
+{
+ nicvf_remove(pdev);
+}
+
+static struct pci_driver nicvf_driver = {
+ .name = DRV_NAME,
+ .id_table = nicvf_id_table,
+ .probe = nicvf_probe,
+ .remove = nicvf_remove,
+ .shutdown = nicvf_shutdown,
+};
+
+static int __init nicvf_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+ return pci_register_driver(&nicvf_driver);
+}
+
+static void __exit nicvf_cleanup_module(void)
+{
+ pci_unregister_driver(&nicvf_driver);
+}
+
+module_init(nicvf_init_module);
+module_exit(nicvf_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
new file mode 100644
index 000000000..06397cc8b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -0,0 +1,1972 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/iommu.h>
+#include <net/ip.h>
+#include <net/tso.h>
+#include <uapi/linux/bpf.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data);
+static void nicvf_get_page(struct nicvf *nic)
+{
+ if (!nic->rb_pageref || !nic->rb_page)
+ return;
+
+ page_ref_add(nic->rb_page, nic->rb_pageref);
+ nic->rb_pageref = 0;
+}
+
+/* Poll a register for a specific value */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+ u64 reg, int bit_pos, int bits, int val)
+{
+ u64 bit_mask;
+ u64 reg_val;
+ int timeout = 10;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
+ return 1;
+}
+
+/* Allocate memory for a queue's descriptors */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+ int q_len, int desc_size, int align_bytes)
+{
+ dmem->q_len = q_len;
+ dmem->size = (desc_size * q_len) + align_bytes;
+ /* Save address, need it while freeing */
+ dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
+ &dmem->dma, GFP_KERNEL);
+ if (!dmem->unalign_base)
+ return -ENOMEM;
+
+ /* Align memory address for 'align_bytes' */
+ dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+ dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+ return 0;
+}
+
+/* Free queue's descriptor memory */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+ if (!dmem)
+ return;
+
+ dma_free_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base, dmem->dma);
+ dmem->unalign_base = NULL;
+ dmem->base = NULL;
+}
+
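+/* Number of extra page references taken in one go when recycling pages
+ * in XDP mode, to amortize the cost of atomic refcount updates (see
+ * nicvf_alloc_page() below).
+ */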
+#define XDP_PAGE_REFCNT_REFILL 256
+
+/* Allocate a new page or recycle one if possible
+ *
+ * We cannot optimize dma mapping here, since
+ * 1. There is only one RBDR ring for 8 Rx queues.
+ * 2. CQE_RX gives address of the buffer where pkt has been DMA'ed
+ * and not idx into RBDR ring, so can't refer to saved info.
+ * 3. There are multiple receive buffers per page
+ */
+static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
+ struct rbdr *rbdr, gfp_t gfp)
+{
+ int ref_count;
+ struct page *page = NULL;
+ struct pgcache *pgcache, *next;
+
+ /* Check if page is already allocated */
+ pgcache = &rbdr->pgcache[rbdr->pgidx];
+ page = pgcache->page;
+ /* Check if page can be recycled */
+ if (page) {
+ ref_count = page_ref_count(page);
+ /* This page can be recycled if internal ref_count and page's
+ * ref_count are equal, indicating that the page has been used
+ * once for packet transmission. For non-XDP mode, internal
+ * ref_count is always '1'.
+ */
+ if (rbdr->is_xdp) {
+ if (ref_count == pgcache->ref_count)
+ pgcache->ref_count--;
+ else
+ page = NULL;
+ } else if (ref_count != 1) {
+ page = NULL;
+ }
+ }
+
+ if (!page) {
+ page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
+ if (!page)
+ return NULL;
+
+ this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);
+
+ /* Check for space */
+ if (rbdr->pgalloc >= rbdr->pgcnt) {
+ /* Page can still be used */
+ nic->rb_page = page;
+ return NULL;
+ }
+
+ /* Save the page in page cache */
+ pgcache->page = page;
+ pgcache->dma_addr = 0;
+ pgcache->ref_count = 0;
+ rbdr->pgalloc++;
+ }
+
+ /* Take additional page references for recycling */
+ if (rbdr->is_xdp) {
+		/* Since there is a single RBDR (i.e. a single core doing
+		 * page recycling) per 8 Rx queues, in XDP mode adjusting
+		 * page references atomically is the biggest bottleneck, so
+		 * take a bunch of references at a time.
+		 *
+		 * Hence the reference counts below differ by '1'.
+ */
+ if (!pgcache->ref_count) {
+ pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
+ page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
+ }
+ } else {
+ /* In non-XDP case, single 64K page is divided across multiple
+ * receive buffers, so cost of recycling is less anyway.
+ * So we can do with just one extra reference.
+ */
+ page_ref_add(page, 1);
+ }
+
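+	/* Advance to the next page cache slot; pgcnt is rounded up to a
+	 * power of two at RBDR init, so masking with (pgcnt - 1) wraps the
+	 * index.
+	 */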
+ rbdr->pgidx++;
+ rbdr->pgidx &= (rbdr->pgcnt - 1);
+
+ /* Prefetch refcount of next page in page cache */
+ next = &rbdr->pgcache[rbdr->pgidx];
+ page = next->page;
+ if (page)
+ prefetch(&page->_refcount);
+
+ return pgcache;
+}
+
+/* Allocate buffer for packet reception */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
+ gfp_t gfp, u32 buf_len, u64 *rbuf)
+{
+ struct pgcache *pgcache = NULL;
+
+	/* Check if the request can be accommodated in the previously
+	 * allocated page. But in XDP mode only one buffer per page is
+	 * permitted.
+	 */
+ if (!rbdr->is_xdp && nic->rb_page &&
+ ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
+ nic->rb_pageref++;
+ goto ret;
+ }
+
+ nicvf_get_page(nic);
+ nic->rb_page = NULL;
+
+ /* Get new page, either recycled or new one */
+ pgcache = nicvf_alloc_page(nic, rbdr, gfp);
+ if (!pgcache && !nic->rb_page) {
+ this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
+ return -ENOMEM;
+ }
+
+ nic->rb_page_offset = 0;
+
+ /* Reserve space for header modifications by BPF program */
+ if (rbdr->is_xdp)
+ buf_len += XDP_PACKET_HEADROOM;
+
+ /* Check if it's recycled */
+ if (pgcache)
+ nic->rb_page = pgcache->page;
+ret:
+ if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
+ *rbuf = pgcache->dma_addr;
+ } else {
+ /* HW will ensure data coherency, CPU sync not required */
+ *rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+ nic->rb_page_offset, buf_len,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+ if (!nic->rb_page_offset)
+ __free_pages(nic->rb_page, 0);
+ nic->rb_page = NULL;
+ return -ENOMEM;
+ }
+ if (pgcache)
+ pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
+ nic->rb_page_offset += buf_len;
+ }
+
+ return 0;
+}
+
+/* Build skb around receive buffer */
+static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
+ u64 rb_ptr, int len)
+{
+ void *data;
+ struct sk_buff *skb;
+
+ data = phys_to_virt(rb_ptr);
+
+ /* Now build an skb to give to stack */
+ skb = build_skb(data, RCV_FRAG_LEN);
+ if (!skb) {
+ put_page(virt_to_page(data));
+ return NULL;
+ }
+
+ prefetch(skb->data);
+ return skb;
+}
+
+/* Allocate RBDR ring and populate receive buffers */
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+ int ring_len, int buf_size)
+{
+ int idx;
+ u64 rbuf;
+ struct rbdr_entry_t *desc;
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+ sizeof(struct rbdr_entry_t),
+ NICVF_RCV_BUF_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ rbdr->desc = rbdr->dmem.base;
+ /* Buffer size has to be in multiples of 128 bytes */
+ rbdr->dma_size = buf_size;
+ rbdr->enable = true;
+ rbdr->thresh = RBDR_THRESH;
+ rbdr->head = 0;
+ rbdr->tail = 0;
+
+ /* Initialize page recycling stuff.
+ *
+	 * Can't use a single buffer per page, especially with 64K pages.
+	 * On embedded platforms, i.e. 81xx/83xx, available memory itself
+	 * is low and the minimum RBDR ring size is 8K, which takes away
+	 * lots of memory.
+ *
+ * But for XDP it has to be a single buffer per page.
+ */
+ if (!nic->pnicvf->xdp_prog) {
+ rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
+ rbdr->is_xdp = false;
+ } else {
+ rbdr->pgcnt = ring_len;
+ rbdr->is_xdp = true;
+ }
+ rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
+ rbdr->pgcache = kcalloc(rbdr->pgcnt, sizeof(*rbdr->pgcache),
+ GFP_KERNEL);
+ if (!rbdr->pgcache)
+ return -ENOMEM;
+ rbdr->pgidx = 0;
+ rbdr->pgalloc = 0;
+
+ nic->rb_page = NULL;
+ for (idx = 0; idx < ring_len; idx++) {
+ err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
+ RCV_FRAG_LEN, &rbuf);
+ if (err) {
+ /* To free already allocated and mapped ones */
+ rbdr->tail = idx - 1;
+ return err;
+ }
+
+ desc = GET_RBDR_DESC(rbdr, idx);
+ desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
+ }
+
+ nicvf_get_page(nic);
+
+ return 0;
+}
+
+/* Free RBDR ring and its receive buffers */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+ int head, tail;
+ u64 buf_addr, phys_addr;
+ struct pgcache *pgcache;
+ struct rbdr_entry_t *desc;
+
+ if (!rbdr)
+ return;
+
+ rbdr->enable = false;
+ if (!rbdr->dmem.base)
+ return;
+
+ head = rbdr->head;
+ tail = rbdr->tail;
+
+ /* Release page references */
+ while (head != tail) {
+ desc = GET_RBDR_DESC(rbdr, head);
+ buf_addr = desc->buf_addr;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
+ head++;
+ head &= (rbdr->dmem.q_len - 1);
+ }
+ /* Release buffer of tail desc */
+ desc = GET_RBDR_DESC(rbdr, tail);
+ buf_addr = desc->buf_addr;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
+
+ /* Sync page cache info */
+ smp_rmb();
+
+ /* Release additional page references held for recycling */
+ head = 0;
+ while (head < rbdr->pgcnt) {
+ pgcache = &rbdr->pgcache[head];
+ if (pgcache->page && page_ref_count(pgcache->page) != 0) {
+ if (rbdr->is_xdp) {
+ page_ref_sub(pgcache->page,
+ pgcache->ref_count - 1);
+ }
+ put_page(pgcache->page);
+ }
+ head++;
+ }
+
+ /* Free RBDR ring */
+ nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers.
+ */
+static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
+{
+ struct queue_set *qs = nic->qs;
+ int rbdr_idx = qs->rbdr_cnt;
+ int tail, qcount;
+ int refill_rb_cnt;
+ struct rbdr *rbdr;
+ struct rbdr_entry_t *desc;
+ u64 rbuf;
+ int new_rb = 0;
+
+refill:
+ if (!rbdr_idx)
+ return;
+ rbdr_idx--;
+ rbdr = &qs->rbdr[rbdr_idx];
+ /* Check if it's enabled */
+ if (!rbdr->enable)
+ goto next_rbdr;
+
+	/* Get the number of descriptors to be refilled */
+ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
+ qcount &= 0x7FFFF;
+	/* Doorbell can be rung with a max of ring size minus 1 */
+ if (qcount >= (qs->rbdr_len - 1))
+ goto next_rbdr;
+ else
+ refill_rb_cnt = qs->rbdr_len - qcount - 1;
+
+ /* Sync page cache info */
+ smp_rmb();
+
+ /* Start filling descs from tail */
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
+ while (refill_rb_cnt) {
+ tail++;
+ tail &= (rbdr->dmem.q_len - 1);
+
+ if (nicvf_alloc_rcv_buffer(nic, rbdr, gfp, RCV_FRAG_LEN, &rbuf))
+ break;
+
+ desc = GET_RBDR_DESC(rbdr, tail);
+ desc->buf_addr = rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
+ refill_rb_cnt--;
+ new_rb++;
+ }
+
+ nicvf_get_page(nic);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Check if buffer allocation failed */
+ if (refill_rb_cnt)
+ nic->rb_alloc_fail = true;
+ else
+ nic->rb_alloc_fail = false;
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ rbdr_idx, new_rb);
+next_rbdr:
+ /* Re-enable RBDR interrupts only if buffer allocation is success */
+ if (!nic->rb_alloc_fail && rbdr->enable &&
+ netif_running(nic->pnicvf->netdev))
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
+
+ if (rbdr_idx)
+ goto refill;
+}
+
+/* Alloc rcv buffers in non-atomic mode for better success */
+void nicvf_rbdr_work(struct work_struct *work)
+{
+ struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
+
+ nicvf_refill_rbdr(nic, GFP_KERNEL);
+ if (nic->rb_alloc_fail)
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ else
+ nic->rb_work_scheduled = false;
+}
+
+/* In Softirq context, alloc rcv buffers in atomic mode */
+void nicvf_rbdr_task(struct tasklet_struct *t)
+{
+ struct nicvf *nic = from_tasklet(nic, t, rbdr_task);
+
+ nicvf_refill_rbdr(nic, GFP_ATOMIC);
+ if (nic->rb_alloc_fail) {
+ nic->rb_work_scheduled = true;
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ }
+}
+
+/* Initialize completion queue */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+ struct cmp_queue *cq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
+ NICVF_CQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ cq->desc = cq->dmem.base;
+ cq->thresh = pass1_silicon(nic->pdev) ? 0 : CMP_QUEUE_CQE_THRESH;
+ nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
+
+ return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+ if (!cq)
+ return;
+ if (!cq->dmem.base)
+ return;
+
+ nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Initialize transmit queue */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+ struct snd_queue *sq, int q_len, int qidx)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
+ NICVF_SQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ sq->desc = sq->dmem.base;
+ sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->skbuff)
+ return -ENOMEM;
+
+ sq->head = 0;
+ sq->tail = 0;
+ sq->thresh = SND_QUEUE_THRESH;
+
+ /* Check if this SQ is a XDP TX queue */
+ if (nic->sqs_mode)
+ qidx += ((nic->sqs_id + 1) * MAX_SND_QUEUES_PER_QS);
+ if (qidx < nic->pnicvf->xdp_tx_queues) {
+ /* Alloc memory to save page pointers for XDP_TX */
+ sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->xdp_page)
+ return -ENOMEM;
+ sq->xdp_desc_cnt = 0;
+ sq->xdp_free_cnt = q_len - 1;
+ sq->is_xdp = true;
+ } else {
+ sq->xdp_page = NULL;
+ sq->xdp_desc_cnt = 0;
+ sq->xdp_free_cnt = 0;
+ sq->is_xdp = false;
+
+ atomic_set(&sq->free_cnt, q_len - 1);
+
+ /* Preallocate memory for TSO segment's header */
+ sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+ q_len * TSO_HEADER_SIZE,
+ &sq->tso_hdrs_phys,
+ GFP_KERNEL);
+ if (!sq->tso_hdrs)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
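+/* Walk the gather subdescriptors that follow an SQ header subdescriptor
+ * and unmap their DMA buffers.
+ */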
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt)
+{
+ u8 idx;
+ struct sq_gather_subdesc *gather;
+
+ /* Unmap DMA mapped skb data buffers */
+ for (idx = 0; idx < subdesc_cnt; idx++) {
+ hdr_sqe++;
+ hdr_sqe &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
+ gather->size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+ struct sk_buff *skb;
+ struct page *page;
+ struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
+
+ if (!sq)
+ return;
+ if (!sq->dmem.base)
+ return;
+
+ if (sq->tso_hdrs) {
+ dma_free_coherent(&nic->pdev->dev,
+ sq->dmem.q_len * TSO_HEADER_SIZE,
+ sq->tso_hdrs, sq->tso_hdrs_phys);
+ sq->tso_hdrs = NULL;
+ }
+
+ /* Free pending skbs in the queue */
+ smp_rmb();
+ while (sq->head != sq->tail) {
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (!skb || !sq->xdp_page)
+ goto next;
+
+ page = (struct page *)sq->xdp_page[sq->head];
+ if (!page)
+ goto next;
+ else
+ put_page(page);
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and unmap them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, sq->head,
+ hdr->subdesc_cnt);
+ }
+ if (skb)
+ dev_kfree_skb_any(skb);
+next:
+ sq->head++;
+ sq->head &= (sq->dmem.q_len - 1);
+ }
+ kfree(sq->skbuff);
+ kfree(sq->xdp_page);
+ nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+ /* Check if SQ is stopped */
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+ return;
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ union nic_mbx mbx = {};
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+	/* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+ struct rbdr *rbdr, int qidx)
+{
+ u64 tmp, fifo_state;
+ int timeout = 10;
+
+	/* Save head and tail pointers for freeing up buffers */
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD,
+ qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL,
+ qidx) >> 3;
+
+ /* If RBDR FIFO is in 'FAIL' state then do a reset first
+	 * before reclaiming.
+ */
+ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ if (((fifo_state >> 62) & 0x03) == 0x3)
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
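+	/* Wait for the RBDR prefetch status to settle: poll until the lower
+	 * and upper 32-bit halves of the PREFETCH_STATUS register match.
+	 */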
+ while (1) {
+ tmp = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+ qidx);
+ if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "Failed polling on prefetch status\n");
+ return;
+ }
+ }
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+}
+
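+/* Enable or disable stripping of the first (outer) VLAN tag on receive,
+ * and mirror the setting to any secondary Qsets.
+ */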
+void nicvf_config_vlan_stripping(struct nicvf *nic, netdev_features_t features)
+{
+ u64 rq_cfg;
+ int sqs;
+
+ rq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_RQ_GEN_CFG, 0);
+
+ /* Enable first VLAN stripping */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ rq_cfg |= (1ULL << 25);
+ else
+ rq_cfg &= ~(1ULL << 25);
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+
+ /* Configure Secondary Qsets, if any */
+ for (sqs = 0; sqs < nic->sqs_count; sqs++)
+ if (nic->snicvf[sqs])
+ nicvf_queue_reg_write(nic->snicvf[sqs],
+ NIC_QSET_RQ_GEN_CFG, 0, rq_cfg);
+}
+
+static void nicvf_reset_rcv_queue_stats(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ /* Reset all RQ/SQ and VF stats */
+ mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rx_stat_mask = 0x3FFF;
+ mbx.reset_stat.tx_stat_mask = 0x1F;
+ mbx.reset_stat.rq_stat_mask = 0xFFFF;
+ mbx.reset_stat.sq_stat_mask = 0xFFFF;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct rcv_queue *rq;
+ struct rq_cfg rq_cfg;
+
+ rq = &qs->rq[qidx];
+ rq->enable = enable;
+
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+ if (!rq->enable) {
+ nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ xdp_rxq_info_unreg(&rq->xdp_rxq);
+ return;
+ }
+
+ rq->cq_qs = qs->vnic_id;
+ rq->cq_idx = qidx;
+ rq->start_rbdr_qs = qs->vnic_id;
+ rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ rq->cont_rbdr_qs = qs->vnic_id;
+ rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+	/* all writes of RBDR data to be loaded into L2 Cache as well */
+ rq->caching = 1;
+
+	/* The driver has no proper error path for a failed XDP RX-queue info reg */
+ WARN_ON(xdp_rxq_info_reg(&rq->xdp_rxq, nic->netdev, qidx, 0) < 0);
+
+ /* Send a mailbox msg to PF to config RQ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = qs->vnic_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
+ (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ (rq->cont_qs_rbdr_idx << 8) |
+ (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+ (RQ_PASS_RBDR_LVL << 16) | (RQ_PASS_CQ_LVL << 8) |
+ (qs->vnic_id << 0);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* RQ drop config
+ * Enable CQ drop to reserve sufficient CQEs for all tx packets
+ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.cfg = BIT_ULL(63) | BIT_ULL(62) |
+ (RQ_PASS_RBDR_LVL << 40) | (RQ_DROP_RBDR_LVL << 32) |
+ (RQ_PASS_CQ_LVL << 16) | (RQ_DROP_CQ_LVL << 8);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ if (!nic->sqs_mode && (qidx == 0)) {
+ /* Enable checking L3/L4 length and TCP/UDP checksums
+ * Also allow IPv6 pkts with zero UDP checksum.
+ */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+ (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
+ nicvf_config_vlan_stripping(nic, nic->netdev->features);
+ }
+
+ /* Enable Receive queue */
+ memset(&rq_cfg, 0, sizeof(struct rq_cfg));
+ rq_cfg.ena = 1;
+ rq_cfg.tcp_ena = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+}
+
+/* Configures completion queue */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct cmp_queue *cq;
+ struct cq_cfg cq_cfg;
+
+ cq = &qs->cq[qidx];
+ cq->enable = enable;
+
+ if (!cq->enable) {
+ nicvf_reclaim_cmp_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+ if (!cq->enable)
+ return;
+
+ spin_lock_init(&cq->lock);
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+ qidx, (u64)(cq->dmem.phys_base));
+
+ /* Enable Completion queue */
+ memset(&cq_cfg, 0, sizeof(struct cq_cfg));
+ cq_cfg.ena = 1;
+ cq_cfg.reset = 0;
+ cq_cfg.caching = 0;
+ cq_cfg.qsize = ilog2(qs->cq_len >> 10);
+ cq_cfg.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
+ qidx, CMP_QUEUE_TIMER_THRESH);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct snd_queue *sq;
+ struct sq_cfg sq_cfg;
+
+ sq = &qs->sq[qidx];
+ sq->enable = enable;
+
+ if (!sq->enable) {
+ nicvf_reclaim_snd_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+ sq->cq_qs = qs->vnic_id;
+ sq->cq_idx = qidx;
+
+ /* Send a mailbox msg to PF to config SQ */
+ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = qs->vnic_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.sqs_mode = nic->sqs_mode;
+ mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+ qidx, (u64)(sq->dmem.phys_base));
+
+ /* Enable send queue & set queue size */
+ memset(&sq_cfg, 0, sizeof(struct sq_cfg));
+ sq_cfg.ena = 1;
+ sq_cfg.reset = 0;
+ sq_cfg.ldwb = 0;
+ sq_cfg.qsize = ilog2(qs->sq_len >> 10);
+ sq_cfg.tstmp_bgx_intf = 0;
+ /* CQ's level at which HW will stop processing SQEs to avoid
+ * transmitting a pkt with no space in CQ to post CQE_TX.
+ */
+ sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+
+ /* Set queue:cpu affinity for better load distribution */
+ if (cpu_online(qidx)) {
+ cpumask_set_cpu(qidx, &sq->affinity_mask);
+ netif_set_xps_queue(nic->netdev,
+ &sq->affinity_mask, qidx);
+ }
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct rbdr *rbdr;
+ struct rbdr_cfg rbdr_cfg;
+
+ rbdr = &qs->rbdr[qidx];
+ nicvf_reclaim_rbdr(nic, rbdr, qidx);
+ if (!enable)
+ return;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+ qidx, (u64)(rbdr->dmem.phys_base));
+
+ /* Enable RBDR & set queue size */
+ /* Buffer size should be in multiples of 128 bytes */
+ memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
+ rbdr_cfg.ena = 1;
+ rbdr_cfg.reset = 0;
+ rbdr_cfg.ldwb = 0;
+ rbdr_cfg.qsize = RBDR_SIZE;
+ rbdr_cfg.avg_con = 0;
+ rbdr_cfg.lines = rbdr->dma_size / 128;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, *(u64 *)&rbdr_cfg);
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ qidx, qs->rbdr_len - 1);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+ qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct queue_set *qs = nic->qs;
+ struct qs_cfg *qs_cfg;
+
+ if (!qs) {
+ netdev_warn(nic->netdev,
+ "Qset is still not allocated, don't init queues\n");
+ return;
+ }
+
+ qs->enable = enable;
+ qs->vnic_id = nic->vf_id;
+
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = qs->vnic_id;
+ mbx.qs.sqs_count = nic->sqs_count;
+
+ mbx.qs.cfg = 0;
+ qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+ if (qs->enable) {
+ qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+ qs_cfg->be = 1;
+#endif
+ qs_cfg->vnic = qs->vnic_id;
+ /* Enable Tx timestamping capability */
+ if (nic->ptp_clock)
+ qs_cfg->send_tstmp_ena = 1;
+ }
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Free receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+ /* Free completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+ /* Free send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Alloc receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+ DMA_BUFFER_LEN))
+ goto alloc_fail;
+ }
+
+ /* Alloc send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx))
+ goto alloc_fail;
+ }
+
+ /* Alloc completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+ goto alloc_fail;
+ }
+
+ return 0;
+alloc_fail:
+ nicvf_free_resources(nic);
+ return -ENOMEM;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+ struct queue_set *qs;
+
+ qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+ nic->qs = qs;
+
+ /* Set count of each queue */
+ qs->rbdr_cnt = DEFAULT_RBDR_CNT;
+ qs->rq_cnt = min_t(u8, MAX_RCV_QUEUES_PER_QS, num_online_cpus());
+ qs->sq_cnt = min_t(u8, MAX_SND_QUEUES_PER_QS, num_online_cpus());
+ qs->cq_cnt = max_t(u8, qs->rq_cnt, qs->sq_cnt);
+
+ /* Set queue lengths */
+ qs->rbdr_len = RCV_BUF_COUNT;
+ qs->sq_len = SND_QUEUE_LEN;
+ qs->cq_len = CMP_QUEUE_LEN;
+
+ nic->rx_queues = qs->rq_cnt;
+ nic->tx_queues = qs->sq_cnt;
+ nic->xdp_tx_queues = 0;
+
+ return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+ bool disable = false;
+ struct queue_set *qs = nic->qs;
+ struct queue_set *pqs = nic->pnicvf->qs;
+ int qidx;
+
+ if (!qs)
+ return 0;
+
+ /* Take primary VF's queue lengths.
+ * This is needed to take queue lengths set from ethtool
+ * into consideration.
+ */
+ if (nic->sqs_mode && pqs) {
+ qs->cq_len = pqs->cq_len;
+ qs->sq_len = pqs->sq_len;
+ }
+
+ if (enable) {
+ if (nicvf_alloc_resources(nic))
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, enable);
+ } else {
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+ nicvf_free_resources(nic);
+ }
+
+ /* Reset RXQ's stats.
+ * SQ's stats will get reset automatically once SQ is reset.
+ */
+ nicvf_reset_rcv_queue_stats(nic);
+
+ return 0;
+}
+
+/* Get a free desc from SQ
+ * returns descriptor pointer & descriptor number
+ */
+static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ int qentry;
+
+ qentry = sq->tail;
+ if (!sq->is_xdp)
+ atomic_sub(desc_cnt, &sq->free_cnt);
+ else
+ sq->xdp_free_cnt -= desc_cnt;
+ sq->tail += desc_cnt;
+ sq->tail &= (sq->dmem.q_len - 1);
+
+ return qentry;
+}
+
+/* Roll back to the previous tail pointer when descriptors are not used */
+static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
+ int qentry, int desc_cnt)
+{
+ sq->tail = qentry;
+ atomic_add(desc_cnt, &sq->free_cnt);
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ if (!sq->is_xdp)
+ atomic_add(desc_cnt, &sq->free_cnt);
+ else
+ sq->xdp_free_cnt += desc_cnt;
+ sq->head += desc_cnt;
+ sq->head &= (sq->dmem.q_len - 1);
+}
+
+static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+ qentry++;
+ qentry &= (sq->dmem.q_len - 1);
+ return qentry;
+}
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg |= NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg &= ~NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
+ int qidx)
+{
+ u64 head;
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct sq_hdr_subdesc *hdr;
+
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+ while (sq->head != head) {
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+ nicvf_put_sq_desc(sq, 1);
+ continue;
+ }
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (skb)
+ dev_kfree_skb_any(skb);
+ atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
+ atomic64_add(hdr->tot_len,
+ (atomic64_t *)&netdev->stats.tx_bytes);
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ }
+}
+
+/* XDP Transmit APIs */
+void nicvf_xdp_sq_doorbell(struct nicvf *nic,
+ struct snd_queue *sq, int sq_num)
+{
+ if (!sq->xdp_desc_cnt)
+ return;
+
+ /* make sure all memory stores are done before ringing doorbell */
+ wmb();
+
+	/* Inform HW to xmit all queued XDP frames */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, sq->xdp_desc_cnt);
+ sq->xdp_desc_cnt = 0;
+}
+
+static inline void
+nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+ int subdesc_cnt, u64 data, int len)
+{
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = len;
+ hdr->post_cqe = 1;
+ sq->xdp_page[qentry] = (u64)virt_to_page((void *)data);
+}
+
+int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
+ u64 bufaddr, u64 dma_addr, u16 len)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+ int qentry;
+
+ if (subdesc_cnt > sq->xdp_free_cnt)
+ return 0;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr);
+
+ sq->xdp_desc_cnt += subdesc_cnt;
+
+ return 1;
+}
+
+/* Calculate the number of SQ subdescriptors needed to transmit all
+ * segments of this TSO packet.
+ * Taken from 'Tilera network driver' with a minor modification.
+ */
+static int nicvf_tso_count_subdescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int sh_len = skb_tcp_all_headers(skb);
+ unsigned int data_len = skb->len - sh_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = skb_frag_size(&sh->frags[f_id]);
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
+ return num_edescs + sh->gso_segs;
+}
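+
+/* Worked example (hypothetical numbers, for illustration only): a TSO skb
+ * whose 4200-byte payload is entirely linear, with gso_size = 1400
+ * (3 segments), needs per segment one gather edesc for the rebuilt
+ * headers and one for its payload slice, i.e. num_edescs = 6; adding one
+ * SQ HDR subdescriptor per segment gives a return value of 6 + 3 = 9.
+ */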
+
+#define POST_CQE_DESC_COUNT 2
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+ if (skb_shinfo(skb)->gso_size && !nic->hw_tso) {
+ subdesc_cnt = nicvf_tso_count_subdescs(skb);
+ return subdesc_cnt;
+ }
+
+ /* Dummy descriptors to get TSO pkt completion notification */
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+ subdesc_cnt += POST_CQE_DESC_COUNT;
+
+ if (skb_shinfo(skb)->nr_frags)
+ subdesc_cnt += skb_shinfo(skb)->nr_frags;
+
+ return subdesc_cnt;
+}
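+
+/* For example (hypothetical skb): a non-TSO packet with two page frags
+ * needs MIN_SQ_DESC_PER_PKT_XMIT (2: HDR + gather for the linear data)
+ * plus two more gathers, i.e. 4 subdescriptors; on 88xx with HW TSO a
+ * GSO skb additionally reserves POST_CQE_DESC_COUNT (2) descriptors for
+ * the dummy completion SQE added in nicvf_sq_add_cqe_subdesc().
+ */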
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
+ int subdesc_cnt, struct sk_buff *skb, int len)
+{
+ int proto;
+ struct sq_hdr_subdesc *hdr;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+
+ ip.hdr = skb_network_header(skb);
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+ /* post_cqe = 0, to avoid HW posting a CQE for every TSO
+ * segment transmitted on 88xx.
+ */
+ hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+ } else {
+ sq->skbuff[qentry] = (u64)skb;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* No of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ }
+ hdr->tot_len = len;
+
+ /* Offload checksum calculation to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (ip.v4->version == 4)
+ hdr->csum_l3 = 1; /* Enable IP csum calculation */
+ hdr->l3_offset = skb_network_offset(skb);
+ hdr->l4_offset = skb_transport_offset(skb);
+
+ proto = (ip.v4->version == 4) ? ip.v4->protocol :
+ ip.v6->nexthdr;
+
+ switch (proto) {
+ case IPPROTO_TCP:
+ hdr->csum_l4 = SEND_L4_CSUM_TCP;
+ break;
+ case IPPROTO_UDP:
+ hdr->csum_l4 = SEND_L4_CSUM_UDP;
+ break;
+ case IPPROTO_SCTP:
+ hdr->csum_l4 = SEND_L4_CSUM_SCTP;
+ break;
+ }
+ }
+
+ if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
+ hdr->tso = 1;
+ hdr->tso_start = skb_tcp_all_headers(skb);
+ hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
+ /* For non-tunneled pkts, point this to L2 ethertype */
+ hdr->inner_l3_offset = skb_network_offset(skb) - 2;
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
+ }
+
+ /* Check if timestamp is requested */
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ skb_tx_timestamp(skb);
+ return;
+ }
+
+ /* Tx timestamping not supported along with TSO, so ignore request */
+ if (skb_shinfo(skb)->gso_size)
+ return;
+
+ /* HW supports only a single outstanding packet to timestamp */
+ if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
+ return;
+
+ /* Mark the SKB for later reference */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ /* Finally enable timestamp generation
+ * Since 'post_cqe' is also set, two CQEs will be posted
+ * for this packet i.e CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
+ */
+ hdr->tstmp = 1;
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data)
+{
+ struct sq_gather_subdesc *gather;
+
+ qentry &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+ memset(gather, 0, SND_QUEUE_DESC_SIZE);
+ gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
+ gather->size = size;
+ gather->addr = data;
+}
+
+/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission of
+ * the TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+ int tso_sqe, struct sk_buff *skb)
+{
+ struct sq_imm_subdesc *imm;
+ struct sq_hdr_subdesc *hdr;
+
+ sq->skbuff[qentry] = (u64)skb;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* There is no packet to transmit here */
+ hdr->dont_send = 1;
+ hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+ hdr->tot_len = 1;
+ /* Actual TSO header SQE index, needed for cleanup */
+ hdr->rsvd2 = tso_sqe;
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(imm, 0, SND_QUEUE_DESC_SIZE);
+ imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+ imm->len = 1;
+}
+
+static inline void nicvf_sq_doorbell(struct nicvf *nic, struct sk_buff *skb,
+ int sq_num, int desc_cnt)
+{
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(nic->pnicvf->netdev,
+ skb_get_queue_mapping(skb));
+
+ netdev_tx_sent_queue(txq, skb->len);
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit the newly queued descriptors */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, desc_cnt);
+}
+
+/* Segment a TSO packet into 'gso_size' segments and append
+ * them to SQ for transfer
+ */
+static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+ int sq_num, int qentry, struct sk_buff *skb)
+{
+ struct tso_t tso;
+ int seg_subdescs = 0, desc_cnt = 0;
+ int seg_len, total_len, data_left;
+ int hdr_qentry = qentry;
+ int hdr_len;
+
+ hdr_len = tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Save Qentry for adding HDR_SUBDESC at the end */
+ hdr_qentry = qentry;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* Add segment's header */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
+ sq->tso_hdrs_phys +
+ qentry * TSO_HEADER_SIZE);
+ /* HDR_SUBDESC + GATHER */
+ seg_subdescs = 2;
+ seg_len = hdr_len;
+
+ /* Add segment's payload fragments */
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(tso.data));
+ seg_subdescs++;
+ seg_len += size;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry,
+ seg_subdescs - 1, skb, seg_len);
+ sq->skbuff[hdr_qentry] = (u64)NULL;
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+
+ desc_cnt += seg_subdescs;
+ }
+ /* Save SKB in the last segment for freeing */
+ sq->skbuff[hdr_qentry] = (u64)skb;
+
+ nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
+
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
+ return 1;
+}
+
+/* Append an skb to a SQ for packet transfer. */
+int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
+ struct sk_buff *skb, u8 sq_num)
+{
+ int i, size;
+ int subdesc_cnt, hdr_sqe = 0;
+ int qentry;
+ u64 dma_addr;
+
+ subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+ if (subdesc_cnt > atomic_read(&sq->free_cnt))
+ goto append_fail;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ /* Check if it's a TSO packet */
+ if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
+ return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);
+
+ /* Add SQ header subdesc */
+ nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
+ skb, skb->len);
+ hdr_sqe = qentry;
+
+ /* Add SQ gather subdescs */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
+ offset_in_page(skb->data), size,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
+
+ /* Check for scattered buffer */
+ if (!skb_is_nonlinear(skb))
+ goto doorbell;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_frag_size(frag);
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev,
+ skb_frag_page(frag),
+ skb_frag_off(frag), size,
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ /* Free entire chain of mapped buffers
+ * here 'i' = frags mapped + above mapped skb->data
+ */
+ nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
+ }
+
+doorbell:
+ if (nic->t88 && skb_shinfo(skb)->gso_size) {
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
+ }
+
+ nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
+
+ return 1;
+
+append_fail:
+ /* Use original PCI dev for debug log */
+ nic = nic->pnicvf;
+ netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
+ return 0;
+}
+
+static inline unsigned frag_num(unsigned i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
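+
+/* Illustrative note: rb_lens[] indexes the 16-bit rbN_sz fields packed
+ * four to a 64-bit CQE word (see struct cqe_rx_t). On big-endian the
+ * declaration order of those bitfields puts rb3..rb0 at increasing u16
+ * offsets, so indices are swapped within each group of four:
+ * 0->3, 1->2, 2->1, 3->0, 4->7, 5->6, ... On little-endian the index is
+ * used unchanged.
+ */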
+
+static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
+ u64 buf_addr, bool xdp)
+{
+ struct page *page = NULL;
+ int len = RCV_FRAG_LEN;
+
+ if (xdp) {
+ page = virt_to_page(phys_to_virt(buf_addr));
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) != 1)
+ return;
+
+ len += XDP_PACKET_HEADROOM;
+ /* Receive buffers in XDP mode are mapped from page start */
+ dma_addr &= PAGE_MASK;
+ }
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+/* Returns SKB for a received packet */
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+ struct cqe_rx_t *cqe_rx, bool xdp)
+{
+ int frag;
+ int payload_len = 0;
+ struct sk_buff *skb = NULL;
+ struct page *page;
+ int offset;
+ u16 *rb_lens = NULL;
+ u64 *rb_ptrs = NULL;
+ u64 phys_addr;
+
+ rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
+ /* On all chips except 88xx pass1, CQE_RX2_S is added to
+ * CQE_RX at word6, hence the buffer pointers move by one word.
+ *
+ * Use the existing 'hw_tso' flag, which is set for all chips
+ * except 88xx pass1, instead of an additional cache line
+ * access (or miss) from reading the PCI device's revision.
+ */
+ if (!nic->hw_tso)
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+ else
+ rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
+
+ for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+ payload_len = rb_lens[frag_num(frag)];
+ phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
+ if (!phys_addr) {
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
+ if (!frag) {
+ /* First fragment */
+ nicvf_unmap_rcv_buffer(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ phys_addr, xdp);
+ skb = nicvf_rb_ptr_to_skb(nic,
+ phys_addr - cqe_rx->align_pad,
+ payload_len);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, cqe_rx->align_pad);
+ skb_put(skb, payload_len);
+ } else {
+ /* Add fragments */
+ nicvf_unmap_rcv_buffer(nic, *rb_ptrs, phys_addr, xdp);
+ page = virt_to_page(phys_to_virt(phys_addr));
+ offset = phys_to_virt(phys_addr) - page_address(page);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ offset, payload_len, RCV_FRAG_LEN);
+ }
+ /* Next buffer pointer */
+ rb_ptrs++;
+ }
+ return skb;
+}
+
+static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
+{
+ u64 reg_val;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ reg_val = 0;
+ }
+
+ return reg_val;
+}
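+
+/* Example (illustrative): for NICVF_INTR_CQ on queue 3 the mask is
+ * (1ULL << 3) << NICVF_INTR_CQ_SHIFT. The helpers below OR that mask
+ * into NIC_VF_ENA_W1S to enable the interrupt, write it to
+ * NIC_VF_ENA_W1C to disable it, or write it to NIC_VF_INT to clear it.
+ */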
+
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+
+ if (!mask) {
+ netdev_dbg(nic->netdev,
+ "Failed to enable interrupt: unknown type\n");
+ return;
+ }
+ nicvf_reg_write(nic, NIC_VF_ENA_W1S,
+ nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
+}
+
+/* Disable interrupt */
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+
+ if (!mask) {
+ netdev_dbg(nic->netdev,
+ "Failed to disable interrupt: unknown type\n");
+ return;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
+}
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+
+ if (!mask) {
+ netdev_dbg(nic->netdev,
+ "Failed to clear interrupt: unknown type\n");
+ return;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_INT, mask);
+}
+
+/* Check if interrupt is enabled */
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 mask = nicvf_int_type_to_mask(int_type, q_idx);
+ /* If the interrupt type is unknown, treat it as disabled. */
+ if (!mask) {
+ netdev_dbg(nic->netdev,
+ "Failed to check interrupt enable: unknown type\n");
+ return 0;
+ }
+
+ return mask & nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+ struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ rq = &nic->qs->rq[rq_idx];
+ rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+ rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+ struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ sq = &nic->qs->sq[sq_idx];
+ sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+ sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive completion queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+{
+ netif_err(nic, rx_err, nic->netdev,
+ "RX error CQE err_level 0x%x err_opcode 0x%x\n",
+ cqe_rx->err_level, cqe_rx->err_opcode);
+
+ switch (cqe_rx->err_opcode) {
+ case CQ_RX_ERROP_RE_PARTIAL:
+ this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
+ break;
+ case CQ_RX_ERROP_RE_JABBER:
+ this_cpu_inc(nic->drv_stats->rx_jabber_errs);
+ break;
+ case CQ_RX_ERROP_RE_FCS:
+ this_cpu_inc(nic->drv_stats->rx_fcs_errs);
+ break;
+ case CQ_RX_ERROP_RE_RX_CTL:
+ this_cpu_inc(nic->drv_stats->rx_bgx_errs);
+ break;
+ case CQ_RX_ERROP_PREL2_ERR:
+ this_cpu_inc(nic->drv_stats->rx_prel2_errs);
+ break;
+ case CQ_RX_ERROP_L2_MAL:
+ this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
+ break;
+ case CQ_RX_ERROP_L2_OVERSIZE:
+ this_cpu_inc(nic->drv_stats->rx_oversize);
+ break;
+ case CQ_RX_ERROP_L2_UNDERSIZE:
+ this_cpu_inc(nic->drv_stats->rx_undersize);
+ break;
+ case CQ_RX_ERROP_L2_LENMISM:
+ this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
+ break;
+ case CQ_RX_ERROP_L2_PCLP:
+ this_cpu_inc(nic->drv_stats->rx_l2_pclp);
+ break;
+ case CQ_RX_ERROP_IP_NOT:
+ this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
+ break;
+ case CQ_RX_ERROP_IP_CSUM_ERR:
+ this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
+ break;
+ case CQ_RX_ERROP_IP_MAL:
+ this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
+ break;
+ case CQ_RX_ERROP_IP_MALD:
+ this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
+ break;
+ case CQ_RX_ERROP_IP_HOP:
+ this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
+ break;
+ case CQ_RX_ERROP_L3_PCLP:
+ this_cpu_inc(nic->drv_stats->rx_l3_pclp);
+ break;
+ case CQ_RX_ERROP_L4_MAL:
+ this_cpu_inc(nic->drv_stats->rx_l4_malformed);
+ break;
+ case CQ_RX_ERROP_L4_CHK:
+ this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
+ break;
+ case CQ_RX_ERROP_UDP_LEN:
+ this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
+ break;
+ case CQ_RX_ERROP_L4_PORT:
+ this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
+ break;
+ case CQ_RX_ERROP_TCP_FLAG:
+ this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
+ break;
+ case CQ_RX_ERROP_TCP_OFFSET:
+ this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
+ break;
+ case CQ_RX_ERROP_L4_PCLP:
+ this_cpu_inc(nic->drv_stats->rx_l4_pclp);
+ break;
+ case CQ_RX_ERROP_RBDR_TRUNC:
+ this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
+ break;
+ }
+
+ return 1;
+}
+
+/* Check for errors in the send completion queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
+{
+ switch (cqe_tx->send_status) {
+ case CQ_TX_ERROP_DESC_FAULT:
+ this_cpu_inc(nic->drv_stats->tx_desc_fault);
+ break;
+ case CQ_TX_ERROP_HDR_CONS_ERR:
+ this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
+ break;
+ case CQ_TX_ERROP_SUBDC_ERR:
+ this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+ break;
+ case CQ_TX_ERROP_MAX_SIZE_VIOL:
+ this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
+ break;
+ case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+ this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
+ break;
+ case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+ this_cpu_inc(nic->drv_stats->tx_data_seq_err);
+ break;
+ case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+ this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
+ break;
+ case CQ_TX_ERROP_LOCK_VIOL:
+ this_cpu_inc(nic->drv_stats->tx_lock_viol);
+ break;
+ case CQ_TX_ERROP_DATA_FAULT:
+ this_cpu_inc(nic->drv_stats->tx_data_fault);
+ break;
+ case CQ_TX_ERROP_TSTMP_CONFLICT:
+ this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
+ break;
+ case CQ_TX_ERROP_TSTMP_TIMEOUT:
+ this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
+ break;
+ case CQ_TX_ERROP_MEM_FAULT:
+ this_cpu_inc(nic->drv_stats->tx_mem_fault);
+ break;
+ case CQ_TX_ERROP_CK_OVERLAP:
+ this_cpu_inc(nic->drv_stats->tx_csum_overlap);
+ break;
+ case CQ_TX_ERROP_CK_OFLOW:
+ this_cpu_inc(nic->drv_stats->tx_csum_overflow);
+ break;
+ }
+
+ return 1;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
new file mode 100644
index 000000000..8453defc2
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include <linux/netdevice.h>
+#include <linux/iommu.h>
+#include <net/xdp.h>
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+/* VF's queue interrupt ranges */
+#define NICVF_INTR_ID_CQ 0
+#define NICVF_INTR_ID_SQ 8
+#define NICVF_INTR_ID_RBDR 16
+#define NICVF_INTR_ID_MISC 18
+#define NICVF_INTR_ID_QS_ERR 19
+
+#define for_each_cq_irq(irq) \
+ for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
+#define for_each_sq_irq(irq) \
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
+#define for_each_rbdr_irq(irq) \
+ for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
+
+#define RBDR_SIZE0 0ULL /* 8K entries */
+#define RBDR_SIZE1 1ULL /* 16K entries */
+#define RBDR_SIZE2 2ULL /* 32K entries */
+#define RBDR_SIZE3 3ULL /* 64K entries */
+#define RBDR_SIZE4 4ULL /* 128K entries */
+#define RBDR_SIZE5 5ULL /* 256K entries */
+#define RBDR_SIZE6 6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
+
+/* Default queue count per QS, their lengths and threshold values */
+#define DEFAULT_RBDR_CNT 1
+
+#define SND_QSIZE SND_QUEUE_SIZE0
+#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
+#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10))
+#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
+#define SND_QUEUE_THRESH 2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT 2
+/* 1 since timestamping is not enabled; otherwise this would be 2 */
+#define MAX_CQE_PER_PKT_XMIT 1
+
+/* Keep CQ and SQ sizes the same; if timestamping
+ * is enabled this equation will change.
+ */
+#define CMP_QSIZE CMP_QUEUE_SIZE0
+#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
+#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10))
+#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10))
+#define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2)
+#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
+
+/* Number of CQEs that might anyway get used by HW due to pipelining
+ * effects irrespective of PASS/DROP/LEVELS being configured
+ */
+#define CMP_QUEUE_PIPELINE_RSVD 544
+
+#define RBDR_SIZE RBDR_SIZE0
+#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
+#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
+#define RBDR_THRESH (RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN 1536 /* In multiples of 128 bytes */
+#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+ MAX_CQE_PER_PKT_XMIT)
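+
+/* With the defaults above (illustrative arithmetic): SND_QUEUE_LEN and
+ * CMP_QUEUE_LEN are 1 << 10 = 1024 entries, RCV_BUF_COUNT is
+ * 1 << 13 = 8192 buffers, and MAX_CQES_FOR_TX works out to
+ * (1024 / 2) * 1 = 512.
+ */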
+
+/* RED and Backpressure levels of CQ for pkt reception
+ * For CQ, level is a measure of emptiness i.e 0x0 means full
+ * eg: For CQ of size 4K, and for pass/drop levels of 160/144
+ * HW accepts pkt if unused CQE >= 2560
+ * RED accepts pkt if unused CQE < 2560 & >= 2304
+ * DROPs pkts if unused CQE < 2304
+ */
+#define RQ_PASS_CQ_LVL 224ULL
+#define RQ_DROP_CQ_LVL 216ULL
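+
+/* Per the 4K example above, a level appears to correspond to level/256
+ * of the CQ size in unused CQEs (160/256 * 4096 = 2560,
+ * 144/256 * 4096 = 2304). Under that reading, the 224/216 defaults on
+ * the default 1K CQ would mean pass at >= 896 and drop below 864
+ * unused CQEs.
+ */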
+
+/* RED and Backpressure levels of RBDR for pkt reception
+ * For RBDR, level is a measure of fullness i.e 0x0 means empty
+ * eg: For RBDR of size 8K, and for pass/drop levels of 4/0
+ * HW accepts pkt if unused RBs >= 256
+ * RED accepts pkt if unused RBs < 256 & >= 0
+ * DROPs pkts if unused RBs < 0
+ */
+#define RQ_PASS_RBDR_LVL 8ULL
+#define RQ_DROP_RBDR_LVL 0ULL
+
+/* Descriptor size in bytes */
+#define SND_QUEUE_DESC_SIZE 16
+#define CMP_QUEUE_DESC_SIZE 512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN 7
+#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET BIT_ULL(41)
+#define NICVF_SQ_RESET BIT_ULL(17)
+#define NICVF_RBDR_RESET BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+ CQ_RX_ERROP_RE_NONE = 0x0,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+ CQ_TX_ERROP_GOOD = 0x0,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
+
+enum RQ_SQ_STATS {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+ u64 bytes;
+ u64 pkts;
+} ____cacheline_aligned_in_smp;
+
+struct q_desc_mem {
+ dma_addr_t dma;
+ u64 size;
+ u32 q_len;
+ dma_addr_t phys_base;
+ void *base;
+ void *unalign_base;
+};
+
+struct pgcache {
+ struct page *page;
+ int ref_count;
+ u64 dma_addr;
+};
+
+struct rbdr {
+ bool enable;
+ u32 dma_size;
+ u32 frag_len;
+ u32 thresh; /* Threshold level for interrupt */
+ void *desc;
+ u32 head;
+ u32 tail;
+ struct q_desc_mem dmem;
+ bool is_xdp;
+
+ /* For page recycling */
+ int pgidx;
+ int pgcnt;
+ int pgalloc;
+ struct pgcache *pgcache;
+} ____cacheline_aligned_in_smp;
+
+struct rcv_queue {
+ bool enable;
+ struct rbdr *rbdr_start;
+ struct rbdr *rbdr_cont;
+ bool en_tcp_reassembly;
+ u8 cq_qs; /* CQ's QS to which this RQ is assigned */
+ u8 cq_idx; /* CQ index (0 to 7) in the QS */
+ u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
+ u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
+ u8 start_rbdr_qs; /* First buffer ptrs - QS num */
+ u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
+ u8 caching;
+ struct rx_tx_queue_stats stats;
+ struct xdp_rxq_info xdp_rxq;
+} ____cacheline_aligned_in_smp;
+
+struct cmp_queue {
+ bool enable;
+ u16 thresh;
+ spinlock_t lock; /* lock to serialize processing CQEs */
+ void *desc;
+ struct q_desc_mem dmem;
+ int irq;
+} ____cacheline_aligned_in_smp;
+
+struct snd_queue {
+ bool enable;
+ u8 cq_qs; /* CQ's QS to which this SQ is pointing */
+ u8 cq_idx; /* CQ index (0 to 7) in the above QS */
+ u16 thresh;
+ atomic_t free_cnt;
+ u32 head;
+ u32 tail;
+ u64 *skbuff;
+ void *desc;
+ u64 *xdp_page;
+ u16 xdp_desc_cnt;
+ u16 xdp_free_cnt;
+ bool is_xdp;
+
+ /* For TSO segment's header */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_phys;
+
+ cpumask_t affinity_mask;
+ struct q_desc_mem dmem;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct queue_set {
+ bool enable;
+ bool be_en;
+ u8 vnic_id;
+ u8 rq_cnt;
+ u8 cq_cnt;
+ u64 cq_len;
+ u8 sq_cnt;
+ u64 sq_len;
+ u8 rbdr_cnt;
+ u64 rbdr_len;
+ struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
+ struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
+ struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
+ struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+} ____cacheline_aligned_in_smp;
+
+#define GET_RBDR_DESC(RING, idx)\
+ (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+ (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+ (&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define CQ_WR_FULL BIT(26)
+#define CQ_WR_DISABLE BIT(25)
+#define CQ_WR_FAULT BIT(24)
+#define CQ_CQE_COUNT (0xFFFF << 0)
+
+#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (nic->iommu_domain)
+ return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+ return dma_addr;
+}
+
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt);
+void nicvf_config_vlan_stripping(struct nicvf *nic,
+ netdev_features_t features);
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct net_device *netdev,
+ struct snd_queue *sq, int qidx);
+int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
+ struct sk_buff *skb, u8 sq_num);
+int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
+ u64 bufaddr, u64 dma_addr, u16 len);
+void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
+
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
+ struct cqe_rx_t *cqe_rx, bool xdp);
+void nicvf_rbdr_task(struct tasklet_struct *t);
+void nicvf_rbdr_work(struct work_struct *work);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val);
+u64 nicvf_queue_reg_read(struct nicvf *nic,
+ u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
new file mode 100644
index 000000000..0df115d42
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -0,0 +1,696 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This file contains HW queue descriptor formats, config register
+ * structures etc
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD = 0x0,
+ NIC_SEND_LD_TYPE_E_LDT = 0x1,
+ NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE = 0x0,
+ ETYPE_ALG_SKIP = 0x1,
+ ETYPE_ALG_ENDPARSE = 0x2,
+ ETYPE_ALG_VLAN = 0x3,
+ ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+ L3TYPE_NONE = 0x00,
+ L3TYPE_GRH = 0x01,
+ L3TYPE_IPV4 = 0x04,
+ L3TYPE_IPV4_OPTIONS = 0x05,
+ L3TYPE_IPV6 = 0x06,
+ L3TYPE_IPV6_OPTIONS = 0x07,
+ L3TYPE_ET_STOP = 0x0D,
+ L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+ L4TYPE_NONE = 0x00,
+ L4TYPE_IPSEC_ESP = 0x01,
+ L4TYPE_IPFRAG = 0x02,
+ L4TYPE_IPCOMP = 0x03,
+ L4TYPE_TCP = 0x04,
+ L4TYPE_UDP = 0x05,
+ L4TYPE_SCTP = 0x06,
+ L4TYPE_GRE = 0x07,
+ L4TYPE_ROCE_BTH = 0x08,
+ L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE = 0x0,
+ CPI_ALG_VLAN = 0x1,
+ CPI_ALG_VLAN16 = 0x2,
+ CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE = 0x00,
+ RSS_ALG_PORT = 0x01,
+ RSS_ALG_IP = 0x02,
+ RSS_ALG_TCP_IP = 0x03,
+ RSS_ALG_UDP_IP = 0x04,
+ RSS_ALG_SCTP_IP = 0x05,
+ RSS_ALG_GRE_IP = 0x06,
+ RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC = 0x00,
+ RSS_HASH_IP = 0x01,
+ RSS_HASH_TCP = 0x02,
+ RSS_HASH_TCP_SYN_DIS = 0x03,
+ RSS_HASH_UDP = 0x04,
+ RSS_HASH_L4ETC = 0x05,
+ RSS_HASH_ROCE = 0x06,
+ RSS_L3_BIDI = 0x07,
+ RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID = 0x0,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD = 0x00,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+ CQE_RX_TCP_END_INVALID_FLAG = 1,
+ CQE_RX_TCP_END_TIMEOUT = 2,
+ CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+ CQE_RX_TCP_END_PKT_ERR = 4,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE = 0x0,
+ CQE_RX_ERRLVL_L2 = 0x1,
+ CQE_RX_ERRLVL_L3 = 0x2,
+ CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE = 0x0,
+ CQE_RX_ERR_RE_PARTIAL = 0x1,
+ CQE_RX_ERR_RE_JABBER = 0x2,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 stdn_fault:1;
+ u64 rsvd0:1;
+ u64 rq_qs:7;
+ u64 rq_idx:3;
+ u64 rsvd1:12;
+ u64 rss_alg:4;
+ u64 rsvd2:4;
+ u64 rb_cnt:4;
+ u64 vlan_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan2_stripped:1;
+ u64 l4_type:4;
+ u64 l3_type:4;
+ u64 l2_present:1;
+ u64 err_level:3;
+ u64 err_opcode:8;
+
+ u64 pkt_len:16; /* W1 */
+ u64 l2_ptr:8;
+ u64 l3_ptr:8;
+ u64 l4_ptr:8;
+ u64 cq_pkt_len:8;
+ u64 align_pad:3;
+ u64 rsvd3:1;
+ u64 chan:12;
+
+ u64 rss_tag:32; /* W2 */
+ u64 vlan_tci:16;
+ u64 vlan_ptr:8;
+ u64 vlan2_ptr:8;
+
+ u64 rb3_sz:16; /* W3 */
+ u64 rb2_sz:16;
+ u64 rb1_sz:16;
+ u64 rb0_sz:16;
+
+ u64 rb7_sz:16; /* W4 */
+ u64 rb6_sz:16;
+ u64 rb5_sz:16;
+ u64 rb4_sz:16;
+
+ u64 rb11_sz:16; /* W5 */
+ u64 rb10_sz:16;
+ u64 rb9_sz:16;
+ u64 rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 err_opcode:8;
+ u64 err_level:3;
+ u64 l2_present:1;
+ u64 l3_type:4;
+ u64 l4_type:4;
+ u64 vlan2_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan_found:1;
+ u64 rb_cnt:4;
+ u64 rsvd2:4;
+ u64 rss_alg:4;
+ u64 rsvd1:12;
+ u64 rq_idx:3;
+ u64 rq_qs:7;
+ u64 rsvd0:1;
+ u64 stdn_fault:1;
+ u64 cqe_type:4; /* W0 */
+ u64 chan:12;
+ u64 rsvd3:1;
+ u64 align_pad:3;
+ u64 cq_pkt_len:8;
+ u64 l4_ptr:8;
+ u64 l3_ptr:8;
+ u64 l2_ptr:8;
+ u64 pkt_len:16; /* W1 */
+ u64 vlan2_ptr:8;
+ u64 vlan_ptr:8;
+ u64 vlan_tci:16;
+ u64 rss_tag:32; /* W2 */
+ u64 rb0_sz:16;
+ u64 rb1_sz:16;
+ u64 rb2_sz:16;
+ u64 rb3_sz:16; /* W3 */
+ u64 rb4_sz:16;
+ u64 rb5_sz:16;
+ u64 rb6_sz:16;
+ u64 rb7_sz:16; /* W4 */
+ u64 rb8_sz:16;
+ u64 rb9_sz:16;
+ u64 rb10_sz:16;
+ u64 rb11_sz:16; /* W5 */
+#endif
+ u64 rb0_ptr:64;
+ u64 rb1_ptr:64;
+ u64 rb2_ptr:64;
+ u64 rb3_ptr:64;
+ u64 rb4_ptr:64;
+ u64 rb5_ptr:64;
+ u64 rb6_ptr:64;
+ u64 rb7_ptr:64;
+ u64 rb8_ptr:64;
+ u64 rb9_ptr:64;
+ u64 rb10_ptr:64;
+ u64 rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:60;
+
+ u64 rsvd1:4; /* W1 */
+ u64 partial_first:1;
+ u64 rsvd2:27;
+ u64 rbdr_bytes:8;
+ u64 rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 rsvd0:60;
+ u64 cqe_type:4;
+
+ u64 rsvd3:24;
+ u64 rbdr_bytes:8;
+ u64 rsvd2:27;
+ u64 partial_first:1;
+ u64 rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:52;
+ u64 cq_tcp_status:8;
+
+ u64 rsvd1:32; /* W1 */
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd2:8;
+ u64 tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cq_tcp_status:8;
+ u64 rsvd0:52;
+ u64 cqe_type:4; /* W0 */
+
+ u64 tcp_err_bytes:16;
+ u64 rsvd2:8;
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:4;
+ u64 sqe_ptr:16;
+ u64 rsvd1:4;
+ u64 rsvd2:10;
+ u64 sq_qs:7;
+ u64 sq_idx:3;
+ u64 rsvd3:8;
+ u64 send_status:8;
+
+ u64 ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 send_status:8;
+ u64 rsvd3:8;
+ u64 sq_idx:3;
+ u64 sq_qs:7;
+ u64 rsvd2:10;
+ u64 rsvd1:4;
+ u64 sqe_ptr:16;
+ u64 rsvd0:4;
+ u64 cqe_type:4; /* W0 */
+
+ u64 ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+ u64 u[64];
+ struct cqe_send_t snd_hdr;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
+
+struct rbdr_entry_t {
+ u64 buf_addr;
+};
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 tcp_pkt_cnt:12;
+ u64 rsvd1:4;
+ u64 align_hdr_bytes:4;
+ u64 align_ptr_bytes:4;
+ u64 ptr_bytes:16;
+ u64 rsvd2:24;
+ u64 cqe_type:4;
+ u64 rsvd0:54;
+ u64 tcp_end_reason:2;
+ u64 tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_status:4;
+ u64 tcp_end_reason:2;
+ u64 rsvd0:54;
+ u64 cqe_type:4;
+ u64 rsvd2:24;
+ u64 ptr_bytes:16;
+ u64 align_ptr_bytes:4;
+ u64 align_hdr_bytes:4;
+ u64 rsvd1:4;
+ u64 tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+ u64 opaque:32;
+ u64 rss_flow:8;
+ u64 skip_length:6;
+ u64 disable_rss:1;
+ u64 disable_tcp_reassembly:1;
+ u64 nodrop:1;
+ u64 dest_alg:2;
+ u64 rsvd0:2;
+ u64 dest_rq:11;
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE = 0x00,
+ SEND_L4_CSUM_UDP = 0x01,
+ SEND_L4_CSUM_TCP = 0x02,
+ SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32 = 0x00,
+ SEND_CRCALG_CRC32C = 0x01,
+ SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD = 0x00,
+ SEND_LD_TYPE_LDT = 0x01,
+ SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET = 0x00,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64 = 0x00,
+ SEND_MEMDSZ_B32 = 0x01,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID = 0x00,
+ SQ_DESC_TYPE_HEADER = 0x01,
+ SQ_DESC_TYPE_CRC = 0x02,
+ SQ_DESC_TYPE_IMMEDIATE = 0x03,
+ SQ_DESC_TYPE_GATHER = 0x04,
+ SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd1:32;
+ u64 crc_ival:32;
+ u64 subdesc_type:4;
+ u64 crc_alg:2;
+ u64 rsvd0:10;
+ u64 crc_insert_pos:16;
+ u64 hdr_start:16;
+ u64 crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 crc_len:16;
+ u64 hdr_start:16;
+ u64 crc_insert_pos:16;
+ u64 rsvd0:10;
+ u64 crc_alg:2;
+ u64 subdesc_type:4;
+ u64 crc_ival:32;
+ u64 rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 ld_type:2;
+ u64 rsvd0:42;
+ u64 size:16;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 size:16;
+ u64 rsvd0:42;
+ u64 ld_type:2;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 rsvd0:46;
+ u64 len:14;
+
+ u64 data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 len:14;
+ u64 rsvd0:46;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 mem_alg:4;
+ u64 mem_dsz:2;
+ u64 wmem:1;
+ u64 rsvd0:21;
+ u64 offset:32;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 offset:32;
+ u64 rsvd0:21;
+ u64 wmem:1;
+ u64 mem_dsz:2;
+ u64 mem_alg:4;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4;
+ u64 tso:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 dont_send:1;
+ u64 tstmp:1;
+ u64 subdesc_cnt:8;
+ u64 csum_l4:2;
+ u64 csum_l3:1;
+ u64 csum_inner_l4:2;
+ u64 csum_inner_l3:1;
+ u64 rsvd0:2;
+ u64 l4_offset:8;
+ u64 l3_offset:8;
+ u64 rsvd1:4;
+ u64 tot_len:20; /* W0 */
+
+ u64 rsvd2:24;
+ u64 inner_l4_offset:8;
+ u64 inner_l3_offset:8;
+ u64 tso_start:8;
+ u64 rsvd3:2;
+ u64 tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tot_len:20;
+ u64 rsvd1:4;
+ u64 l3_offset:8;
+ u64 l4_offset:8;
+ u64 rsvd0:2;
+ u64 csum_inner_l3:1;
+ u64 csum_inner_l4:2;
+ u64 csum_l3:1;
+ u64 csum_l4:2;
+ u64 subdesc_cnt:8;
+ u64 tstmp:1;
+ u64 dont_send:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 tso:1;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 tso_max_paysize:14;
+ u64 rsvd3:2;
+ u64 tso_start:8;
+ u64 inner_l3_offset:8;
+ u64 inner_l4_offset:8;
+ u64 rsvd2:24; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63:62;
+ u64 ena:1;
+ u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_ena:1;
+ u64 ena:1;
+ u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_43_63:21;
+ u64 ena:1;
+ u64 reset:1;
+ u64 caching:1;
+ u64 reserved_35_39:5;
+ u64 qsize:3;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_15:16;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:3;
+ u64 reserved_35_39:5;
+ u64 caching:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_32_63:32;
+ u64 cq_limit:8;
+ u64 reserved_20_23:4;
+ u64 ena:1;
+ u64 reserved_18_18:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_11_15:5;
+ u64 qsize:3;
+ u64 reserved_3_7:5;
+ u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tstmp_bgx_intf:3;
+ u64 reserved_3_7:5;
+ u64 qsize:3;
+ u64 reserved_11_15:5;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 reserved_18_18:1;
+ u64 ena:1;
+ u64 reserved_20_23:4;
+ u64 cq_limit:8;
+ u64 reserved_32_63:32;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63:19;
+ u64 ena:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_36_41:6;
+ u64 qsize:4;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_12_15:4;
+ u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 lines:12;
+ u64 reserved_12_15:4;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:4;
+ u64 reserved_36_41:6;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_32_63:32;
+ u64 ena:1;
+ u64 reserved_27_30:4;
+ u64 sq_ins_ena:1;
+ u64 sq_ins_pos:6;
+ u64 lock_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 send_tstmp_ena:1;
+ u64 be:1;
+ u64 reserved_7_15:9;
+ u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 vnic:7;
+ u64 reserved_7_15:9;
+ u64 be:1;
+ u64 send_tstmp_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 lock_ena:1;
+ u64 sq_ins_pos:6;
+ u64 sq_ins_ena:1;
+ u64 reserved_27_30:4;
+ u64 ena:1;
+ u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
new file mode 100644
index 000000000..7eb2ddbe9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -0,0 +1,1718 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder_bgx"
+#define DRV_VERSION "1.0"
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+ MCAST_MODE_REJECT = 0x0,
+ MCAST_MODE_ACCEPT = 0x1,
+ MCAST_MODE_CAM_FILTER = 0x2,
+ RSVD = 0x3
+};
+
+#define BCAST_ACCEPT BIT(0)
+#define CAM_ACCEPT BIT(3)
+#define MCAST_MODE_MASK 0x3
+#define BGX_MCAST_MODE(x) (x << 1)
+
+struct dmac_map {
+ u64 vf_map;
+ u64 dmac;
+};
+
+struct lmac {
+ struct bgx *bgx;
+ /* actual number of DMACs configured */
+ u8 dmacs_cfg;
+ /* overall number of DMACs that can be configured per LMAC */
+ u8 dmacs_count;
+ struct dmac_map *dmacs; /* DMAC:VFs tracking filter array */
+ u8 mac[ETH_ALEN];
+ u8 lmac_type;
+ u8 lane_to_sds;
+ bool use_training;
+ bool autoneg;
+ bool link_up;
+ int lmacid; /* ID within BGX */
+ int lmacid_bd; /* ID on board */
+ struct net_device netdev;
+ struct phy_device *phydev;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ unsigned int last_speed;
+ bool is_sgmii;
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+};
+
+struct bgx {
+ u8 bgx_id;
+ struct lmac lmac[MAX_LMAC_PER_BGX];
+ u8 lmac_count;
+ u8 max_lmac;
+ u8 acpi_lmac_idx;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ bool is_dlm;
+ bool is_rgx;
+};
+
+static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
+static int lmac_count; /* Total no of LMACs in system */
+
+static int bgx_xaui_check_link(struct lmac *lmac);
+
+/* Supported devices */
+static const struct pci_device_id bgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_RGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, bgx_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ return readq_relaxed(addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val, addr);
+}
+
+static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val | readq_relaxed(addr), addr);
+}
+
+static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+{
+ int timeout = 100;
+ u64 reg_val;
+
+ while (timeout) {
+ reg_val = bgx_reg_read(bgx, lmac, reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ return 1;
+}
+
+static int max_bgx_per_node;
+static void set_max_bgx_per_node(struct pci_dev *pdev)
+{
+ u16 sdevid;
+
+ if (max_bgx_per_node)
+ return;
+
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_81XX_BGX:
+ case PCI_SUBSYS_DEVID_81XX_RGX:
+ max_bgx_per_node = MAX_BGX_PER_CN81XX;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_BGX:
+ max_bgx_per_node = MAX_BGX_PER_CN83XX;
+ break;
+ case PCI_SUBSYS_DEVID_88XX_BGX:
+ default:
+ max_bgx_per_node = MAX_BGX_PER_CN88XX;
+ break;
+ }
+}
+
+static struct bgx *get_bgx(int node, int bgx_idx)
+{
+ int idx = (node * max_bgx_per_node) + bgx_idx;
+
+ return bgx_vnic[idx];
+}
+
+/* Return a bitmap of the BGX blocks present in HW */
+unsigned bgx_get_map(int node)
+{
+ int i;
+ unsigned map = 0;
+
+ for (i = 0; i < max_bgx_per_node; i++) {
+ if (bgx_vnic[(node * max_bgx_per_node) + i])
+ map |= (1 << i);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL(bgx_get_map);
+
+/* Return number of LMAC configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+ struct bgx *bgx;
+
+ bgx = get_bgx(node, bgx_idx);
+ if (bgx)
+ return bgx->lmac_count;
+
+ return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_count);
+
+/* Returns the current link status of LMAC */
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+{
+ struct bgx_link_status *link = (struct bgx_link_status *)status;
+ struct bgx *bgx;
+ struct lmac *lmac;
+
+ bgx = get_bgx(node, bgx_idx);
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+ link->mac_type = lmac->lmac_type;
+ link->link_up = lmac->link_up;
+ link->duplex = lmac->last_duplex;
+ link->speed = lmac->last_speed;
+}
+EXPORT_SYMBOL(bgx_get_lmac_link_state);
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+
+ if (bgx)
+ return bgx->lmac[lmacid].mac;
+
+ return NULL;
+}
+EXPORT_SYMBOL(bgx_get_lmac_mac);
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+
+ if (!bgx)
+ return;
+
+ ether_addr_copy(bgx->lmac[lmacid].mac, mac);
+}
+EXPORT_SYMBOL(bgx_set_lmac_mac);
+
+static void bgx_flush_dmac_cam_filter(struct bgx *bgx, int lmacid)
+{
+ struct lmac *lmac = NULL;
+ u8 idx = 0;
+
+ lmac = &bgx->lmac[lmacid];
+ /* reset CAM filters */
+ for (idx = 0; idx < lmac->dmacs_count; idx++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+ ((lmacid * lmac->dmacs_count) + idx) *
+ sizeof(u64), 0);
+}
+
+static void bgx_lmac_remove_filters(struct lmac *lmac, u8 vf_id)
+{
+ int i = 0;
+
+ if (!lmac)
+ return;
+
+ /* We've got a reset-filters request from one of the attached VFs, while
+ * the others might want to keep their configuration. So in this case
+ * iterate over all configured filters and decrease their reference
+ * counts; if an address drops to zero refs, remove it from the list.
+ */
+ for (i = lmac->dmacs_cfg - 1; i >= 0; i--) {
+ lmac->dmacs[i].vf_map &= ~BIT_ULL(vf_id);
+ if (!lmac->dmacs[i].vf_map) {
+ lmac->dmacs_cfg--;
+ lmac->dmacs[i].dmac = 0;
+ lmac->dmacs[i].vf_map = 0;
+ }
+ }
+}
+
+static int bgx_lmac_save_filter(struct lmac *lmac, u64 dmac, u8 vf_id)
+{
+ u8 i = 0;
+
+ if (!lmac)
+ return -1;
+
+ /* At the same time we could have several VFs 'attached' to a
+ * particular LMAC, and each VF is represented as a network interface
+ * to the kernel. So from the user's perspective it should be possible
+ * to manipulate each VF's receive modes independently. However, from
+ * the PF driver's perspective we need to keep track of the filter
+ * configurations of the different VFs to prevent duplicate filters.
+ */
+ for (i = 0; i < lmac->dmacs_cfg; i++) {
+ if (lmac->dmacs[i].dmac == dmac) {
+ lmac->dmacs[i].vf_map |= BIT_ULL(vf_id);
+ return -1;
+ }
+ }
+
+ if (!(lmac->dmacs_cfg < lmac->dmacs_count))
+ return -1;
+
+ /* keep it for further tracking */
+ lmac->dmacs[lmac->dmacs_cfg].dmac = dmac;
+ lmac->dmacs[lmac->dmacs_cfg].vf_map = BIT_ULL(vf_id);
+ lmac->dmacs_cfg++;
+ return 0;
+}
+
+static int bgx_set_dmac_cam_filter_mac(struct bgx *bgx, int lmacid,
+ u64 cam_dmac, u8 idx)
+{
+ struct lmac *lmac = NULL;
+ u64 cfg = 0;
+
+ /* skip zero addresses as meaningless */
+ if (!cam_dmac || !bgx)
+ return -1;
+
+ lmac = &bgx->lmac[lmacid];
+
+ /* configure DCAM filtering for designated LMAC */
+ cfg = RX_DMACX_CAM_LMACID(lmacid & LMAC_ID_MASK) |
+ RX_DMACX_CAM_EN | cam_dmac;
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM +
+ ((lmacid * lmac->dmacs_count) + idx) * sizeof(u64), cfg);
+ return 0;
+}
+
+void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid,
+ u64 cam_dmac, u8 vf_id)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac = NULL;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ if (!cam_dmac)
+ cam_dmac = ether_addr_to_u64(lmac->mac);
+
+ /* Since we might have several VFs attached to a particular LMAC
+ * and the kernel could call mcast config for each of them with the
+ * same MAC, check if the requested MAC is already in the filtering
+ * list and update/prepare the list of MACs to apply to HW filters later.
+ */
+ bgx_lmac_save_filter(lmac, cam_dmac, vf_id);
+}
+EXPORT_SYMBOL(bgx_set_dmac_cam_filter);
+
+void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac = NULL;
+ u64 cfg = 0;
+ u8 i = 0;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL);
+ if (mode & BGX_XCAST_BCAST_ACCEPT)
+ cfg |= BCAST_ACCEPT;
+ else
+ cfg &= ~BCAST_ACCEPT;
+
+ /* disable all MCASTs and DMAC filtering */
+ cfg &= ~(CAM_ACCEPT | BGX_MCAST_MODE(MCAST_MODE_MASK));
+
+ /* check requested bits and set the filtering mode appropriately */
+ if (mode & (BGX_XCAST_MCAST_ACCEPT)) {
+ cfg |= (BGX_MCAST_MODE(MCAST_MODE_ACCEPT));
+ } else if (mode & BGX_XCAST_MCAST_FILTER) {
+ cfg |= (BGX_MCAST_MODE(MCAST_MODE_CAM_FILTER) | CAM_ACCEPT);
+ for (i = 0; i < lmac->dmacs_cfg; i++)
+ bgx_set_dmac_cam_filter_mac(bgx, lmacid,
+ lmac->dmacs[i].dmac, i);
+ }
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, cfg);
+}
+EXPORT_SYMBOL(bgx_set_xcast_mode);
+
+void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf_id)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+
+ if (!bgx)
+ return;
+
+ bgx_lmac_remove_filters(&bgx->lmac[lmacid], vf_id);
+ bgx_flush_dmac_cam_filter(bgx, lmacid);
+ bgx_set_xcast_mode(node, bgx_idx, lmacid,
+ (BGX_XCAST_BCAST_ACCEPT | BGX_XCAST_MCAST_ACCEPT));
+}
+EXPORT_SYMBOL(bgx_reset_xcast_mode);
+
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!bgx)
+ return;
+ lmac = &bgx->lmac[lmacid];
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ if (enable) {
+ cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
+
+ /* enable TX FIFO Underflow interrupt */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
+ GMI_TXX_INT_UNDFLW);
+ } else {
+ cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+ /* Disable TX FIFO Underflow interrupt */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
+ GMI_TXX_INT_UNDFLW);
+ }
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ if (bgx->is_rgx)
+ xcv_setup_link(enable ? lmac->link_up : 0, lmac->last_speed);
+}
+EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
+
+/* Enables or disables timestamp insertion by BGX for Rx packets */
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable)
+{
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 csr_offset, cfg;
+
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+
+ if (lmac->lmac_type == BGX_MODE_SGMII ||
+ lmac->lmac_type == BGX_MODE_QSGMII ||
+ lmac->lmac_type == BGX_MODE_RGMII)
+ csr_offset = BGX_GMP_GMI_RXX_FRM_CTL;
+ else
+ csr_offset = BGX_SMUX_RX_FRM_CTL;
+
+ cfg = bgx_reg_read(bgx, lmacid, csr_offset);
+
+ if (enable)
+ cfg |= BGX_PKT_RX_PTP_EN;
+ else
+ cfg &= ~BGX_PKT_RX_PTP_EN;
+ bgx_reg_write(bgx, lmacid, csr_offset, cfg);
+}
+EXPORT_SYMBOL(bgx_config_timestamping);
+
+void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
+{
+ struct pfc *pfc = (struct pfc *)pause;
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!bgx)
+ return;
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->is_sgmii)
+ return;
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
+ pfc->fc_rx = cfg & RX_EN;
+ pfc->fc_tx = cfg & TX_EN;
+ pfc->autoneg = 0;
+}
+EXPORT_SYMBOL(bgx_lmac_get_pfc);
+
+void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
+{
+ struct pfc *pfc = (struct pfc *)pause;
+ struct bgx *bgx = get_bgx(node, bgx_idx);
+ struct lmac *lmac;
+ u64 cfg;
+
+ if (!bgx)
+ return;
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->is_sgmii)
+ return;
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_CBFC_CTL);
+ cfg &= ~(RX_EN | TX_EN);
+ cfg |= (pfc->fc_rx ? RX_EN : 0x00);
+ cfg |= (pfc->fc_tx ? TX_EN : 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, cfg);
+}
+EXPORT_SYMBOL(bgx_lmac_set_pfc);
+
+static void bgx_sgmii_change_link_state(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ u64 cmr_cfg;
+ u64 port_cfg = 0;
+ u64 misc_ctl = 0;
+ bool tx_en, rx_en;
+
+ cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
+ tx_en = cmr_cfg & CMR_PKT_TX_EN;
+ rx_en = cmr_cfg & CMR_PKT_RX_EN;
+ cmr_cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+ GMI_PORT_CFG_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI RX not idle\n",
+ bgx->bgx_id, lmac->lmacid);
+ return;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG,
+ GMI_PORT_CFG_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "BGX%d LMAC%d GMI TX not idle\n",
+ bgx->bgx_id, lmac->lmacid);
+ return;
+ }
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+ misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
+
+ if (lmac->link_up) {
+ misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+ port_cfg &= ~GMI_PORT_CFG_DUPLEX;
+ port_cfg |= (lmac->last_duplex << 2);
+ } else {
+ misc_ctl |= PCS_MISC_CTL_GMX_ENO;
+ }
+
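+	/* Program the GMI port speed/speed_msb/slot-time bits, the PCS sample
+	 * point and the GMI TX slot/burst values for the negotiated speed.
+	 */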
+ switch (lmac->last_speed) {
+ case 10:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 50; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 100:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 5; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 1000:
+ port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 1; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
+ if (lmac->last_duplex)
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 0);
+ else
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 8192);
+ break;
+ default:
+ break;
+ }
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+
+ /* Restore CMR config settings */
+ cmr_cfg |= (rx_en ? CMR_PKT_RX_EN : 0) | (tx_en ? CMR_PKT_TX_EN : 0);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ if (bgx->is_rgx && (cmr_cfg & (CMR_PKT_RX_EN | CMR_PKT_TX_EN)))
+ xcv_setup_link(lmac->link_up, lmac->last_speed);
+}
+
+static void bgx_lmac_handler(struct net_device *netdev)
+{
+ struct lmac *lmac = container_of(netdev, struct lmac, netdev);
+ struct phy_device *phydev;
+ int link_changed = 0;
+
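+	/* link_changed: -1 = link went down, 1 = link is up and its
+	 * link/speed/duplex state changed, 0 = no change.
+	 */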
+ phydev = lmac->phydev;
+
+ if (!phydev->link && lmac->last_link)
+ link_changed = -1;
+
+ if (phydev->link &&
+ (lmac->last_duplex != phydev->duplex ||
+ lmac->last_link != phydev->link ||
+ lmac->last_speed != phydev->speed)) {
+ link_changed = 1;
+ }
+
+ lmac->last_link = phydev->link;
+ lmac->last_speed = phydev->speed;
+ lmac->last_duplex = phydev->duplex;
+
+ if (!link_changed)
+ return;
+
+ if (link_changed > 0)
+ lmac->link_up = true;
+ else
+ lmac->link_up = false;
+
+ if (lmac->is_sgmii)
+ bgx_sgmii_change_link_state(lmac);
+ else
+ bgx_xaui_check_link(lmac);
+}
+
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = get_bgx(node, bgx_idx);
+ if (!bgx)
+ return 0;
+
+ if (idx > 8)
+ lmac = 0;
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_rx_stats);
+
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = get_bgx(node, bgx_idx);
+ if (!bgx)
+ return 0;
+
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_tx_stats);
+
+/* Configure BGX LMAC in internal loopback mode */
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable)
+{
+ struct bgx *bgx;
+ struct lmac *lmac;
+ u64 cfg;
+
+ bgx = get_bgx(node, bgx_idx);
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmac_idx];
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
+ if (enable)
+ cfg |= PCS_MRX_CTL_LOOPBACK1;
+ else
+ cfg &= ~PCS_MRX_CTL_LOOPBACK1;
+ bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
+ } else {
+ cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
+ if (enable)
+ cfg |= SPU_CTL_LOOPBACK;
+ else
+ cfg &= ~SPU_CTL_LOOPBACK;
+ bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
+ }
+}
+EXPORT_SYMBOL(bgx_lmac_internal_loopback);
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, struct lmac *lmac)
+{
+ int lmacid = lmac->lmacid;
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+ /* Disable frame alignment if using preamble */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ if (cfg & 1)
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ /* PCS reset */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+ return -1;
+ }
+
+ /* power down, reset autoneg, autoneg enable */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ cfg &= ~PCS_MRX_CTL_PWR_DN;
+ cfg |= PCS_MRX_CTL_RST_AN;
+ if (lmac->phydev) {
+ cfg |= PCS_MRX_CTL_AN_EN;
+ } else {
+		/* When no PHY driver is present, or the PHY is a non-standard
+		 * one, FW sets AN_EN to tell the Linux driver whether to do
+		 * auto-negotiation and link polling.
+		 */
+ if (cfg & PCS_MRX_CTL_AN_EN)
+ lmac->autoneg = true;
+ }
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+ if (lmac->lmac_type == BGX_MODE_QSGMII) {
+ /* Disable disparity check for QSGMII */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL);
+ cfg &= ~PCS_MISC_CTL_DISP_EN;
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MISCX_CTL, cfg);
+ return 0;
+ }
+
+ if ((lmac->lmac_type == BGX_MODE_SGMII) && lmac->phydev) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, struct lmac *lmac)
+{
+ u64 cfg;
+ int lmacid = lmac->lmacid;
+
+ /* Reset SPU */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ /* Set interleaved running disparity for RXAUI */
+ if (lmac->lmac_type == BGX_MODE_RXAUI)
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+ SPU_MISC_CTL_INTLV_RDISP);
+
+ /* Clear receive packet disable */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+
+ /* clear all interrupts */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+ if (lmac->use_training) {
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+ /* training enable */
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+ }
+
+ /* Append FCS to each packet */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+ /* Disable forward error correction */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+ cfg &= ~SPU_FEC_CTL_FEC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+ /* Disable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+ cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
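+	/* Advertise only the ability matching this LMAC mode (bit 23 for
+	 * 10G_KR, bit 24 for 40G_KR) and clear the other ability bits
+	 * touched here (12, 22, 25).
+	 */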
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+ if (lmac->lmac_type == BGX_MODE_10G_KR)
+ cfg |= (1 << 23);
+ else if (lmac->lmac_type == BGX_MODE_40G_KR)
+ cfg |= (1 << 24);
+ else
+ cfg &= ~((1 << 23) | (1 << 24));
+ cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+ cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+ cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+ bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+ cfg &= ~SPU_CTL_LOW_POWER;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+ cfg &= ~SMU_TX_CTL_UNI_EN;
+ cfg |= SMU_TX_CTL_DIC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+ /* Enable receive and transmission of pause frames */
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_CBFC_CTL, ((0xffffULL << 32) |
+ BCK_EN | DRP_EN | TX_EN | RX_EN));
+ /* Configure pause time and interval */
+ bgx_reg_write(bgx, lmacid,
+ BGX_SMUX_TX_PAUSE_PKT_TIME, DEFAULT_PAUSE_TIME);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL);
+ cfg &= ~0xFFFFull;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_PKT_INTERVAL,
+ cfg | (DEFAULT_PAUSE_TIME - 0x1000));
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_PAUSE_ZERO, 0x01);
+
+ /* take lmac_count into account */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+ return 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ int lmacid = lmac->lmacid;
+ int lmac_type = lmac->lmac_type;
+ u64 cfg;
+
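+	/* If training has not completed yet (SPU interrupt bit 13 clear),
+	 * clear the training interrupt bits and kick the PMD training
+	 * restart bit, then retry on the next link poll.
+	 */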
+ if (lmac->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+
+ /* wait for PCS to come out of reset */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
+ (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+ SPU_BR_STATUS_BLK_LOCK, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BR_STATUS_BLK_LOCK not completed\n");
+ return -1;
+ }
+ } else {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+ SPU_BX_STATUS_RX_ALIGN, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BX_STATUS_RX_ALIGN not completed\n");
+ return -1;
+ }
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+ if (lmac->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+ return -1;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
+ return -1;
+ }
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+	/* Rx local/remote fault seen.
+	 * Reinit the LMAC to see if the condition recovers.
+	 */
+ bgx_lmac_xaui_init(bgx, lmac);
+
+ return -1;
+}
+
+static void bgx_poll_for_sgmii_link(struct lmac *lmac)
+{
+ u64 pcs_link, an_result;
+ u8 speed;
+
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+	/* Link state bit is sticky, read it again */
+ if (!(pcs_link & PCS_MRX_STATUS_LINK))
+ pcs_link = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_MRX_STATUS);
+
+ if (bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ goto next_poll;
+ }
+
+ lmac->link_up = ((pcs_link & PCS_MRX_STATUS_LINK) != 0) ? true : false;
+ an_result = bgx_reg_read(lmac->bgx, lmac->lmacid,
+ BGX_GMP_PCS_ANX_AN_RESULTS);
+
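+	/* Decode the AN results as used below: bit 1 = full duplex,
+	 * bits [4:3] = speed (0 = 10M, 1 = 100M, 2 = 1000M, else invalid).
+	 */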
+ speed = (an_result >> 3) & 0x3;
+ lmac->last_duplex = (an_result >> 1) & 0x1;
+ switch (speed) {
+ case 0:
+ lmac->last_speed = SPEED_10;
+ break;
+ case 1:
+ lmac->last_speed = SPEED_100;
+ break;
+ case 2:
+ lmac->last_speed = SPEED_1000;
+ break;
+ default:
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ break;
+ }
+
+next_poll:
+
+ if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up)
+ bgx_sgmii_change_link_state(lmac);
+ lmac->last_link = lmac->link_up;
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 3);
+}
+
+static void bgx_poll_for_link(struct work_struct *work)
+{
+ struct lmac *lmac;
+ u64 spu_link, smu_link;
+
+ lmac = container_of(work, struct lmac, dwork.work);
+ if (lmac->is_sgmii) {
+ bgx_poll_for_sgmii_link(lmac);
+ return;
+ }
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false);
+
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
+ lmac->link_up = true;
+ if (lmac->lmac_type == BGX_MODE_XLAUI)
+ lmac->last_speed = SPEED_40000;
+ else
+ lmac->last_speed = SPEED_10000;
+ lmac->last_duplex = DUPLEX_FULL;
+ } else {
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+
+ if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = false;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
+ lmac->last_link = lmac->link_up;
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+}
+
+static int phy_interface_mode(u8 lmac_type)
+{
+ if (lmac_type == BGX_MODE_QSGMII)
+ return PHY_INTERFACE_MODE_QSGMII;
+ if (lmac_type == BGX_MODE_RGMII)
+ return PHY_INTERFACE_MODE_RGMII_RXID;
+
+ return PHY_INTERFACE_MODE_SGMII;
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ lmac->bgx = bgx;
+
+ if ((lmac->lmac_type == BGX_MODE_SGMII) ||
+ (lmac->lmac_type == BGX_MODE_QSGMII) ||
+ (lmac->lmac_type == BGX_MODE_RGMII)) {
+ lmac->is_sgmii = true;
+ if (bgx_lmac_sgmii_init(bgx, lmac))
+ return -1;
+ } else {
+ lmac->is_sgmii = false;
+ if (bgx_lmac_xaui_init(bgx, lmac))
+ return -1;
+ }
+
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+ } else {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+ }
+
+	/* actual number of DMAC filters available to a given LMAC */
+ lmac->dmacs_count = (RX_DMAC_COUNT / bgx->lmac_count);
+ lmac->dmacs = kcalloc(lmac->dmacs_count, sizeof(*lmac->dmacs),
+ GFP_KERNEL);
+ if (!lmac->dmacs)
+ return -ENOMEM;
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+	/* Restore default cfg, in case low-level firmware changed it */
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
+
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR)) {
+ if (!lmac->phydev) {
+ if (lmac->autoneg) {
+ bgx_reg_write(bgx, lmacid,
+ BGX_GMP_PCS_LINKX_TIMER,
+ PCS_LINKX_TIMER_COUNT);
+ goto poll;
+ } else {
+				/* Default to the link speed and duplex set below */
+ lmac->link_up = true;
+ lmac->last_speed = SPEED_1000;
+ lmac->last_duplex = DUPLEX_FULL;
+ bgx_sgmii_change_link_state(lmac);
+ return 0;
+ }
+ }
+ lmac->phydev->dev_flags = 0;
+
+ if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ bgx_lmac_handler,
+ phy_interface_mode(lmac->lmac_type)))
+ return -ENODEV;
+
+ phy_start(lmac->phydev);
+ return 0;
+ }
+
+poll:
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+
+ return 0;
+}
+
+static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->check_link) {
+ /* Destroy work queue */
+ cancel_delayed_work_sync(&lmac->dwork);
+ destroy_workqueue(lmac->check_link);
+ }
+
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+	/* Give the Rx/Tx FIFOs a chance to drain */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ bgx_flush_dmac_cam_filter(bgx, lmacid);
+ kfree(lmac->dmacs);
+
+ if ((lmac->lmac_type != BGX_MODE_XFI) &&
+ (lmac->lmac_type != BGX_MODE_XLAUI) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR) &&
+ (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
+ phy_disconnect(lmac->phydev);
+
+ lmac->phydev = NULL;
+}
+
+static void bgx_init_hw(struct bgx *bgx)
+{
+ int i;
+ struct lmac *lmac;
+
+ bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+ if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+ dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
+
+ /* Set lmac type and lane2serdes mapping */
+ for (i = 0; i < bgx->lmac_count; i++) {
+ lmac = &bgx->lmac[i];
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (lmac->lmac_type << 8) | lmac->lane_to_sds);
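+		/* lmacid_bd: running LMAC index counted across all BGX
+		 * devices probed so far.
+		 */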
+ bgx->lmac[i].lmacid_bd = lmac_count;
+ lmac_count++;
+ }
+
+ bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
+
+ /* Set the backpressure AND mask */
+ for (i = 0; i < bgx->lmac_count; i++)
+ bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+ ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+ (i * MAX_BGX_CHANS_PER_LMAC));
+
+ /* Disable all MAC filtering */
+ for (i = 0; i < RX_DMAC_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+ /* Disable MAC steering (NCSI traffic) */
+ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00);
+}
+
+static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac)
+{
+ return (u8)(bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG) & 0xFF);
+}
+
+static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
+{
+ struct device *dev = &bgx->pdev->dev;
+ struct lmac *lmac;
+ char str[27];
+
+ if (!bgx->is_dlm && lmacid)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+ if (!bgx->is_dlm)
+ sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
+ else
+ sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
+
+ switch (lmac->lmac_type) {
+ case BGX_MODE_SGMII:
+ dev_info(dev, "%s: SGMII\n", (char *)str);
+ break;
+ case BGX_MODE_XAUI:
+ dev_info(dev, "%s: XAUI\n", (char *)str);
+ break;
+ case BGX_MODE_RXAUI:
+ dev_info(dev, "%s: RXAUI\n", (char *)str);
+ break;
+ case BGX_MODE_XFI:
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XFI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 10G_KR\n", (char *)str);
+ break;
+ case BGX_MODE_XLAUI:
+ if (!lmac->use_training)
+ dev_info(dev, "%s: XLAUI\n", (char *)str);
+ else
+ dev_info(dev, "%s: 40G_KR4\n", (char *)str);
+ break;
+ case BGX_MODE_QSGMII:
+ dev_info(dev, "%s: QSGMII\n", (char *)str);
+ break;
+ case BGX_MODE_RGMII:
+ dev_info(dev, "%s: RGMII\n", (char *)str);
+ break;
+ case BGX_MODE_INVALID:
+ /* Nothing to do */
+ break;
+ }
+}
+
+static void lmac_set_lane2sds(struct bgx *bgx, struct lmac *lmac)
+{
+ switch (lmac->lmac_type) {
+ case BGX_MODE_SGMII:
+ case BGX_MODE_XFI:
+ lmac->lane_to_sds = lmac->lmacid;
+ break;
+ case BGX_MODE_XAUI:
+ case BGX_MODE_XLAUI:
+ case BGX_MODE_RGMII:
+ lmac->lane_to_sds = 0xE4;
+ break;
+ case BGX_MODE_RXAUI:
+ lmac->lane_to_sds = (lmac->lmacid) ? 0xE : 0x4;
+ break;
+ case BGX_MODE_QSGMII:
+		/* There is no way to determine whether DLM0/2 or DLM1/3 is
+		 * configured as QSGMII, since the bootloader configures all
+		 * LMACs; take whatever low-level firmware has configured.
+		 */
+ lmac->lane_to_sds = bgx_get_lane2sds_cfg(bgx, lmac);
+ break;
+ default:
+ lmac->lane_to_sds = 0;
+ break;
+ }
+}
+
+static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
+{
+ if ((lmac->lmac_type != BGX_MODE_10G_KR) &&
+ (lmac->lmac_type != BGX_MODE_40G_KR)) {
+ lmac->use_training = false;
+ return;
+ }
+
+ lmac->use_training = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+}
+
+static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
+{
+ struct lmac *lmac;
+ u64 cmr_cfg;
+ u8 lmac_type;
+ u8 lane_to_sds;
+
+ lmac = &bgx->lmac[idx];
+
+ if (!bgx->is_dlm || bgx->is_rgx) {
+ /* Read LMAC0 type to figure out QLM mode
+ * This is configured by low level firmware
+ */
+ cmr_cfg = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac->lmac_type = (cmr_cfg >> 8) & 0x07;
+ if (bgx->is_rgx)
+ lmac->lmac_type = BGX_MODE_RGMII;
+ lmac_set_training(bgx, lmac, 0);
+ lmac_set_lane2sds(bgx, lmac);
+ return;
+ }
+
+	/* For DLMs or SLMs on 80/81/83xx, many lane configurations are
+	 * possible and they vary across boards. The kernel has no way to
+	 * identify the board type/info, but firmware does, so just take the
+	 * LMAC type and serdes lane config as is.
+	 */
+ cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+ lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+ lane_to_sds = (u8)(cmr_cfg & 0xFF);
+ /* Check if config is reset value */
+ if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+ lmac->lmac_type = BGX_MODE_INVALID;
+ else
+ lmac->lmac_type = lmac_type;
+ lmac->lane_to_sds = lane_to_sds;
+ lmac_set_training(bgx, lmac, lmac->lmacid);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct lmac *lmac;
+ u8 idx;
+
+ /* Init all LMAC's type to invalid */
+ for (idx = 0; idx < bgx->max_lmac; idx++) {
+ lmac = &bgx->lmac[idx];
+ lmac->lmacid = idx;
+ lmac->lmac_type = BGX_MODE_INVALID;
+ lmac->use_training = false;
+ }
+
+ /* It is assumed that low level firmware sets this value */
+ bgx->lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (bgx->lmac_count > bgx->max_lmac)
+ bgx->lmac_count = bgx->max_lmac;
+
+ for (idx = 0; idx < bgx->lmac_count; idx++) {
+ bgx_set_lmac_config(bgx, idx);
+ bgx_print_qlm_mode(bgx, idx);
+ }
+}
+
+#ifdef CONFIG_ACPI
+
+static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
+ u8 *dst)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
+ if (ret) {
+ dev_err(dev, "MAC address invalid: %pM\n", mac);
+ return -EINVAL;
+ }
+
+ dev_info(dev, "MAC address set to: %pM\n", mac);
+
+ ether_addr_copy(dst, mac);
+ return 0;
+}
+
+/* Currently only sets the MAC address. */
+static acpi_status bgx_acpi_register_phy(acpi_handle handle,
+ u32 lvl, void *context, void **rv)
+{
+ struct bgx *bgx = context;
+ struct device *dev = &bgx->pdev->dev;
+ struct acpi_device *adev;
+
+ adev = acpi_fetch_acpi_dev(handle);
+ if (!adev)
+ goto out;
+
+ acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
+
+ bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
+ bgx->acpi_lmac_idx++; /* move to next LMAC */
+out:
+ return AE_OK;
+}
+
+static acpi_status bgx_acpi_match_id(acpi_handle handle, u32 lvl,
+ void *context, void **ret_val)
+{
+ struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct bgx *bgx = context;
+ char bgx_sel[5];
+
+ snprintf(bgx_sel, 5, "BGX%d", bgx->bgx_id);
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &string))) {
+ pr_warn("Invalid link device\n");
+ return AE_OK;
+ }
+
+ if (strncmp(string.pointer, bgx_sel, 4)) {
+ kfree(string.pointer);
+ return AE_OK;
+ }
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ bgx_acpi_register_phy, NULL, bgx, NULL);
+
+ kfree(string.pointer);
+ return AE_CTRL_TERMINATE;
+}
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+ acpi_get_devices(NULL, bgx_acpi_match_id, bgx, (void **)NULL);
+ return 0;
+}
+
+#else
+
+static int bgx_init_acpi_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#if IS_ENABLED(CONFIG_OF_MDIO)
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ struct fwnode_handle *fwn;
+ struct device_node *node = NULL;
+ u8 lmac = 0;
+
+ device_for_each_child_node(&bgx->pdev->dev, fwn) {
+ struct phy_device *pd;
+ struct device_node *phy_np;
+
+ /* Should always be an OF node. But if it is not, we
+ * cannot handle it, so exit the loop.
+ */
+ node = to_of_node(fwn);
+ if (!node)
+ break;
+
+ of_get_mac_address(node, bgx->lmac[lmac].mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ bgx->lmac[lmac].lmacid = lmac;
+
+ phy_np = of_parse_phandle(node, "phy-handle", 0);
+		/* If there is no PHY, or defective firmware presents this
+		 * Cortina PHY (for which there is no driver support),
+		 * ignore it.
+		 */
+ if (phy_np &&
+ !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) {
+ /* Wait until the phy drivers are available */
+ pd = of_phy_find_device(phy_np);
+ if (!pd)
+ goto defer;
+ bgx->lmac[lmac].phydev = pd;
+ }
+
+ lmac++;
+ if (lmac == bgx->max_lmac) {
+ of_node_put(node);
+ break;
+ }
+ }
+ return 0;
+
+defer:
+ /* We are bailing out, try not to leak device reference counts
+ * for phy devices we may have already found.
+ */
+ while (lmac) {
+ if (bgx->lmac[lmac].phydev) {
+ put_device(&bgx->lmac[lmac].phydev->mdio.dev);
+ bgx->lmac[lmac].phydev = NULL;
+ }
+ lmac--;
+ }
+ of_node_put(node);
+ return -EPROBE_DEFER;
+}
+
+#else
+
+static int bgx_init_of_phy(struct bgx *bgx)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_OF_MDIO */
+
+static int bgx_init_phy(struct bgx *bgx)
+{
+ if (!acpi_disabled)
+ return bgx_init_acpi_phy(bgx);
+
+ return bgx_init_of_phy(bgx);
+}
+
+static irqreturn_t bgx_intr_handler(int irq, void *data)
+{
+ struct bgx *bgx = (struct bgx *)data;
+ u64 status, val;
+ int lmac;
+
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
+ if (status & GMI_TXX_INT_UNDFLW) {
+ pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
+ bgx->bgx_id, lmac);
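+			/* Toggle CMR_EN to reset the LMAC and recover from the underflow */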
+ val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
+ val &= ~CMR_EN;
+ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
+ val |= CMR_EN;
+ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
+ }
+ /* clear interrupts */
+ bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bgx_register_intr(struct pci_dev *pdev)
+{
+ struct bgx *bgx = pci_get_drvdata(pdev);
+ int ret;
+
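+	/* Allocate enough MSI-X vectors (0..BGX_LMAC_VEC_OFFSET - 1) so the
+	 * GMP GMI TX vector requested below is available.
+	 */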
+ ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
+ BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ pci_err(pdev, "Req for #%d msix vectors failed\n",
+ BGX_LMAC_VEC_OFFSET);
+ return;
+ }
+ ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
+ bgx, "BGX%d", bgx->bgx_id);
+ if (ret)
+ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+}
+
+static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct bgx *bgx = NULL;
+ u8 lmac;
+ u16 sdevid;
+
+ bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
+ if (!bgx)
+ return -ENOMEM;
+ bgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, bgx);
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ pci_set_drvdata(pdev, NULL);
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!bgx->reg_base) {
+ dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ set_max_bgx_per_node(pdev);
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
+ if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
+ bgx->bgx_id = (pci_resource_start(pdev,
+ PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
+ bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
+ bgx->max_lmac = MAX_LMAC_PER_BGX;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ } else {
+ bgx->is_rgx = true;
+ bgx->max_lmac = 1;
+ bgx->bgx_id = MAX_BGX_PER_CN81XX - 1;
+ bgx_vnic[bgx->bgx_id] = bgx;
+ xcv_init_hw();
+ }
+
+	/* On 81xx all are DLMs; on 83xx there are 3 BGX QLMs and one BGX,
+	 * i.e. BGX2, can be split across 2 DLMs.
+	 */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if ((sdevid == PCI_SUBSYS_DEVID_81XX_BGX) ||
+ ((sdevid == PCI_SUBSYS_DEVID_83XX_BGX) && (bgx->bgx_id == 2)))
+ bgx->is_dlm = true;
+
+ bgx_get_qlm_mode(bgx);
+
+ err = bgx_init_phy(bgx);
+ if (err)
+ goto err_enable;
+
+ bgx_init_hw(bgx);
+
+ bgx_register_intr(pdev);
+
+ /* Enable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ err = bgx_lmac_enable(bgx, lmac);
+ if (err) {
+ dev_err(dev, "BGX%d failed to enable lmac%d\n",
+ bgx->bgx_id, lmac);
+ while (lmac)
+ bgx_lmac_disable(bgx, --lmac);
+ goto err_enable;
+ }
+ }
+
+ return 0;
+
+err_enable:
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void bgx_remove(struct pci_dev *pdev)
+{
+ struct bgx *bgx = pci_get_drvdata(pdev);
+ u8 lmac;
+
+ /* Disable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ bgx_lmac_disable(bgx, lmac);
+
+ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver bgx_driver = {
+ .name = DRV_NAME,
+ .id_table = bgx_id_table,
+ .probe = bgx_probe,
+ .remove = bgx_remove,
+};
+
+static int __init bgx_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&bgx_driver);
+}
+
+static void __exit bgx_cleanup_module(void)
+{
+ pci_unregister_driver(&bgx_driver);
+}
+
+module_init(bgx_init_module);
+module_exit(bgx_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
new file mode 100644
index 000000000..cdea49392
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ */
+
+#ifndef THUNDER_BGX_H
+#define THUNDER_BGX_H
+
+/* PCI device ID */
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+#define PCI_DEVICE_ID_THUNDER_RGX 0xA054
+
+/* Subsystem device IDs */
+#define PCI_SUBSYS_DEVID_88XX_BGX 0xA126
+#define PCI_SUBSYS_DEVID_81XX_BGX 0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX 0xA254
+#define PCI_SUBSYS_DEVID_83XX_BGX 0xA326
+
+#define MAX_BGX_THUNDER 8 /* Max 2 nodes, 4 per node */
+#define MAX_BGX_PER_CN88XX 2
+#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
+#define MAX_BGX_PER_CN83XX 4
+#define MAX_LMAC_PER_BGX 4
+#define MAX_BGX_CHANS_PER_LMAC 16
+#define MAX_DMAC_PER_LMAC 8
+#define MAX_FRAME_SIZE 9216
+#define DEFAULT_PAUSE_TIME 0xFFFF
+
+#define BGX_ID_MASK 0x3
+#define LMAC_ID_MASK 0x3
+
+#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
+
+/* Registers */
+#define BGX_CMRX_CFG 0x00
+#define CMR_PKT_TX_EN BIT_ULL(13)
+#define CMR_PKT_RX_EN BIT_ULL(14)
+#define CMR_EN BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG 0x08
+#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP 0x60
+#define BGX_CMRX_RX_STAT0 0x70
+#define BGX_CMRX_RX_STAT1 0x78
+#define BGX_CMRX_RX_STAT2 0x80
+#define BGX_CMRX_RX_STAT3 0x88
+#define BGX_CMRX_RX_STAT4 0x90
+#define BGX_CMRX_RX_STAT5 0x98
+#define BGX_CMRX_RX_STAT6 0xA0
+#define BGX_CMRX_RX_STAT7 0xA8
+#define BGX_CMRX_RX_STAT8 0xB0
+#define BGX_CMRX_RX_STAT9 0xB8
+#define BGX_CMRX_RX_STAT10 0xC0
+#define BGX_CMRX_RX_BP_DROP 0xC8
+#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
+#define BGX_CMR_RX_DMACX_CAM 0x200
+#define RX_DMACX_CAM_EN BIT_ULL(48)
+#define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49)
+#define RX_DMAC_COUNT 32
+#define BGX_CMR_RX_STEERING 0x300
+#define RX_TRAFFIC_STEER_RULE_COUNT 8
+#define BGX_CMR_CHAN_MSK_AND 0x450
+#define BGX_CMR_BIST_STATUS 0x460
+#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
+#define BGX_CMRX_TX_STAT0 0x600
+#define BGX_CMRX_TX_STAT1 0x608
+#define BGX_CMRX_TX_STAT2 0x610
+#define BGX_CMRX_TX_STAT3 0x618
+#define BGX_CMRX_TX_STAT4 0x620
+#define BGX_CMRX_TX_STAT5 0x628
+#define BGX_CMRX_TX_STAT6 0x630
+#define BGX_CMRX_TX_STAT7 0x638
+#define BGX_CMRX_TX_STAT8 0x640
+#define BGX_CMRX_TX_STAT9 0x648
+#define BGX_CMRX_TX_STAT10 0x650
+#define BGX_CMRX_TX_STAT11 0x658
+#define BGX_CMRX_TX_STAT12 0x660
+#define BGX_CMRX_TX_STAT13 0x668
+#define BGX_CMRX_TX_STAT14 0x670
+#define BGX_CMRX_TX_STAT15 0x678
+#define BGX_CMRX_TX_STAT16 0x680
+#define BGX_CMRX_TX_STAT17 0x688
+#define BGX_CMR_TX_LMACS 0x1000
+
+#define BGX_SPUX_CONTROL1 0x10000
+#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_LOOPBACK BIT_ULL(14)
+#define SPU_CTL_RESET BIT_ULL(15)
+#define BGX_SPUX_STATUS1 0x10008
+#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
+#define BGX_SPUX_STATUS2 0x10020
+#define SPU_STATUS2_RCVFLT BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS 0x10028
+#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1 0x10030
+#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
+#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL 0x10068
+#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
+#define BGX_SPUX_BR_PMD_LD_REP 0x10090
+#define BGX_SPUX_FEC_CONTROL 0x100A0
+#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
+#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL 0x100C8
+#define SPU_AN_CTL_AN_EN BIT_ULL(12)
+#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
+#define BGX_SPUX_AN_ADV 0x100D8
+#define BGX_SPUX_MISC_CONTROL 0x10218
+#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
+#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
+#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S 0x10228
+#define BGX_SPUX_INT_ENA_W1C 0x10230
+#define BGX_SPUX_INT_ENA_W1S 0x10238
+#define BGX_SPU_DBG_CONTROL 0x10300
+#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
+#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_FRM_CTL 0x20020
+#define BGX_PKT_RX_PTP_EN BIT_ULL(12)
+#define BGX_SMUX_RX_JABBER 0x20030
+#define BGX_SMUX_RX_CTL 0x20048
+#define SMU_RX_CTL_STATUS (3ull << 0)
+#define BGX_SMUX_TX_APPEND 0x20100
+#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
+#define BGX_SMUX_TX_PAUSE_PKT_TIME 0x20110
+#define BGX_SMUX_TX_MIN_PKT 0x20118
+#define BGX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
+#define BGX_SMUX_TX_PAUSE_ZERO 0x20138
+#define BGX_SMUX_TX_INT 0x20140
+#define BGX_SMUX_TX_CTL 0x20178
+#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
+#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
+#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
+#define BGX_SMUX_TX_THRESH 0x20180
+#define BGX_SMUX_CTL 0x20200
+#define SMU_CTL_RX_IDLE BIT_ULL(0)
+#define SMU_CTL_TX_IDLE BIT_ULL(1)
+#define BGX_SMUX_CBFC_CTL 0x20218
+#define RX_EN BIT_ULL(0)
+#define TX_EN BIT_ULL(1)
+#define BCK_EN BIT_ULL(2)
+#define DRP_EN BIT_ULL(3)
+
+#define BGX_GMP_PCS_MRX_CTL 0x30000
+#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
+#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
+#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
+#define PCS_MRX_CTL_RESET BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_LINK BIT_ULL(2)
+#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_ADV 0x30010
+#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_LINKX_TIMER 0x30040
+#define PCS_LINKX_TIMER_COUNT 0x1E84
+#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
+#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_MODE BIT_ULL(8)
+#define PCS_MISC_CTL_DISP_EN BIT_ULL(13)
+#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
+#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
+#define BGX_GMP_GMI_PRTX_CFG 0x38020
+#define GMI_PORT_CFG_SPEED BIT_ULL(1)
+#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
+#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
+#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define GMI_PORT_CFG_RX_IDLE BIT_ULL(12)
+#define GMI_PORT_CFG_TX_IDLE BIT_ULL(13)
+#define BGX_GMP_GMI_RXX_FRM_CTL 0x38028
+#define BGX_GMP_GMI_RXX_JABBER 0x38038
+#define BGX_GMP_GMI_TXX_THRESH 0x38210
+#define BGX_GMP_GMI_TXX_APPEND 0x38218
+#define BGX_GMP_GMI_TXX_SLOT 0x38220
+#define BGX_GMP_GMI_TXX_BURST 0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
+#define BGX_GMP_GMI_TXX_INT 0x38500
+#define BGX_GMP_GMI_TXX_INT_W1S 0x38508
+#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510
+#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518
+#define GMI_TXX_INT_PTP_LOST BIT_ULL(4)
+#define GMI_TXX_INT_LATE_COL BIT_ULL(3)
+#define GMI_TXX_INT_XSDEF BIT_ULL(2)
+#define GMI_TXX_INT_XSCOL BIT_ULL(1)
+#define GMI_TXX_INT_UNDFLW BIT_ULL(0)
+
+#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL 0x400008
+#define BGX_MSIX_PBA_0 0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS 30
+#define BGX_LMAC_VEC_OFFSET 7
+#define BGX_MSIX_VEC_SHIFT 4
+
+#define CMRX_INT 0
+#define SPUX_INT 1
+#define SMUX_RX_INT 2
+#define SMUX_TX_INT 3
+#define GMPX_PCS_INT 4
+#define GMPX_GMI_RX_INT 5
+#define GMPX_GMI_TX_INT 6
+#define CMR_MEM_INT 28
+#define SPU_MEM_INT 29
+
+#define LMAC_INTR_LINK_UP BIT(0)
+#define LMAC_INTR_LINK_DOWN BIT(1)
+
+#define BGX_XCAST_BCAST_ACCEPT BIT(0)
+#define BGX_XCAST_MCAST_ACCEPT BIT(1)
+#define BGX_XCAST_MCAST_FILTER BIT(2)
+
+void bgx_set_dmac_cam_filter(int node, int bgx_idx, int lmacid, u64 mac, u8 vf);
+void bgx_reset_xcast_mode(int node, int bgx_idx, int lmacid, u8 vf);
+void bgx_set_xcast_mode(int node, int bgx_idx, int lmacid, u8 mode);
+void octeon_mdiobus_force_mod_depencency(void);
+void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+unsigned bgx_get_map(int node);
+int bgx_get_lmac_count(int node, int bgx);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+void bgx_lmac_internal_loopback(int node, int bgx_idx,
+ int lmac_idx, bool enable);
+void bgx_config_timestamping(int node, int bgx_idx, int lmacid, bool enable);
+void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause);
+void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause);
+
+void xcv_init_hw(void);
+void xcv_setup_link(bool link_up, int link_speed);
+
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
+#define BGX_RX_STATS_COUNT 11
+#define BGX_TX_STATS_COUNT 18
+
+struct bgx_stats {
+ u64 rx_stats[BGX_RX_STATS_COUNT];
+ u64 tx_stats[BGX_TX_STATS_COUNT];
+};
+
+enum LMAC_TYPE {
+ BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+ BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
+ BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+ BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+ BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+ BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+ BGX_MODE_RGMII = 5,
+ BGX_MODE_QSGMII = 6,
+ BGX_MODE_INVALID = 7,
+};
+
+#endif /* THUNDER_BGX_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
new file mode 100644
index 000000000..3ebb93792
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2016 Cavium, Inc.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder_xcv"
+#define DRV_VERSION "1.0"
+
+/* Register offsets */
+#define XCV_RESET 0x00
+#define PORT_EN BIT_ULL(63)
+#define CLK_RESET BIT_ULL(15)
+#define DLL_RESET BIT_ULL(11)
+#define COMP_EN BIT_ULL(7)
+#define TX_PKT_RESET BIT_ULL(3)
+#define TX_DATA_RESET BIT_ULL(2)
+#define RX_PKT_RESET BIT_ULL(1)
+#define RX_DATA_RESET BIT_ULL(0)
+#define XCV_DLL_CTL 0x10
+#define CLKRX_BYP BIT_ULL(23)
+#define CLKTX_BYP BIT_ULL(15)
+#define XCV_COMP_CTL 0x20
+#define DRV_BYP BIT_ULL(63)
+#define XCV_CTL 0x30
+#define XCV_INT 0x40
+#define XCV_INT_W1S 0x48
+#define XCV_INT_ENA_W1C 0x50
+#define XCV_INT_ENA_W1S 0x58
+#define XCV_INBND_STATUS 0x80
+#define XCV_BATCH_CRD_RET 0x100
+
+struct xcv {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct xcv *xcv;
+
+/* Supported devices */
+static const struct pci_device_id xcv_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xA056) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder RGX/XCV Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, xcv_id_table);
+
+void xcv_init_hw(void)
+{
+ u64 cfg;
+
+ /* Take DLL out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~DLL_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Take clock tree out of reset */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ /* Wait for DLL to lock */
+ msleep(1);
+
+	/* Configure DLL - enable or bypass:
+	 * TX is not bypassed, RX is bypassed
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_DLL_CTL);
+ cfg &= ~0xFF03;
+ cfg |= CLKRX_BYP;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_DLL_CTL);
+
+ /* Enable compensation controller and force the
+	 * write to be visible to HW by reading it back.
+ */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= COMP_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ /* Wait for compensation state machine to lock */
+ msleep(10);
+
+ /* enable the XCV block */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= PORT_EN;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= CLK_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+}
+EXPORT_SYMBOL(xcv_init_hw);
+
+void xcv_setup_link(bool link_up, int link_speed)
+{
+ u64 cfg;
+ int speed = 2;
+
+ if (!xcv) {
+ pr_err("XCV init not done, probe may have failed\n");
+ return;
+ }
+
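+	/* XCV_CTL speed field encoding used below: 0 = 10Mbps, 1 = 100Mbps,
+	 * 2 = 1000Mbps (default).
+	 */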
+ if (link_speed == 100)
+ speed = 1;
+ else if (link_speed == 10)
+ speed = 0;
+
+ if (link_up) {
+ /* set operating speed */
+ cfg = readq_relaxed(xcv->reg_base + XCV_CTL);
+ cfg &= ~0x03;
+ cfg |= speed;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_CTL);
+
+ /* Reset datapaths */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_DATA_RESET | RX_DATA_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Enable the packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg |= TX_PKT_RESET | RX_PKT_RESET;
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+
+ /* Return credits to RGX */
+ writeq_relaxed(0x01, xcv->reg_base + XCV_BATCH_CRD_RET);
+ } else {
+ /* Disable packet flow */
+ cfg = readq_relaxed(xcv->reg_base + XCV_RESET);
+ cfg &= ~(TX_PKT_RESET | RX_PKT_RESET);
+ writeq_relaxed(cfg, xcv->reg_base + XCV_RESET);
+ readq_relaxed(xcv->reg_base + XCV_RESET);
+ }
+}
+EXPORT_SYMBOL(xcv_setup_link);
+
+static int xcv_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+
+ xcv = devm_kzalloc(dev, sizeof(struct xcv), GFP_KERNEL);
+ if (!xcv)
+ return -ENOMEM;
+ xcv->pdev = pdev;
+
+ pci_set_drvdata(pdev, xcv);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ goto err_kfree;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ xcv->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!xcv->reg_base) {
+ dev_err(dev, "XCV: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_kfree:
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ return err;
+}
+
+static void xcv_remove(struct pci_dev *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (xcv) {
+ devm_kfree(dev, xcv);
+ xcv = NULL;
+ }
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver xcv_driver = {
+ .name = DRV_NAME,
+ .id_table = xcv_id_table,
+ .probe = xcv_probe,
+ .remove = xcv_remove,
+};
+
+static int __init xcv_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&xcv_driver);
+}
+
+static void __exit xcv_cleanup_module(void)
+{
+ pci_unregister_driver(&xcv_driver);
+}
+
+module_init(xcv_init_module);
+module_exit(xcv_cleanup_module);